author     Joonas Lahtinen <joonas.lahtinen@linux.intel.com>  2016-03-18 11:42:57 +0300
committer  Joonas Lahtinen <joonas.lahtinen@linux.intel.com>  2016-03-18 16:18:15 +0300
commit     62106b4f6b9118073ec59e3e34ec393ed76cf24f (patch)
tree       6df24203169102d048c1950b1c955c94209d109a /drivers/gpu/drm
parent     dc3b04fbf43227e21fa95dbe6c7a13def655c891 (diff)
download   linux-62106b4f6b9118073ec59e3e34ec393ed76cf24f.tar.xz
drm/i915: Rename dev_priv->gtt to dev_priv->ggtt
Refer to Global GTT consistently as GGTT, thus rename dev_priv->gtt
to dev_priv->ggtt and struct i915_gtt to struct i915_ggtt.

Fix a couple of whitespace problems while at it.

v2:
- Fix a typo in commit message.

Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
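For orientation before the file-by-file diff: the change is a mechanical rename, sketched below in abbreviated form. Field lists are trimmed to what this patch touches (the full definitions live in i915_drv.h and i915_gem_gtt.h), and the i915_ggtt_total() helper at the end is invented here purely to illustrate how call sites change.

/* Before this patch the descriptor and its embedding field were named "gtt":
 *
 *     struct i915_gtt { struct i915_address_space base; ... };
 *     struct drm_i915_private { ... struct i915_gtt gtt; ... };
 *
 * After it, both carry the GGTT name:
 */
struct i915_ggtt {
        struct i915_address_space base;  /* VM representing the global address space */
        size_t stolen_size;              /* Total size of stolen memory */
        /* mappable_base/mappable_end, mappable, gsm, mtrr, do_idle_maps,
         * stolen_reserved_base/size and the probe() hook are unchanged apart
         * from the gtt_probe -> probe rename. */
};

struct drm_i915_private {
        /* ... */
        struct i915_ggtt ggtt;           /* was: struct i915_gtt gtt */
        /* ... */
};

/* Call sites follow mechanically, e.g. (illustrative helper, not in the tree): */
static inline u64 i915_ggtt_total(struct drm_i915_private *dev_priv)
{
        return dev_priv->ggtt.base.total;  /* was dev_priv->gtt.base.total */
}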
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c        |   8
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c            |  18
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            |   4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            |  16
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c    |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c |   6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c        | 147
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.h        |   9
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c     |  50
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c      |  10
-rw-r--r--  drivers/gpu/drm/i915/i915_vgpu.c           |  14
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c       |   4
-rw-r--r--  drivers/gpu/drm/i915/intel_fbc.c           |   4
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c         |   6
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c       |   4
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c            |  10
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c    |   2
17 files changed, 156 insertions, 158 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index ccdca2c7d799..e0ba3e38000f 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -203,7 +203,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct list_head *head;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct i915_address_space *vm = &dev_priv->ggtt.base;
struct i915_vma *vma;
u64 total_obj_size, total_gtt_size;
int count, ret;
@@ -433,7 +433,7 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
u32 count, mappable_count, purgeable_count;
u64 size, mappable_size, purgeable_size;
struct drm_i915_gem_object *obj;
- struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct i915_address_space *vm = &dev_priv->ggtt.base;
struct drm_file *file;
struct i915_vma *vma;
int ret;
@@ -492,8 +492,8 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
count, size);
seq_printf(m, "%llu [%llu] gtt total\n",
- dev_priv->gtt.base.total,
- (u64)dev_priv->gtt.mappable_end - dev_priv->gtt.base.start);
+ dev_priv->ggtt.base.total,
+ (u64)dev_priv->ggtt.mappable_end - dev_priv->ggtt.base.start);
seq_putc(m, '\n');
print_batch_pool_stats(m, dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 68592b0de874..3565163d7b31 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -491,8 +491,8 @@ static int i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
if (!ap)
return -ENOMEM;
- ap->ranges[0].base = dev_priv->gtt.mappable_base;
- ap->ranges[0].size = dev_priv->gtt.mappable_end;
+ ap->ranges[0].base = dev_priv->ggtt.mappable_base;
+ ap->ranges[0].size = dev_priv->ggtt.mappable_end;
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
@@ -1172,17 +1172,17 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv)
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(32));
- aperture_size = dev_priv->gtt.mappable_end;
+ aperture_size = dev_priv->ggtt.mappable_end;
- dev_priv->gtt.mappable =
- io_mapping_create_wc(dev_priv->gtt.mappable_base,
+ dev_priv->ggtt.mappable =
+ io_mapping_create_wc(dev_priv->ggtt.mappable_base,
aperture_size);
- if (dev_priv->gtt.mappable == NULL) {
+ if (dev_priv->ggtt.mappable == NULL) {
ret = -EIO;
goto out_gtt;
}
- dev_priv->gtt.mtrr = arch_phys_wc_add(dev_priv->gtt.mappable_base,
+ dev_priv->ggtt.mtrr = arch_phys_wc_add(dev_priv->ggtt.mappable_base,
aperture_size);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY,
@@ -1230,8 +1230,8 @@ static void i915_driver_cleanup_hw(struct drm_i915_private *dev_priv)
pci_disable_msi(dev->pdev);
pm_qos_remove_request(&dev_priv->pm_qos);
- arch_phys_wc_del(dev_priv->gtt.mtrr);
- io_mapping_free(dev_priv->gtt.mappable);
+ arch_phys_wc_del(dev_priv->ggtt.mtrr);
+ io_mapping_free(dev_priv->ggtt.mappable);
i915_global_gtt_cleanup(dev);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 00c41a4bde2a..b1d65540b48b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1810,7 +1810,7 @@ struct drm_i915_private {
struct drm_atomic_state *modeset_restore_state;
struct list_head vm_list; /* Global list of all address spaces */
- struct i915_gtt gtt; /* VM representing the global address space */
+ struct i915_ggtt ggtt; /* VM representing the global address space */
struct i915_gem_mm mm;
DECLARE_HASHTABLE(mm_structs, 7);
@@ -3126,7 +3126,7 @@ bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj);
/* Some GGTT VM helpers */
#define i915_obj_to_ggtt(obj) \
- (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
+ (&((struct drm_i915_private *)(obj)->base.dev->dev_private)->ggtt.base)
static inline struct i915_hw_ppgtt *
i915_vm_to_ppgtt(struct i915_address_space *vm)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f45856d7084c..8588c83abb35 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -132,7 +132,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
- struct i915_gtt *ggtt = &dev_priv->gtt;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
size_t pinned;
@@ -146,7 +146,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned += vma->node.size;
mutex_unlock(&dev->struct_mutex);
- args->aper_size = dev_priv->gtt.base.total;
+ args->aper_size = dev_priv->ggtt.base.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
@@ -807,7 +807,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
- if (fast_user_write(dev_priv->gtt.mappable, page_base,
+ if (fast_user_write(dev_priv->ggtt.mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
goto out_flush;
@@ -1825,7 +1825,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
/* Use a partial view if the object is bigger than the aperture. */
- if (obj->base.size >= dev_priv->gtt.mappable_end &&
+ if (obj->base.size >= dev_priv->ggtt.mappable_end &&
obj->tiling_mode == I915_TILING_NONE) {
static const unsigned int chunk_size = 256; // 1 MiB
@@ -1853,7 +1853,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
goto unpin;
/* Finally, remap it using the new GTT offset */
- pfn = dev_priv->gtt.mappable_base +
+ pfn = dev_priv->ggtt.mappable_base +
i915_gem_obj_ggtt_offset_view(obj, &view);
pfn >>= PAGE_SHIFT;
@@ -3511,7 +3511,7 @@ i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
end = vm->total;
if (flags & PIN_MAPPABLE)
- end = min_t(u64, end, dev_priv->gtt.mappable_end);
+ end = min_t(u64, end, dev_priv->ggtt.mappable_end);
if (flags & PIN_ZONE_4G)
end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
@@ -3772,7 +3772,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
vma = i915_gem_obj_to_ggtt(obj);
if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
list_move_tail(&vma->vm_link,
- &to_i915(obj->base.dev)->gtt.base.inactive_list);
+ &to_i915(obj->base.dev)->ggtt.base.inactive_list);
return 0;
}
@@ -4209,7 +4209,7 @@ void __i915_vma_set_map_and_fenceable(struct i915_vma *vma)
(vma->node.start & (fence_alignment - 1)) == 0);
mappable = (vma->node.start + fence_size <=
- to_i915(obj->base.dev)->gtt.mappable_end);
+ to_i915(obj->base.dev)->ggtt.mappable_end);
obj->map_and_fenceable = mappable && fenceable;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 6627bbe9ea24..394e525e55f1 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -945,7 +945,7 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
else if (to_i915(dev)->mm.aliasing_ppgtt)
args->value = to_i915(dev)->mm.aliasing_ppgtt->base.total;
else
- args->value = to_i915(dev)->gtt.base.total;
+ args->value = to_i915(dev)->ggtt.base.total;
break;
default:
ret = -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index dac01ee8cfa3..374a0cb7a092 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -330,7 +330,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
/* Map the page containing the relocation we're going to perform. */
offset = i915_gem_obj_ggtt_offset(obj);
offset += reloc->offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
offset & PAGE_MASK);
iowrite32(lower_32_bits(delta), reloc_page + offset_in_page(offset));
@@ -340,7 +340,7 @@ relocate_entry_gtt(struct drm_i915_gem_object *obj,
if (offset_in_page(offset) == 0) {
io_mapping_unmap_atomic(reloc_page);
reloc_page =
- io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
offset);
}
@@ -1504,7 +1504,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (ctx->ppgtt)
vm = &ctx->ppgtt->base;
else
- vm = &dev_priv->gtt.base;
+ vm = &dev_priv->ggtt.base;
memset(&params_master, 0x00, sizeof(params_master));
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index be204076f8dc..41b4606293d1 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1637,7 +1637,7 @@ static void gen6_write_page_range(struct drm_i915_private *dev_priv,
/* Make sure write is complete before other code can use this page
* table. Also require for WC mapped PTEs */
- readl(dev_priv->gtt.gsm);
+ readl(dev_priv->ggtt.gsm);
}
static uint32_t get_pd_offset(struct i915_hw_ppgtt *ppgtt)
@@ -1932,7 +1932,7 @@ static int gen6_alloc_va_range(struct i915_address_space *vm,
/* Make sure write is complete before other code can use this page
* table. Also require for WC mapped PTEs */
- readl(dev_priv->gtt.gsm);
+ readl(dev_priv->ggtt.gsm);
mark_tlbs_dirty(ppgtt);
return 0;
@@ -2005,23 +2005,23 @@ static int gen6_ppgtt_allocate_page_directories(struct i915_hw_ppgtt *ppgtt)
* allocator works in address space sizes, so it's multiplied by page
* size. We allocate at the top of the GTT to avoid fragmentation.
*/
- BUG_ON(!drm_mm_initialized(&dev_priv->gtt.base.mm));
+ BUG_ON(!drm_mm_initialized(&dev_priv->ggtt.base.mm));
ret = gen6_init_scratch(vm);
if (ret)
return ret;
alloc:
- ret = drm_mm_insert_node_in_range_generic(&dev_priv->gtt.base.mm,
+ ret = drm_mm_insert_node_in_range_generic(&dev_priv->ggtt.base.mm,
&ppgtt->node, GEN6_PD_SIZE,
GEN6_PD_ALIGN, 0,
- 0, dev_priv->gtt.base.total,
+ 0, dev_priv->ggtt.base.total,
DRM_MM_TOPDOWN);
if (ret == -ENOSPC && !retried) {
- ret = i915_gem_evict_something(dev, &dev_priv->gtt.base,
+ ret = i915_gem_evict_something(dev, &dev_priv->ggtt.base,
GEN6_PD_SIZE, GEN6_PD_ALIGN,
I915_CACHE_NONE,
- 0, dev_priv->gtt.base.total,
+ 0, dev_priv->ggtt.base.total,
0);
if (ret)
goto err_out;
@@ -2034,7 +2034,7 @@ alloc:
goto err_out;
- if (ppgtt->node.start < dev_priv->gtt.mappable_end)
+ if (ppgtt->node.start < dev_priv->ggtt.mappable_end)
DRM_DEBUG("Forced to use aperture for PDEs\n");
return 0;
@@ -2065,7 +2065,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- ppgtt->base.pte_encode = dev_priv->gtt.base.pte_encode;
+ ppgtt->base.pte_encode = dev_priv->ggtt.base.pte_encode;
if (IS_GEN6(dev)) {
ppgtt->switch_mm = gen6_mm_switch;
} else if (IS_HASWELL(dev)) {
@@ -2095,7 +2095,7 @@ static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
ppgtt->pd.base.ggtt_offset =
ppgtt->node.start / PAGE_SIZE * sizeof(gen6_pte_t);
- ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->gtt.gsm +
+ ppgtt->pd_addr = (gen6_pte_t __iomem *)dev_priv->ggtt.gsm +
ppgtt->pd.base.ggtt_offset / sizeof(gen6_pte_t);
gen6_scratch_va_range(ppgtt, 0, ppgtt->base.total);
@@ -2265,7 +2265,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
{
bool ret = dev_priv->mm.interruptible;
- if (unlikely(dev_priv->gtt.do_idle_maps)) {
+ if (unlikely(dev_priv->ggtt.do_idle_maps)) {
dev_priv->mm.interruptible = false;
if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
@@ -2279,7 +2279,7 @@ static bool do_idling(struct drm_i915_private *dev_priv)
static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
- if (unlikely(dev_priv->gtt.do_idle_maps))
+ if (unlikely(dev_priv->ggtt.do_idle_maps))
dev_priv->mm.interruptible = interruptible;
}
@@ -2334,9 +2334,9 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
i915_check_and_clear_faults(dev);
- dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- dev_priv->gtt.base.start,
- dev_priv->gtt.base.total,
+ dev_priv->ggtt.base.clear_range(&dev_priv->ggtt.base,
+ dev_priv->ggtt.base.start,
+ dev_priv->ggtt.base.total,
true);
i915_ggtt_flush(dev_priv);
@@ -2370,7 +2370,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
gen8_pte_t __iomem *gtt_entries =
- (gen8_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0; /* shut up gcc */
@@ -2447,7 +2447,7 @@ static void gen6_ggtt_insert_entries(struct i915_address_space *vm,
struct drm_i915_private *dev_priv = vm->dev->dev_private;
unsigned first_entry = start >> PAGE_SHIFT;
gen6_pte_t __iomem *gtt_entries =
- (gen6_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
+ (gen6_pte_t __iomem *)dev_priv->ggtt.gsm + first_entry;
int i = 0;
struct sg_page_iter sg_iter;
dma_addr_t addr = 0;
@@ -2491,8 +2491,8 @@ static void gen8_ggtt_clear_range(struct i915_address_space *vm,
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen8_pte_t scratch_pte, __iomem *gtt_base =
- (gen8_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
- const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+ (gen8_pte_t __iomem *) dev_priv->ggtt.gsm + first_entry;
+ const int max_entries = gtt_total_entries(dev_priv->ggtt) - first_entry;
int i;
int rpm_atomic_seq;
@@ -2522,8 +2522,8 @@ static void gen6_ggtt_clear_range(struct i915_address_space *vm,
unsigned first_entry = start >> PAGE_SHIFT;
unsigned num_entries = length >> PAGE_SHIFT;
gen6_pte_t scratch_pte, __iomem *gtt_base =
- (gen6_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
- const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
+ (gen6_pte_t __iomem *) dev_priv->ggtt.gsm + first_entry;
+ const int max_entries = gtt_total_entries(dev_priv->ggtt) - first_entry;
int i;
int rpm_atomic_seq;
@@ -2718,7 +2718,7 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
* of the aperture.
*/
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+ struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
@@ -2801,8 +2801,8 @@ static int i915_gem_setup_global_gtt(struct drm_device *dev,
true);
dev_priv->mm.aliasing_ppgtt = ppgtt;
- WARN_ON(dev_priv->gtt.base.bind_vma != ggtt_bind_vma);
- dev_priv->gtt.base.bind_vma = aliasing_gtt_bind_vma;
+ WARN_ON(dev_priv->ggtt.base.bind_vma != ggtt_bind_vma);
+ dev_priv->ggtt.base.bind_vma = aliasing_gtt_bind_vma;
}
return 0;
@@ -2813,8 +2813,8 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
u64 gtt_size, mappable_size;
- gtt_size = dev_priv->gtt.base.total;
- mappable_size = dev_priv->gtt.mappable_end;
+ gtt_size = dev_priv->ggtt.base.total;
+ mappable_size = dev_priv->ggtt.mappable_end;
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
@@ -2822,7 +2822,7 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
void i915_global_gtt_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *vm = &dev_priv->gtt.base;
+ struct i915_address_space *vm = &dev_priv->ggtt.base;
if (dev_priv->mm.aliasing_ppgtt) {
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
@@ -2940,10 +2940,10 @@ static int ggtt_probe_common(struct drm_device *dev,
* readback check when writing GTT PTE entries.
*/
if (IS_BROXTON(dev))
- dev_priv->gtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
+ dev_priv->ggtt.gsm = ioremap_nocache(gtt_phys_addr, gtt_size);
else
- dev_priv->gtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
- if (!dev_priv->gtt.gsm) {
+ dev_priv->ggtt.gsm = ioremap_wc(gtt_phys_addr, gtt_size);
+ if (!dev_priv->ggtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
return -ENOMEM;
}
@@ -2952,11 +2952,11 @@ static int ggtt_probe_common(struct drm_device *dev,
if (IS_ERR(scratch_page)) {
DRM_ERROR("Scratch setup failed\n");
/* iounmap will also get called at remove, but meh */
- iounmap(dev_priv->gtt.gsm);
+ iounmap(dev_priv->ggtt.gsm);
return PTR_ERR(scratch_page);
}
- dev_priv->gtt.base.scratch_page = scratch_page;
+ dev_priv->ggtt.base.scratch_page = scratch_page;
return 0;
}
@@ -3074,13 +3074,13 @@ static int gen8_gmch_probe(struct drm_device *dev,
ret = ggtt_probe_common(dev, gtt_size);
- dev_priv->gtt.base.clear_range = gen8_ggtt_clear_range;
- dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries;
- dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
- dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+ dev_priv->ggtt.base.clear_range = gen8_ggtt_clear_range;
+ dev_priv->ggtt.base.insert_entries = gen8_ggtt_insert_entries;
+ dev_priv->ggtt.base.bind_vma = ggtt_bind_vma;
+ dev_priv->ggtt.base.unbind_vma = ggtt_unbind_vma;
if (IS_CHERRYVIEW(dev_priv))
- dev_priv->gtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
+ dev_priv->ggtt.base.insert_entries = gen8_ggtt_insert_entries__BKL;
return ret;
}
@@ -3118,20 +3118,19 @@ static int gen6_gmch_probe(struct drm_device *dev,
ret = ggtt_probe_common(dev, gtt_size);
- dev_priv->gtt.base.clear_range = gen6_ggtt_clear_range;
- dev_priv->gtt.base.insert_entries = gen6_ggtt_insert_entries;
- dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
- dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+ dev_priv->ggtt.base.clear_range = gen6_ggtt_clear_range;
+ dev_priv->ggtt.base.insert_entries = gen6_ggtt_insert_entries;
+ dev_priv->ggtt.base.bind_vma = ggtt_bind_vma;
+ dev_priv->ggtt.base.unbind_vma = ggtt_unbind_vma;
return ret;
}
static void gen6_gmch_remove(struct i915_address_space *vm)
{
+ struct i915_ggtt *ggtt = container_of(vm, struct i915_ggtt, base);
- struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
-
- iounmap(gtt->gsm);
+ iounmap(ggtt->gsm);
free_scratch_page(vm->dev, vm->scratch_page);
}
@@ -3152,13 +3151,13 @@ static int i915_gmch_probe(struct drm_device *dev,
intel_gtt_get(gtt_total, stolen, mappable_base, mappable_end);
- dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
- dev_priv->gtt.base.insert_entries = i915_ggtt_insert_entries;
- dev_priv->gtt.base.clear_range = i915_ggtt_clear_range;
- dev_priv->gtt.base.bind_vma = ggtt_bind_vma;
- dev_priv->gtt.base.unbind_vma = ggtt_unbind_vma;
+ dev_priv->ggtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
+ dev_priv->ggtt.base.insert_entries = i915_ggtt_insert_entries;
+ dev_priv->ggtt.base.clear_range = i915_ggtt_clear_range;
+ dev_priv->ggtt.base.bind_vma = ggtt_bind_vma;
+ dev_priv->ggtt.base.unbind_vma = ggtt_unbind_vma;
- if (unlikely(dev_priv->gtt.do_idle_maps))
+ if (unlikely(dev_priv->ggtt.do_idle_maps))
DRM_INFO("applying Ironlake quirks for intel_iommu\n");
return 0;
@@ -3172,35 +3171,35 @@ static void i915_gmch_remove(struct i915_address_space *vm)
int i915_gem_gtt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_gtt *gtt = &dev_priv->gtt;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
int ret;
if (INTEL_INFO(dev)->gen <= 5) {
- gtt->gtt_probe = i915_gmch_probe;
- gtt->base.cleanup = i915_gmch_remove;
+ ggtt->probe = i915_gmch_probe;
+ ggtt->base.cleanup = i915_gmch_remove;
} else if (INTEL_INFO(dev)->gen < 8) {
- gtt->gtt_probe = gen6_gmch_probe;
- gtt->base.cleanup = gen6_gmch_remove;
+ ggtt->probe = gen6_gmch_probe;
+ ggtt->base.cleanup = gen6_gmch_remove;
if (IS_HASWELL(dev) && dev_priv->ellc_size)
- gtt->base.pte_encode = iris_pte_encode;
+ ggtt->base.pte_encode = iris_pte_encode;
else if (IS_HASWELL(dev))
- gtt->base.pte_encode = hsw_pte_encode;
+ ggtt->base.pte_encode = hsw_pte_encode;
else if (IS_VALLEYVIEW(dev))
- gtt->base.pte_encode = byt_pte_encode;
+ ggtt->base.pte_encode = byt_pte_encode;
else if (INTEL_INFO(dev)->gen >= 7)
- gtt->base.pte_encode = ivb_pte_encode;
+ ggtt->base.pte_encode = ivb_pte_encode;
else
- gtt->base.pte_encode = snb_pte_encode;
+ ggtt->base.pte_encode = snb_pte_encode;
} else {
- dev_priv->gtt.gtt_probe = gen8_gmch_probe;
- dev_priv->gtt.base.cleanup = gen6_gmch_remove;
+ ggtt->probe = gen8_gmch_probe;
+ ggtt->base.cleanup = gen6_gmch_remove;
}
- gtt->base.dev = dev;
- gtt->base.is_ggtt = true;
+ ggtt->base.dev = dev;
+ ggtt->base.is_ggtt = true;
- ret = gtt->gtt_probe(dev, &gtt->base.total, &gtt->stolen_size,
- &gtt->mappable_base, &gtt->mappable_end);
+ ret = ggtt->probe(dev, &ggtt->base.total, &ggtt->stolen_size,
+ &ggtt->mappable_base, &ggtt->mappable_end);
if (ret)
return ret;
@@ -3214,9 +3213,9 @@ int i915_gem_gtt_init(struct drm_device *dev)
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %lluM\n",
- gtt->base.total >> 20);
- DRM_DEBUG_DRIVER("GMADR size = %lldM\n", gtt->mappable_end >> 20);
- DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", gtt->stolen_size >> 20);
+ ggtt->base.total >> 20);
+ DRM_DEBUG_DRIVER("GMADR size = %lldM\n", ggtt->mappable_end >> 20);
+ DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n", ggtt->stolen_size >> 20);
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped)
DRM_INFO("VT-d active for gfx access\n");
@@ -3233,7 +3232,7 @@ int i915_gem_gtt_init(struct drm_device *dev)
return 0;
out_gtt_cleanup:
- gtt->base.cleanup(&dev_priv->gtt.base);
+ ggtt->base.cleanup(&dev_priv->ggtt.base);
return ret;
}
@@ -3249,13 +3248,13 @@ void i915_gem_restore_gtt_mappings(struct drm_device *dev)
i915_check_and_clear_faults(dev);
/* First fill our portion of the GTT with scratch pages */
- dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
- dev_priv->gtt.base.start,
- dev_priv->gtt.base.total,
+ dev_priv->ggtt.base.clear_range(&dev_priv->ggtt.base,
+ dev_priv->ggtt.base.start,
+ dev_priv->ggtt.base.total,
true);
/* Cache flush objects bound into GGTT and rebind them. */
- vm = &dev_priv->gtt.base;
+ vm = &dev_priv->ggtt.base;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
flush = false;
list_for_each_entry(vma, &obj->vma_list, obj_link) {
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.h b/drivers/gpu/drm/i915/i915_gem_gtt.h
index dc208c05cd2c..2906bb1ee290 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.h
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.h
@@ -339,7 +339,7 @@ struct i915_address_space {
* and correct (in cases like swizzling). That region is referred to as GMADR in
* the spec.
*/
-struct i915_gtt {
+struct i915_ggtt {
struct i915_address_space base;
size_t stolen_size; /* Total size of stolen memory */
@@ -357,10 +357,9 @@ struct i915_gtt {
int mtrr;
- /* global gtt ops */
- int (*gtt_probe)(struct drm_device *dev, u64 *gtt_total,
- size_t *stolen, phys_addr_t *mappable_base,
- u64 *mappable_end);
+ int (*probe)(struct drm_device *dev, u64 *gtt_total,
+ size_t *stolen, phys_addr_t *mappable_base,
+ u64 *mappable_end);
};
struct i915_hw_ppgtt {
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 2e6e9fb6f80d..de891c928b2f 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -74,7 +74,7 @@ int i915_gem_stolen_insert_node(struct drm_i915_private *dev_priv,
{
return i915_gem_stolen_insert_node_in_range(dev_priv, node, size,
alignment, 0,
- dev_priv->gtt.stolen_usable_size);
+ dev_priv->ggtt.stolen_usable_size);
}
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
@@ -134,7 +134,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I85X_DRB3, &tmp);
tom = tmp * MB(32);
- base = tom - tseg_size - dev_priv->gtt.stolen_size;
+ base = tom - tseg_size - dev_priv->ggtt.stolen_size;
} else if (IS_845G(dev)) {
u32 tseg_size = 0;
u32 tom;
@@ -158,7 +158,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I830_DRB3, &tmp);
tom = tmp * MB(32);
- base = tom - tseg_size - dev_priv->gtt.stolen_size;
+ base = tom - tseg_size - dev_priv->ggtt.stolen_size;
} else if (IS_I830(dev)) {
u32 tseg_size = 0;
u32 tom;
@@ -178,7 +178,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
I830_DRB3, &tmp);
tom = tmp * MB(32);
- base = tom - tseg_size - dev_priv->gtt.stolen_size;
+ base = tom - tseg_size - dev_priv->ggtt.stolen_size;
}
if (base == 0)
@@ -189,8 +189,8 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
struct {
u32 start, end;
} stolen[2] = {
- { .start = base, .end = base + dev_priv->gtt.stolen_size, },
- { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+ { .start = base, .end = base + dev_priv->ggtt.stolen_size, },
+ { .start = base, .end = base + dev_priv->ggtt.stolen_size, },
};
u64 gtt_start, gtt_end;
@@ -200,7 +200,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
(gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
else
gtt_start &= PGTBL_ADDRESS_LO_MASK;
- gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
+ gtt_end = gtt_start + gtt_total_entries(dev_priv->ggtt) * 4;
if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
stolen[0].end = gtt_start;
@@ -211,10 +211,10 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
if (stolen[0].end - stolen[0].start >
stolen[1].end - stolen[1].start) {
base = stolen[0].start;
- dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
+ dev_priv->ggtt.stolen_size = stolen[0].end - stolen[0].start;
} else {
base = stolen[1].start;
- dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
+ dev_priv->ggtt.stolen_size = stolen[1].end - stolen[1].start;
}
if (stolen[0].start != stolen[1].start ||
@@ -223,7 +223,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
(unsigned long long) gtt_start,
(unsigned long long) gtt_end - 1);
DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
- base, base + (u32) dev_priv->gtt.stolen_size - 1);
+ base, base + (u32) dev_priv->ggtt.stolen_size - 1);
}
}
@@ -233,7 +233,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* kernel. So if the region is already marked as busy, something
* is seriously wrong.
*/
- r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
+ r = devm_request_mem_region(dev->dev, base, dev_priv->ggtt.stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
/*
@@ -245,7 +245,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
* reservation starting from 1 instead of 0.
*/
r = devm_request_mem_region(dev->dev, base + 1,
- dev_priv->gtt.stolen_size - 1,
+ dev_priv->ggtt.stolen_size - 1,
"Graphics Stolen Memory");
/*
* GEN3 firmware likes to smash pci bridges into the stolen
@@ -253,7 +253,7 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
*/
if (r == NULL && !IS_GEN3(dev)) {
DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
- base, base + (uint32_t)dev_priv->gtt.stolen_size);
+ base, base + (uint32_t)dev_priv->ggtt.stolen_size);
base = 0;
}
}
@@ -278,7 +278,7 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *dev_priv,
CTG_STOLEN_RESERVED :
ELK_STOLEN_RESERVED);
unsigned long stolen_top = dev_priv->mm.stolen_base +
- dev_priv->gtt.stolen_size;
+ dev_priv->ggtt.stolen_size;
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
@@ -372,7 +372,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *dev_priv,
uint32_t reg_val = I915_READ(GEN6_STOLEN_RESERVED);
unsigned long stolen_top;
- stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+ stolen_top = dev_priv->mm.stolen_base + dev_priv->ggtt.stolen_size;
*base = reg_val & GEN6_STOLEN_RESERVED_ADDR_MASK;
@@ -401,14 +401,14 @@ int i915_gem_init_stolen(struct drm_device *dev)
}
#endif
- if (dev_priv->gtt.stolen_size == 0)
+ if (dev_priv->ggtt.stolen_size == 0)
return 0;
dev_priv->mm.stolen_base = i915_stolen_to_physical(dev);
if (dev_priv->mm.stolen_base == 0)
return 0;
- stolen_top = dev_priv->mm.stolen_base + dev_priv->gtt.stolen_size;
+ stolen_top = dev_priv->mm.stolen_base + dev_priv->ggtt.stolen_size;
switch (INTEL_INFO(dev_priv)->gen) {
case 2:
@@ -458,18 +458,18 @@ int i915_gem_init_stolen(struct drm_device *dev)
return 0;
}
- dev_priv->gtt.stolen_reserved_base = reserved_base;
- dev_priv->gtt.stolen_reserved_size = reserved_size;
+ dev_priv->ggtt.stolen_reserved_base = reserved_base;
+ dev_priv->ggtt.stolen_reserved_size = reserved_size;
/* It is possible for the reserved area to end before the end of stolen
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
DRM_DEBUG_KMS("Memory reserved for graphics device: %zuK, usable: %luK\n",
- dev_priv->gtt.stolen_size >> 10,
- (dev_priv->gtt.stolen_size - reserved_total) >> 10);
+ dev_priv->ggtt.stolen_size >> 10,
+ (dev_priv->ggtt.stolen_size - reserved_total) >> 10);
- dev_priv->gtt.stolen_usable_size = dev_priv->gtt.stolen_size -
+ dev_priv->ggtt.stolen_usable_size = dev_priv->ggtt.stolen_size -
reserved_total;
/*
@@ -483,7 +483,7 @@ int i915_gem_init_stolen(struct drm_device *dev)
* i915_gem_stolen_insert_node_in_range(). We may want to fix the fbcon
* problem later.
*/
- drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_usable_size);
+ drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->ggtt.stolen_usable_size);
return 0;
}
@@ -497,7 +497,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
struct scatterlist *sg;
DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
- BUG_ON(offset > dev_priv->gtt.stolen_size - size);
+ BUG_ON(offset > dev_priv->ggtt.stolen_size - size);
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake
@@ -629,7 +629,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
u32 size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct i915_address_space *ggtt = &dev_priv->gtt.base;
+ struct i915_address_space *ggtt = &dev_priv->ggtt.base;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
struct i915_vma *vma;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 34397a67b09e..db8600ae5a54 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -653,7 +653,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
vma = i915_gem_obj_to_ggtt(src);
use_ggtt = (src->cache_level == I915_CACHE_NONE &&
vma && (vma->bound & GLOBAL_BIND) &&
- reloc_offset + num_pages * PAGE_SIZE <= dev_priv->gtt.mappable_end);
+ reloc_offset + num_pages * PAGE_SIZE <= dev_priv->ggtt.mappable_end);
/* Cannot access stolen address directly, try to use the aperture */
if (src->stolen) {
@@ -663,7 +663,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind;
reloc_offset = i915_gem_obj_ggtt_offset(src);
- if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->gtt.mappable_end)
+ if (reloc_offset + num_pages * PAGE_SIZE > dev_priv->ggtt.mappable_end)
goto unwind;
}
@@ -689,7 +689,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
* captures what the GPU read.
*/
- s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ s = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
@@ -722,7 +722,7 @@ unwind:
return NULL;
}
#define i915_error_ggtt_object_create(dev_priv, src) \
- i915_error_object_create((dev_priv), (src), &(dev_priv)->gtt.base)
+ i915_error_object_create((dev_priv), (src), &(dev_priv)->ggtt.base)
static void capture_bo(struct drm_i915_error_buffer *err,
struct i915_vma *vma)
@@ -1038,7 +1038,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
vm = request->ctx && request->ctx->ppgtt ?
&request->ctx->ppgtt->base :
- &dev_priv->gtt.base;
+ &dev_priv->ggtt.base;
/* We need to copy these to an anonymous buffer
* as the simplest method to avoid being overwritten
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index dea7429be4d0..2891bcfcd71e 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -181,7 +181,7 @@ static int vgt_balloon_space(struct drm_mm *mm,
int intel_vgt_balloon(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_address_space *ggtt_vm = &dev_priv->gtt.base;
+ struct i915_address_space *ggtt_vm = &dev_priv->ggtt.base;
unsigned long ggtt_vm_end = ggtt_vm->start + ggtt_vm->total;
unsigned long mappable_base, mappable_size, mappable_end;
@@ -203,18 +203,18 @@ int intel_vgt_balloon(struct drm_device *dev)
unmappable_base, unmappable_size / 1024);
if (mappable_base < ggtt_vm->start ||
- mappable_end > dev_priv->gtt.mappable_end ||
- unmappable_base < dev_priv->gtt.mappable_end ||
+ mappable_end > dev_priv->ggtt.mappable_end ||
+ unmappable_base < dev_priv->ggtt.mappable_end ||
unmappable_end > ggtt_vm_end) {
DRM_ERROR("Invalid ballooning configuration!\n");
return -EINVAL;
}
/* Unmappable graphic memory ballooning */
- if (unmappable_base > dev_priv->gtt.mappable_end) {
+ if (unmappable_base > dev_priv->ggtt.mappable_end) {
ret = vgt_balloon_space(&ggtt_vm->mm,
&bl_info.space[2],
- dev_priv->gtt.mappable_end,
+ dev_priv->ggtt.mappable_end,
unmappable_base);
if (ret)
@@ -244,11 +244,11 @@ int intel_vgt_balloon(struct drm_device *dev)
goto err;
}
- if (mappable_end < dev_priv->gtt.mappable_end) {
+ if (mappable_end < dev_priv->ggtt.mappable_end) {
ret = vgt_balloon_space(&ggtt_vm->mm,
&bl_info.space[1],
mappable_end,
- dev_priv->gtt.mappable_end);
+ dev_priv->ggtt.mappable_end);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index ab1ec8daae92..74b0165238dc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2503,7 +2503,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
- if (size_aligned * 2 > dev_priv->gtt.stolen_usable_size)
+ if (size_aligned * 2 > dev_priv->ggtt.stolen_usable_size)
return false;
mutex_lock(&dev->struct_mutex);
@@ -15339,7 +15339,7 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.cursor_height = MAX_CURSOR_HEIGHT;
}
- dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
+ dev->mode_config.fb_base = dev_priv->ggtt.mappable_base;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
INTEL_INFO(dev)->num_pipes,
diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c
index 0f0492f4a357..2e571f5f3b22 100644
--- a/drivers/gpu/drm/i915/intel_fbc.c
+++ b/drivers/gpu/drm/i915/intel_fbc.c
@@ -516,9 +516,9 @@ static int find_compression_threshold(struct drm_i915_private *dev_priv,
* underruns, even if that range is not reserved by the BIOS. */
if (IS_BROADWELL(dev_priv) ||
IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
- end = dev_priv->gtt.stolen_size - 8 * 1024 * 1024;
+ end = dev_priv->ggtt.stolen_size - 8 * 1024 * 1024;
else
- end = dev_priv->gtt.stolen_usable_size;
+ end = dev_priv->ggtt.stolen_usable_size;
/* HACK: This code depends on what we will do in *_enable_fbc. If that
* code changes, this code needs to change as well.
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index ae9cf6fcb870..ea4188ac2e73 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -146,7 +146,7 @@ static int intelfb_alloc(struct drm_fb_helper *helper,
/* If the FB is too big, just don't use it since fbdev is not very
* important and we should probably use that space with FBC or other
* features. */
- if (size * 2 < dev_priv->gtt.stolen_usable_size)
+ if (size * 2 < dev_priv->ggtt.stolen_usable_size)
obj = i915_gem_object_create_stolen(dev, size);
if (obj == NULL)
obj = i915_gem_alloc_object(dev, size);
@@ -244,13 +244,13 @@ static int intelfb_create(struct drm_fb_helper *helper,
/* setup aperture base/size for vesafb takeover */
info->apertures->ranges[0].base = dev->mode_config.fb_base;
- info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
+ info->apertures->ranges[0].size = dev_priv->ggtt.mappable_end;
info->fix.smem_start = dev->mode_config.fb_base + i915_gem_obj_ggtt_offset(obj);
info->fix.smem_len = size;
info->screen_base =
- ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
+ ioremap_wc(dev_priv->ggtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
size);
if (!info->screen_base) {
DRM_ERROR("Failed to remap framebuffer into virtual memory\n");
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 13e22f52666c..e1acb41f187a 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_wc(dev_priv->gtt.mappable,
+ regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs;
@@ -1490,7 +1490,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_handle->vaddr;
else
- regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
+ regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
i915_gem_obj_ggtt_offset(overlay->reg_bo));
return regs;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index a539fbc0c051..521cf4564329 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4644,9 +4644,9 @@ static bool bxt_check_bios_rc6_setup(const struct drm_device *dev)
* for this check.
*/
rc6_ctx_base = I915_READ(RC6_CTX_BASE) & RC6_CTX_BASE_MASK;
- if (!((rc6_ctx_base >= dev_priv->gtt.stolen_reserved_base) &&
- (rc6_ctx_base + PAGE_SIZE <= dev_priv->gtt.stolen_reserved_base +
- dev_priv->gtt.stolen_reserved_size))) {
+ if (!((rc6_ctx_base >= dev_priv->ggtt.stolen_reserved_base) &&
+ (rc6_ctx_base + PAGE_SIZE <= dev_priv->ggtt.stolen_reserved_base +
+ dev_priv->ggtt.stolen_reserved_size))) {
DRM_DEBUG_KMS("RC6 Base address not as expected.\n");
enable_rc6 = false;
}
@@ -5291,7 +5291,7 @@ static void cherryview_setup_pctx(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long pctx_paddr, paddr;
- struct i915_gtt *gtt = &dev_priv->gtt;
+ struct i915_ggtt *ggtt = &dev_priv->ggtt;
u32 pcbr;
int pctx_size = 32*1024;
@@ -5299,7 +5299,7 @@ static void cherryview_setup_pctx(struct drm_device *dev)
if ((pcbr >> VLV_PCBR_ADDR_SHIFT) == 0) {
DRM_DEBUG_DRIVER("BIOS didn't set up PCBR, fixing up\n");
paddr = (dev_priv->mm.stolen_base +
- (gtt->stolen_size - pctx_size));
+ (ggtt->stolen_size - pctx_size));
pctx_paddr = (paddr & (~4095));
I915_WRITE(VLV_PCBR, pctx_paddr);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9c59ede5dd9a..df0ef5bba8e5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -2136,7 +2136,7 @@ int intel_pin_and_map_ringbuffer_obj(struct drm_device *dev,
/* Access through the GTT requires the device to be awake. */
assert_rpm_wakelock_held(dev_priv);
- ringbuf->virtual_start = ioremap_wc(dev_priv->gtt.mappable_base +
+ ringbuf->virtual_start = ioremap_wc(dev_priv->ggtt.mappable_base +
i915_gem_obj_ggtt_offset(obj), ringbuf->size);
if (ringbuf->virtual_start == NULL) {
i915_gem_object_ggtt_unpin(obj);