From 440fd5283a87345cdd4237bdf45fb01130ea0056 Mon Sep 17 00:00:00 2001
From: Thierry Reding
Date: Fri, 23 Jan 2015 09:05:06 +0100
Subject: drm/mm: Support 4 GiB and larger ranges

The current implementation is limited by the number of addresses that
fit into an unsigned long. This causes problems on 32-bit Tegra, where
unsigned long is 32-bit but drm_mm is used to manage an IOVA space of
4 GiB. Given the 32-bit limitation, the range is limited to 4 GiB - 1
(or 4 GiB - 4 KiB for page granularity).

This commit changes the start and size of the range to be an unsigned
64-bit integer, thus allowing much larger ranges to be supported.

[airlied: fix i915 warnings and coloring callback]

Signed-off-by: Thierry Reding
Reviewed-by: Alex Deucher
Reviewed-by: Chris Wilson
Signed-off-by: Dave Airlie
---
 include/drm/drm_mm.h | 52 ++++++++++++++++++++++++++--------------------------
 1 file changed, 26 insertions(+), 26 deletions(-)

(limited to 'include')

diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index a24addfdfcec..0de6290df4da 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -68,8 +68,8 @@ struct drm_mm_node {
 	unsigned scanned_preceeds_hole : 1;
 	unsigned allocated : 1;
 	unsigned long color;
-	unsigned long start;
-	unsigned long size;
+	u64 start;
+	u64 size;
 	struct drm_mm *mm;
 };

@@ -82,16 +82,16 @@ struct drm_mm {
 	unsigned int scan_check_range : 1;
 	unsigned scan_alignment;
 	unsigned long scan_color;
-	unsigned long scan_size;
-	unsigned long scan_hit_start;
-	unsigned long scan_hit_end;
+	u64 scan_size;
+	u64 scan_hit_start;
+	u64 scan_hit_end;
 	unsigned scanned_blocks;
-	unsigned long scan_start;
-	unsigned long scan_end;
+	u64 scan_start;
+	u64 scan_end;
 	struct drm_mm_node *prev_scanned_node;

 	void (*color_adjust)(struct drm_mm_node *node, unsigned long color,
-			     unsigned long *start, unsigned long *end);
+			     u64 *start, u64 *end);
 };

 /**
@@ -124,7 +124,7 @@ static inline bool drm_mm_initialized(struct drm_mm *mm)
 	return mm->hole_stack.next;
 }

-static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+static inline u64 __drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 {
 	return hole_node->start + hole_node->size;
 }
@@ -140,13 +140,13 @@ static inline unsigned long __drm_mm_hole_node_start(struct drm_mm_node *hole_no
  * Returns:
  * Start of the subsequent hole.
  */
-static inline unsigned long drm_mm_hole_node_start(struct drm_mm_node *hole_node)
+static inline u64 drm_mm_hole_node_start(struct drm_mm_node *hole_node)
 {
 	BUG_ON(!hole_node->hole_follows);
 	return __drm_mm_hole_node_start(hole_node);
 }

-static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+static inline u64 __drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 {
 	return list_entry(hole_node->node_list.next,
 			  struct drm_mm_node, node_list)->start;
@@ -163,7 +163,7 @@ static inline unsigned long __drm_mm_hole_node_end(struct drm_mm_node *hole_node
  * Returns:
  * End of the subsequent hole.
  */
-static inline unsigned long drm_mm_hole_node_end(struct drm_mm_node *hole_node)
+static inline u64 drm_mm_hole_node_end(struct drm_mm_node *hole_node)
 {
 	return __drm_mm_hole_node_end(hole_node);
 }
@@ -222,7 +222,7 @@ int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node);

 int drm_mm_insert_node_generic(struct drm_mm *mm,
 			       struct drm_mm_node *node,
-			       unsigned long size,
+			       u64 size,
 			       unsigned alignment,
 			       unsigned long color,
 			       enum drm_mm_search_flags sflags,
@@ -245,7 +245,7 @@ int drm_mm_insert_node_generic(struct drm_mm *mm,
  */
 static inline int drm_mm_insert_node(struct drm_mm *mm,
 				     struct drm_mm_node *node,
-				     unsigned long size,
+				     u64 size,
 				     unsigned alignment,
 				     enum drm_mm_search_flags flags)
 {
@@ -255,11 +255,11 @@ static inline int drm_mm_insert_node(struct drm_mm *mm,

 int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
 					struct drm_mm_node *node,
-					unsigned long size,
+					u64 size,
 					unsigned alignment,
 					unsigned long color,
-					unsigned long start,
-					unsigned long end,
+					u64 start,
+					u64 end,
 					enum drm_mm_search_flags sflags,
 					enum drm_mm_allocator_flags aflags);
 /**
@@ -282,10 +282,10 @@ int drm_mm_insert_node_in_range_generic(struct drm_mm *mm,
  */
 static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
 					      struct drm_mm_node *node,
-					      unsigned long size,
+					      u64 size,
 					      unsigned alignment,
-					      unsigned long start,
-					      unsigned long end,
+					      u64 start,
+					      u64 end,
 					      enum drm_mm_search_flags flags)
 {
 	return drm_mm_insert_node_in_range_generic(mm, node, size, alignment,
@@ -296,21 +296,21 @@ static inline int drm_mm_insert_node_in_range(struct drm_mm *mm,
 void drm_mm_remove_node(struct drm_mm_node *node);
 void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new);
 void drm_mm_init(struct drm_mm *mm,
-		 unsigned long start,
-		 unsigned long size);
+		 u64 start,
+		 u64 size);
 void drm_mm_takedown(struct drm_mm *mm);
 bool drm_mm_clean(struct drm_mm *mm);

 void drm_mm_init_scan(struct drm_mm *mm,
-		      unsigned long size,
+		      u64 size,
 		      unsigned alignment,
 		      unsigned long color);
 void drm_mm_init_scan_with_range(struct drm_mm *mm,
-				 unsigned long size,
+				 u64 size,
 				 unsigned alignment,
 				 unsigned long color,
-				 unsigned long start,
-				 unsigned long end);
+				 u64 start,
+				 u64 end);
 bool drm_mm_scan_add_block(struct drm_mm_node *node);
 bool drm_mm_scan_remove_block(struct drm_mm_node *node);
--
cgit v1.2.3
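The limitation removed by the patch above is easy to reproduce outside the kernel. The sketch below is not part of the patch: it is a hypothetical userspace program in which old_style_init() and new_style_init() merely stand in for the old and new drm_mm_init() prototypes, showing why a full 4 GiB IOVA size cannot be expressed when unsigned long is 32 bits wide (compare a normal build with one using -m32):

/* Hypothetical illustration only -- not kernel code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the old prototype, where size was an unsigned long:
 * the argument is silently truncated on a 32-bit build. */
static void old_style_init(unsigned long start, unsigned long size)
{
	printf("old API sees start=0x%lx size=0x%lx\n", start, size);
}

/* Stand-in for the new prototype, where start and size are 64-bit. */
static void new_style_init(uint64_t start, uint64_t size)
{
	printf("new API sees start=0x%" PRIx64 " size=0x%" PRIx64 "\n",
	       start, size);
}

int main(void)
{
	uint64_t iova_size = 1ULL << 32;	/* a 4 GiB IOVA space */

	printf("unsigned long is %zu bits\n", 8 * sizeof(unsigned long));
	old_style_init(0, iova_size);	/* size becomes 0 on a 32-bit build */
	new_style_init(0, iova_size);	/* size is preserved everywhere */
	return 0;
}

On a 64-bit kernel both variants behave identically, which is why the problem only shows up on 32-bit platforms such as Tegra.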
From 54c4cd68ed7abd9f245722bee39464d04ddb4cfd Mon Sep 17 00:00:00 2001
From: Alex Deucher
Date: Wed, 4 Mar 2015 00:18:38 -0500
Subject: drm/ttm: device address space != CPU address space

We need to store device offsets in 64 bit as the device address space
may be larger than the CPU's. Fixes GPU init failures on Radeons with
4 GB or more of VRAM on 32-bit kernels. We put VRAM at the start of the
GPU's address space, so the GART aperture starts at 4 GB, causing all
GPU addresses in the GART aperture to get truncated.

bug: https://bugs.freedesktop.org/show_bug.cgi?id=89072

[airlied: fix warning on nouveau build]

Signed-off-by: Alex Deucher
Cc: thellstrom@vmware.com
Acked-by: Thomas Hellstrom
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/nouveau/nouveau_fbcon.c | 2 +-
 drivers/gpu/drm/ttm/ttm_bo.c            | 2 +-
 include/drm/ttm/ttm_bo_api.h            | 2 +-
 include/drm/ttm/ttm_bo_driver.h         | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

(limited to 'include')

diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 79924e4b1b49..6751553abe4a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -418,7 +418,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 	nouveau_fbcon_zfill(dev, fbcon);

 	/* To allow resizeing without swapping buffers */
-	NV_INFO(drm, "allocated %dx%d fb: 0x%lx, bo %p\n",
+	NV_INFO(drm, "allocated %dx%d fb: 0x%llx, bo %p\n",
 		nouveau_fb->base.width, nouveau_fb->base.height,
 		nvbo->bo.offset, nvbo);

diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index d395b0bef73b..8d9b7de25613 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -74,7 +74,7 @@ static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
 	pr_err("  has_type: %d\n", man->has_type);
 	pr_err("  use_type: %d\n", man->use_type);
 	pr_err("  flags: 0x%08X\n", man->flags);
-	pr_err("  gpu_offset: 0x%08lX\n", man->gpu_offset);
+	pr_err("  gpu_offset: 0x%08llX\n", man->gpu_offset);
 	pr_err("  size: %llu\n", man->size);
 	pr_err("  available_caching: 0x%08X\n", man->available_caching);
 	pr_err("  default_caching: 0x%08X\n", man->default_caching);
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 0ccf7f267ff9..c768ddfbe53c 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -249,7 +249,7 @@ struct ttm_buffer_object {
 	 * either of these locks held.
 	 */

-	unsigned long offset;
+	uint64_t offset; /* GPU address space is independent of CPU word size */
 	uint32_t cur_placement;
 	struct sg_table *sg;
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 142d752fc450..813042cede57 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -277,7 +277,7 @@ struct ttm_mem_type_manager {
 	bool has_type;
 	bool use_type;
 	uint32_t flags;
-	unsigned long gpu_offset;
+	uint64_t gpu_offset; /* GPU address space is independent of CPU word size */
 	uint64_t size;
 	uint32_t available_caching;
 	uint32_t default_caching;
--
cgit v1.2.3
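The same class of truncation motivates the TTM change above: a buffer object's offset is a GPU address and can exceed 32 bits even when the CPU's unsigned long cannot. Below is a minimal standalone sketch, not kernel code, assuming the layout described in the commit message (VRAM at the start of the GPU address space, GART aperture beginning at 4 GB), of what happens when such an offset lands in the old unsigned long field versus the new uint64_t one:

/* Hypothetical illustration only -- not kernel code. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vram_size = 4ULL << 30;		/* 4 GB of VRAM at offset 0 */
	uint64_t gart_offset = vram_size + 0x1000;	/* a page inside the GART aperture */

	unsigned long old_offset = gart_offset;	/* old field type: 0x1000 on a 32-bit kernel */
	uint64_t new_offset = gart_offset;	/* new field type: exact on any architecture */

	printf("real GPU offset:         0x%" PRIx64 "\n", gart_offset);
	printf("stored as unsigned long: 0x%lx\n", old_offset);
	printf("stored as uint64_t:      0x%" PRIx64 "\n", new_offset);
	return 0;
}

This is also why the patch touches the format strings: once the fields are 64-bit, the %lx and %08lX specifiers have to become %llx and %08llX to print the full value.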