path: root/drivers/gpu/drm/xe/xe_bo.c
author	Francois Dugast <francois.dugast@intel.com>	2023-07-27 17:55:29 +0300
committer	Rodrigo Vivi <rodrigo.vivi@intel.com>	2023-12-21 19:39:17 +0300
commit	99fea6828879381405dba598627aea79fa6edd78 (patch)
tree	1ae501df46eb3a5274eec4f4a5006eae1ddf08b9 /drivers/gpu/drm/xe/xe_bo.c
parent	3207a32163cdf7b3345a44e255aae614859ea0d6 (diff)
download	linux-99fea6828879381405dba598627aea79fa6edd78.tar.xz
drm/xe: Prefer WARN() over BUG() to avoid crashing the kernel
Replace calls to XE_BUG_ON() with calls to XE_WARN_ON(), which in turn calls WARN() instead of BUG(). BUG() crashes the kernel and should only be used when it is absolutely unavoidable, i.e. for catastrophic and unrecoverable failures, which is not the case here.

Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
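For context, a minimal sketch of the two assertion styles involved; the exact xe wrappers live elsewhere in the driver and may differ in detail, so treat these definitions as illustrative assumptions rather than the real headers:

#include <linux/bug.h>	/* BUG_ON(), WARN_ON() */

/*
 * Illustrative only -- assumed shape of the driver's assertion wrappers.
 *
 * BUG_ON() panics the machine when the condition is true, taking the
 * whole system down with the driver.  WARN_ON() prints a stack trace,
 * taints the kernel and lets execution continue, so a driver bug stays
 * visible and debuggable without being fatal.
 */
#define XE_BUG_ON(condition)	BUG_ON(condition)	/* old: unrecoverable, crashes the kernel */
#define XE_WARN_ON(condition)	WARN_ON(condition)	/* new: warn, taint, keep running */

Because WARN_ON() also evaluates to the condition, callers that can recover are free to pair it with an error return; this patch, however, only swaps the macro and does not change any control flow.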
Diffstat (limited to 'drivers/gpu/drm/xe/xe_bo.c')
-rw-r--r--	drivers/gpu/drm/xe/xe_bo.c	52
1 file changed, 26 insertions, 26 deletions
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 65b56e7a2fde..cf0faaefd03d 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -103,7 +103,7 @@ static bool xe_bo_is_user(struct xe_bo *bo)
static struct xe_tile *
mem_type_to_tile(struct xe_device *xe, u32 mem_type)
{
- XE_BUG_ON(mem_type != XE_PL_STOLEN && !mem_type_is_vram(mem_type));
+ XE_WARN_ON(mem_type != XE_PL_STOLEN && !mem_type_is_vram(mem_type));
return &xe->tiles[mem_type == XE_PL_STOLEN ? 0 : (mem_type - XE_PL_VRAM0)];
}
@@ -142,7 +142,7 @@ static void add_vram(struct xe_device *xe, struct xe_bo *bo,
struct ttm_place place = { .mem_type = mem_type };
u64 io_size = tile->mem.vram.io_size;
- XE_BUG_ON(!tile->mem.vram.usable_size);
+ XE_WARN_ON(!tile->mem.vram.usable_size);
/*
* For eviction / restore on suspend / resume objects
@@ -285,7 +285,7 @@ static int xe_tt_map_sg(struct ttm_tt *tt)
unsigned long num_pages = tt->num_pages;
int ret;
- XE_BUG_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL);
+ XE_WARN_ON(tt->page_flags & TTM_TT_FLAG_EXTERNAL);
if (xe_tt->sg)
return 0;
@@ -544,8 +544,8 @@ static int xe_bo_move_dmabuf(struct ttm_buffer_object *ttm_bo,
ttm);
struct sg_table *sg;
- XE_BUG_ON(!attach);
- XE_BUG_ON(!ttm_bo->ttm);
+ XE_WARN_ON(!attach);
+ XE_WARN_ON(!ttm_bo->ttm);
if (new_res->mem_type == XE_PL_SYSTEM)
goto out;
@@ -707,8 +707,8 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
else if (mem_type_is_vram(old_mem_type))
tile = mem_type_to_tile(xe, old_mem_type);
- XE_BUG_ON(!tile);
- XE_BUG_ON(!tile->migrate);
+ XE_WARN_ON(!tile);
+ XE_WARN_ON(!tile->migrate);
trace_xe_bo_move(bo);
xe_device_mem_access_get(xe);
@@ -738,7 +738,7 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
goto out;
}
- XE_BUG_ON(new_mem->start !=
+ XE_WARN_ON(new_mem->start !=
bo->placements->fpfn);
iosys_map_set_vaddr_iomem(&bo->vmap, new_addr);
@@ -1198,7 +1198,7 @@ struct xe_bo *__xe_bo_create_locked(struct xe_device *xe, struct xe_bo *bo,
int err;
/* Only kernel objects should set GT */
- XE_BUG_ON(tile && type != ttm_bo_type_kernel);
+ XE_WARN_ON(tile && type != ttm_bo_type_kernel);
if (XE_WARN_ON(!size))
return ERR_PTR(-EINVAL);
@@ -1350,7 +1350,7 @@ xe_bo_create_locked_range(struct xe_device *xe,
if (!tile && flags & XE_BO_CREATE_STOLEN_BIT)
tile = xe_device_get_root_tile(xe);
- XE_BUG_ON(!tile);
+ XE_WARN_ON(!tile);
if (flags & XE_BO_CREATE_STOLEN_BIT &&
flags & XE_BO_FIXED_PLACEMENT_BIT) {
@@ -1481,8 +1481,8 @@ int xe_bo_pin_external(struct xe_bo *bo)
struct xe_device *xe = xe_bo_device(bo);
int err;
- XE_BUG_ON(bo->vm);
- XE_BUG_ON(!xe_bo_is_user(bo));
+ XE_WARN_ON(bo->vm);
+ XE_WARN_ON(!xe_bo_is_user(bo));
if (!xe_bo_is_pinned(bo)) {
err = xe_bo_validate(bo, NULL, false);
@@ -1514,20 +1514,20 @@ int xe_bo_pin(struct xe_bo *bo)
int err;
/* We currently don't expect user BO to be pinned */
- XE_BUG_ON(xe_bo_is_user(bo));
+ XE_WARN_ON(xe_bo_is_user(bo));
/* Pinned object must be in GGTT or have pinned flag */
- XE_BUG_ON(!(bo->flags & (XE_BO_CREATE_PINNED_BIT |
+ XE_WARN_ON(!(bo->flags & (XE_BO_CREATE_PINNED_BIT |
XE_BO_CREATE_GGTT_BIT)));
/*
* No reason we can't support pinning imported dma-bufs we just don't
* expect to pin an imported dma-buf.
*/
- XE_BUG_ON(bo->ttm.base.import_attach);
+ XE_WARN_ON(bo->ttm.base.import_attach);
/* We only expect at most 1 pin */
- XE_BUG_ON(xe_bo_is_pinned(bo));
+ XE_WARN_ON(xe_bo_is_pinned(bo));
err = xe_bo_validate(bo, NULL, false);
if (err)
@@ -1543,7 +1543,7 @@ int xe_bo_pin(struct xe_bo *bo)
struct ttm_place *place = &(bo->placements[0]);
if (mem_type_is_vram(place->mem_type)) {
- XE_BUG_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS));
+ XE_WARN_ON(!(place->flags & TTM_PL_FLAG_CONTIGUOUS));
place->fpfn = (xe_bo_addr(bo, 0, PAGE_SIZE) -
vram_region_gpu_offset(bo->ttm.resource)) >> PAGE_SHIFT;
@@ -1580,9 +1580,9 @@ void xe_bo_unpin_external(struct xe_bo *bo)
{
struct xe_device *xe = xe_bo_device(bo);
- XE_BUG_ON(bo->vm);
- XE_BUG_ON(!xe_bo_is_pinned(bo));
- XE_BUG_ON(!xe_bo_is_user(bo));
+ XE_WARN_ON(bo->vm);
+ XE_WARN_ON(!xe_bo_is_pinned(bo));
+ XE_WARN_ON(!xe_bo_is_user(bo));
if (bo->ttm.pin_count == 1 && !list_empty(&bo->pinned_link)) {
spin_lock(&xe->pinned.lock);
@@ -1603,15 +1603,15 @@ void xe_bo_unpin(struct xe_bo *bo)
{
struct xe_device *xe = xe_bo_device(bo);
- XE_BUG_ON(bo->ttm.base.import_attach);
- XE_BUG_ON(!xe_bo_is_pinned(bo));
+ XE_WARN_ON(bo->ttm.base.import_attach);
+ XE_WARN_ON(!xe_bo_is_pinned(bo));
if (IS_DGFX(xe) && !(IS_ENABLED(CONFIG_DRM_XE_DEBUG) &&
bo->flags & XE_BO_INTERNAL_TEST)) {
struct ttm_place *place = &(bo->placements[0]);
if (mem_type_is_vram(place->mem_type)) {
- XE_BUG_ON(list_empty(&bo->pinned_link));
+ XE_WARN_ON(list_empty(&bo->pinned_link));
spin_lock(&xe->pinned.lock);
list_del_init(&bo->pinned_link);
@@ -1675,12 +1675,12 @@ dma_addr_t __xe_bo_addr(struct xe_bo *bo, u64 offset, size_t page_size)
struct xe_res_cursor cur;
u64 page;
- XE_BUG_ON(page_size > PAGE_SIZE);
+ XE_WARN_ON(page_size > PAGE_SIZE);
page = offset >> PAGE_SHIFT;
offset &= (PAGE_SIZE - 1);
if (!xe_bo_is_vram(bo) && !xe_bo_is_stolen(bo)) {
- XE_BUG_ON(!bo->ttm.ttm);
+ XE_WARN_ON(!bo->ttm.ttm);
xe_res_first_sg(xe_bo_get_sg(bo), page << PAGE_SHIFT,
page_size, &cur);
@@ -1874,7 +1874,7 @@ int xe_bo_lock(struct xe_bo *bo, struct ww_acquire_ctx *ww,
LIST_HEAD(objs);
LIST_HEAD(dups);
- XE_BUG_ON(!ww);
+ XE_WARN_ON(!ww);
tv_bo.num_shared = num_resv;
tv_bo.bo = &bo->ttm;