author    Francois Dugast <francois.dugast@intel.com>    2023-07-27 17:55:29 +0300
committer Rodrigo Vivi <rodrigo.vivi@intel.com>    2023-12-21 19:39:17 +0300
commit    99fea6828879381405dba598627aea79fa6edd78 (patch)
tree      1ae501df46eb3a5274eec4f4a5006eae1ddf08b9 /drivers/gpu/drm/xe/xe_migrate.c
parent    3207a32163cdf7b3345a44e255aae614859ea0d6 (diff)
drm/xe: Prefer WARN() over BUG() to avoid crashing the kernel
Replace calls to XE_BUG_ON() with calls to XE_WARN_ON(), which in turn calls WARN() instead of BUG(). BUG() crashes the kernel and should only be used when a failure is catastrophic and unrecoverable, which is not the case here.

Signed-off-by: Francois Dugast <francois.dugast@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
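The behavioral difference is what motivates the change: BUG() stops the offending thread and typically brings down the whole machine, whereas WARN() prints a backtrace and lets execution continue, so the driver gets a chance to fail gracefully. Below is a minimal sketch of the pattern, assuming XE_WARN_ON() is a thin wrapper over the kernel's WARN_ON() (the real definition lives in the xe headers and may differ); xe_migrate_vm_addr_checked() is a hypothetical illustration, not part of this patch:

#include <linux/bug.h>	/* WARN_ON(), BUG_ON() */

/* Sketch only: assumes a thin wrapper, as the commit message implies. */
#define XE_WARN_ON(expr)	WARN_ON(expr)

/*
 * WARN_ON() evaluates to its condition, so the same check that logs a
 * backtrace can also drive a recovery path. BUG_ON() offers no such
 * option, since it never returns when the condition holds.
 */
static u64 xe_migrate_vm_addr_checked(u64 slot, u32 level)
{
	if (XE_WARN_ON(slot >= NUM_PT_SLOTS))	/* NUM_PT_SLOTS from xe_migrate.c */
		return 0;	/* recover instead of crashing the kernel */

	/* First slot is reserved for mapping of PT bo and bb, start from 1 */
	return (slot + 1ULL) << xe_pt_shift(level + 1);
}

The patch itself keeps the checks as plain statements, matching the old BUG_ON() call sites one for one; the condition value returned by the macro is simply unused there.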
Diffstat (limited to 'drivers/gpu/drm/xe/xe_migrate.c')
-rw-r--r-- drivers/gpu/drm/xe/xe_migrate.c | 30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index 0405136bc0b1..9c4b432d496f 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -106,7 +106,7 @@ static void xe_migrate_fini(struct drm_device *dev, void *arg)
static u64 xe_migrate_vm_addr(u64 slot, u32 level)
{
- XE_BUG_ON(slot >= NUM_PT_SLOTS);
+ XE_WARN_ON(slot >= NUM_PT_SLOTS);
/* First slot is reserved for mapping of PT bo and bb, start from 1 */
return (slot + 1ULL) << xe_pt_shift(level + 1);
@@ -171,7 +171,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
BUILD_BUG_ON(!(NUM_KERNEL_PDE & 1));
/* Need to be sure everything fits in the first PT, or create more */
- XE_BUG_ON(m->batch_base_ofs + batch->size >= SZ_2M);
+ XE_WARN_ON(m->batch_base_ofs + batch->size >= SZ_2M);
bo = xe_bo_create_pin_map(vm->xe, tile, vm,
num_entries * XE_PAGE_SIZE,
@@ -205,7 +205,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
}
if (!IS_DGFX(xe)) {
- XE_BUG_ON(xe->info.supports_usm);
+ XE_WARN_ON(xe->info.supports_usm);
/* Write out batch too */
m->batch_base_ofs = NUM_PT_SLOTS * XE_PAGE_SIZE;
@@ -487,7 +487,7 @@ static void emit_copy_ccs(struct xe_gt *gt, struct xe_bb *bb,
num_ccs_blks = DIV_ROUND_UP(xe_device_ccs_bytes(gt_to_xe(gt), size),
NUM_CCS_BYTES_PER_BLOCK);
- XE_BUG_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER);
+ XE_WARN_ON(num_ccs_blks > NUM_CCS_BLKS_PER_XFER);
*cs++ = XY_CTRL_SURF_COPY_BLT |
(src_is_indirect ? 0x0 : 0x1) << SRC_ACCESS_TYPE_SHIFT |
(dst_is_indirect ? 0x0 : 0x1) << DST_ACCESS_TYPE_SHIFT |
@@ -507,9 +507,9 @@ static void emit_copy(struct xe_gt *gt, struct xe_bb *bb,
u64 src_ofs, u64 dst_ofs, unsigned int size,
unsigned int pitch)
{
- XE_BUG_ON(size / pitch > S16_MAX);
- XE_BUG_ON(pitch / 4 > S16_MAX);
- XE_BUG_ON(pitch > U16_MAX);
+ XE_WARN_ON(size / pitch > S16_MAX);
+ XE_WARN_ON(pitch / 4 > S16_MAX);
+ XE_WARN_ON(pitch > U16_MAX);
bb->cs[bb->len++] = XY_FAST_COPY_BLT_CMD | (10 - 2);
bb->cs[bb->len++] = XY_FAST_COPY_BLT_DEPTH_32 | pitch;
@@ -569,7 +569,7 @@ static u32 xe_migrate_ccs_copy(struct xe_migrate *m,
* At the moment, we don't support copying CCS metadata from
* system to system.
*/
- XE_BUG_ON(!src_is_vram && !dst_is_vram);
+ XE_WARN_ON(!src_is_vram && !dst_is_vram);
emit_copy_ccs(gt, bb, dst_ofs, dst_is_vram, src_ofs,
src_is_vram, dst_size);
@@ -781,7 +781,7 @@ static void emit_clear_link_copy(struct xe_gt *gt, struct xe_bb *bb, u64 src_ofs
*cs++ = upper_32_bits(src_ofs);
*cs++ = FIELD_PREP(PVC_MS_MOCS_INDEX_MASK, mocs);
- XE_BUG_ON(cs - bb->cs != len + bb->len);
+ XE_WARN_ON(cs - bb->cs != len + bb->len);
bb->len += len;
}
@@ -819,7 +819,7 @@ static void emit_clear_main_copy(struct xe_gt *gt, struct xe_bb *bb,
*cs++ = 0;
}
- XE_BUG_ON(cs - bb->cs != len + bb->len);
+ XE_WARN_ON(cs - bb->cs != len + bb->len);
bb->len += len;
}
@@ -992,9 +992,9 @@ static void write_pgtable(struct xe_tile *tile, struct xe_bb *bb, u64 ppgtt_ofs,
* PDE. This requires a BO that is almost vm->size big.
*
* This shouldn't be possible in practice.. might change when 16K
- * pages are used. Hence the BUG_ON.
+ * pages are used. Hence the XE_WARN_ON.
*/
- XE_BUG_ON(update->qwords > 0x1ff);
+ XE_WARN_ON(update->qwords > 0x1ff);
if (!ppgtt_ofs) {
ppgtt_ofs = xe_migrate_vram_ofs(xe_bo_addr(update->pt_bo, 0,
XE_PAGE_SIZE));
@@ -1184,7 +1184,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
* Worst case: Sum(2 * (each lower level page size) + (top level page size))
* Should be reasonably bound..
*/
- XE_BUG_ON(batch_size >= SZ_128K);
+ XE_WARN_ON(batch_size >= SZ_128K);
bb = xe_bb_new(gt, batch_size, !eng && xe->info.supports_usm);
if (IS_ERR(bb))
@@ -1194,7 +1194,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
if (!IS_DGFX(xe)) {
ppgtt_ofs = NUM_KERNEL_PDE - 1;
if (eng) {
- XE_BUG_ON(num_updates > NUM_VMUSA_WRITES_PER_UNIT);
+ XE_WARN_ON(num_updates > NUM_VMUSA_WRITES_PER_UNIT);
sa_bo = drm_suballoc_new(&m->vm_update_sa, 1,
GFP_KERNEL, true, 0);
@@ -1223,7 +1223,7 @@ xe_migrate_update_pgtables(struct xe_migrate *m,
for (i = 0; i < num_updates; i++) {
struct xe_bo *pt_bo = updates[i].pt_bo;
- BUG_ON(pt_bo->size != SZ_4K);
+ XE_WARN_ON(pt_bo->size != SZ_4K);
addr = xe_pte_encode(pt_bo, 0, XE_CACHE_WB, 0);
bb->cs[bb->len++] = lower_32_bits(addr);