author      Lorenzo Bianconi <lorenzo@kernel.org>    2022-12-01 18:26:53 +0300
committer   Jakub Kicinski <kuba@kernel.org>         2022-12-03 08:23:02 +0300
commit      65e6af6cebefbf7d8d8ac52b71cd251c2071ad00 (patch)
tree        c1cd7ea5d0f0e8d8ec93a4317af75526cd2acd16
parent      55fb80d518c7323d05b71eda0c9f9d657b373816 (diff)
download    linux-65e6af6cebefbf7d8d8ac52b71cd251c2071ad00.tar.xz
net: ethernet: mtk_wed: fix sleep while atomic in mtk_wed_wo_queue_refill
In order to fix the following sleep-while-atomic bug, always allocate pages
with GFP_ATOMIC in mtk_wed_wo_queue_refill, since page_frag_alloc runs in a
spin_lock critical section.

[    9.049719] Hardware name: MediaTek MT7986a RFB (DT)
[    9.054665] Call trace:
[    9.057096]  dump_backtrace+0x0/0x154
[    9.060751]  show_stack+0x14/0x1c
[    9.064052]  dump_stack_lvl+0x64/0x7c
[    9.067702]  dump_stack+0x14/0x2c
[    9.071001]  ___might_sleep+0xec/0x120
[    9.074736]  __might_sleep+0x4c/0x9c
[    9.078296]  __alloc_pages+0x184/0x2e4
[    9.082030]  page_frag_alloc_align+0x98/0x1ac
[    9.086369]  mtk_wed_wo_queue_refill+0x134/0x234
[    9.090974]  mtk_wed_wo_init+0x174/0x2c0
[    9.094881]  mtk_wed_attach+0x7c8/0x7e0
[    9.098701]  mt7915_mmio_wed_init+0x1f0/0x3a0 [mt7915e]
[    9.103940]  mt7915_pci_probe+0xec/0x3bc [mt7915e]
[    9.108727]  pci_device_probe+0xac/0x13c
[    9.112638]  really_probe.part.0+0x98/0x2f4
[    9.116807]  __driver_probe_device+0x94/0x13c
[    9.121147]  driver_probe_device+0x40/0x114
[    9.125314]  __driver_attach+0x7c/0x180
[    9.129133]  bus_for_each_dev+0x5c/0x90
[    9.132953]  driver_attach+0x20/0x2c
[    9.136513]  bus_add_driver+0x104/0x1fc
[    9.140333]  driver_register+0x74/0x120
[    9.144153]  __pci_register_driver+0x40/0x50
[    9.148407]  mt7915_init+0x5c/0x1000 [mt7915e]
[    9.152848]  do_one_initcall+0x40/0x25c
[    9.156669]  do_init_module+0x44/0x230
[    9.160403]  load_module+0x1f30/0x2750
[    9.164135]  __do_sys_init_module+0x150/0x200
[    9.168475]  __arm64_sys_init_module+0x18/0x20
[    9.172901]  invoke_syscall.constprop.0+0x4c/0xe0
[    9.177589]  do_el0_svc+0x48/0xe0
[    9.180889]  el0_svc+0x14/0x50
[    9.183929]  el0t_64_sync_handler+0x9c/0x120
[    9.188183]  el0t_64_sync+0x158/0x15c

Fixes: 799684448e3e ("net: ethernet: mtk_wed: introduce wed wo support")
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Link: https://lore.kernel.org/r/67ca94bdd3d9eaeb86e52b3050fbca0bcf7bb02f.1669908312.git.lorenzo@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
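For context: page_frag_alloc() can fall back into the page allocator (__alloc_pages), which may sleep when given GFP_KERNEL, while mtk_wed_wo_queue_refill() holds the queue lock via spin_lock_bh(). Below is a minimal, self-contained sketch of the locking rule the patch enforces; the struct and function names (my_queue, my_refill) are illustrative only and are not part of the driver.

/*
 * Illustrative sketch (hypothetical names): any allocation done while
 * holding a spinlock with bottom halves disabled must not sleep, so it
 * has to use GFP_ATOMIC rather than GFP_KERNEL.
 */
#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/spinlock.h>

struct my_queue {
	spinlock_t lock;
	struct page_frag_cache cache;
	unsigned int buf_size;
};

static void *my_refill(struct my_queue *q)
{
	void *buf;

	spin_lock_bh(&q->lock);		/* softirq-safe critical section */
	/*
	 * GFP_KERNEL could block waiting for memory reclaim, which would
	 * trigger the might_sleep() splat above; GFP_ATOMIC tells the
	 * allocator it must not sleep.
	 */
	buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
	spin_unlock_bh(&q->lock);

	return buf;
}

Because GFP_ATOMIC is now the only valid choice at every call site, the patch also drops the gfp_t argument from mtk_wed_wo_queue_refill() instead of keeping a parameter that must never vary.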
 drivers/net/ethernet/mediatek/mtk_wed_wo.c | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/drivers/net/ethernet/mediatek/mtk_wed_wo.c b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
index 4754b6db009e..a219da85f4db 100644
--- a/drivers/net/ethernet/mediatek/mtk_wed_wo.c
+++ b/drivers/net/ethernet/mediatek/mtk_wed_wo.c
@@ -133,17 +133,18 @@ mtk_wed_wo_dequeue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q, u32 *len,
 
 static int
 mtk_wed_wo_queue_refill(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q,
-			gfp_t gfp, bool rx)
+			bool rx)
 {
 	enum dma_data_direction dir = rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
 	int n_buf = 0;
 
 	spin_lock_bh(&q->lock);
 	while (q->queued < q->n_desc) {
-		void *buf = page_frag_alloc(&q->cache, q->buf_size, gfp);
 		struct mtk_wed_wo_queue_entry *entry;
 		dma_addr_t addr;
+		void *buf;
 
+		buf = page_frag_alloc(&q->cache, q->buf_size, GFP_ATOMIC);
 		if (!buf)
 			break;
 
@@ -215,7 +216,7 @@ mtk_wed_wo_rx_run_queue(struct mtk_wed_wo *wo, struct mtk_wed_wo_queue *q)
 		mtk_wed_mcu_rx_unsolicited_event(wo, skb);
 	}
 
-	if (mtk_wed_wo_queue_refill(wo, q, GFP_ATOMIC, true)) {
+	if (mtk_wed_wo_queue_refill(wo, q, true)) {
 		u32 index = (q->head - 1) % q->n_desc;
 
 		mtk_wed_wo_queue_kick(wo, q, index);
@@ -432,7 +433,7 @@ mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
 	if (ret)
 		goto error;
 
-	mtk_wed_wo_queue_refill(wo, &wo->q_tx, GFP_KERNEL, false);
+	mtk_wed_wo_queue_refill(wo, &wo->q_tx, false);
 	mtk_wed_wo_queue_reset(wo, &wo->q_tx);
 
 	regs.desc_base = MTK_WED_WO_CCIF_DUMMY5;
@@ -446,7 +447,7 @@ mtk_wed_wo_hardware_init(struct mtk_wed_wo *wo)
 	if (ret)
 		goto error;
 
-	mtk_wed_wo_queue_refill(wo, &wo->q_rx, GFP_KERNEL, true);
+	mtk_wed_wo_queue_refill(wo, &wo->q_rx, true);
 	mtk_wed_wo_queue_reset(wo, &wo->q_rx);
 
 	/* rx queue irqmask */