author | Jakub Kicinski <kuba@kernel.org> | 2023-08-04 00:29:50 +0300 |
---|---|---|
committer | Jakub Kicinski <kuba@kernel.org> | 2023-08-04 00:34:37 +0300 |
commit | 35b1b1fd96388d5e3cf179bf36bd8a4153baf4a3 (patch) | |
tree | 0c0debbc24d6633ec6f8980f94e70f924aeee74e /drivers/s390/block/dasd.c | |
parent | 36e68eadd303dce58018b503186a89bcb27d527e (diff) | |
parent | 999f6631866e9ea81add935b9c6ebaab0579d259 (diff) | |
download | linux-35b1b1fd96388d5e3cf179bf36bd8a4153baf4a3.tar.xz |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR.
Conflicts:
net/dsa/port.c
9945c1fb03a3 ("net: dsa: fix older DSA drivers using phylink")
a88dd7538461 ("net: dsa: remove legacy_pre_march2020 detection")
https://lore.kernel.org/all/20230731102254.2c9868ca@canb.auug.org.au/
net/xdp/xsk.c
3c5b4d69c358 ("net: annotate data-races around sk->sk_mark")
b7f72a30e9ac ("xsk: introduce wrappers and helpers for supporting multi-buffer in Tx path")
https://lore.kernel.org/all/20230731102631.39988412@canb.auug.org.au/
drivers/net/ethernet/broadcom/bnxt/bnxt.c
37b61cda9c16 ("bnxt: don't handle XDP in netpoll")
2b56b3d99241 ("eth: bnxt: handle invalid Tx completions more gracefully")
https://lore.kernel.org/all/20230801101708.1dc7faac@canb.auug.org.au/
Adjacent changes:
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c
62da08331f1a ("net/mlx5e: Set proper IPsec source port in L4 selector")
fbd517549c32 ("net/mlx5e: Add function to get IPsec offload namespace")
drivers/net/ethernet/sfc/selftest.c
55c1528f9b97 ("sfc: fix field-spanning memcpy in selftest")
ae9d445cd41f ("sfc: Miscellaneous comment removals")
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
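For illustration: the sk->sk_mark conflict above pits a data-race annotation patch (3c5b4d69c358) against a refactor that moved the affected code, and the resolution has to keep the annotated accesses in the code's new location. The sketch below shows the general shape of the READ_ONCE()/WRITE_ONCE() annotation pattern such fixes apply. It is a minimal userspace mock-up, not the kernel code: struct sock is reduced to a single field, and the two macros are simplified stand-ins for the kernel's <linux/compiler.h> versions (typeof requires gcc or clang).

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Simplified stand-ins for the kernel's READ_ONCE()/WRITE_ONCE() from
 * <linux/compiler.h>; good enough to show the shape of the annotation.
 */
#define READ_ONCE(x)		(*(const volatile typeof(x) *)&(x))
#define WRITE_ONCE(x, v)	(*(volatile typeof(x) *)&(x) = (v))

/* Toy stand-in for struct sock, reduced to the one contested field. */
struct sock {
	uint32_t sk_mark;	/* may be rewritten locklessly, e.g. setsockopt() */
};

/* Writer side: store the new mark without holding the socket lock. */
static void sock_set_mark(struct sock *sk, uint32_t mark)
{
	WRITE_ONCE(sk->sk_mark, mark);
}

/* Reader side: sample the mark exactly once on the transmit path, so a
 * concurrent update can never be observed torn or re-loaded mid-transmit. */
static uint32_t sock_tx_mark(const struct sock *sk)
{
	return READ_ONCE(sk->sk_mark);
}

int main(void)
{
	struct sock sk = { .sk_mark = 0 };

	sock_set_mark(&sk, 42);
	printf("mark used on tx: %u\n", (unsigned int)sock_tx_mark(&sk));
	return 0;
}
```

Whichever side of such a conflict structurally wins, the merged result must carry the annotations forward, since the whole point of the fix is that the field is read and written without the socket lock.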
Diffstat (limited to 'drivers/s390/block/dasd.c')
-rw-r--r-- | drivers/s390/block/dasd.c | 125 |
1 file changed, 48 insertions(+), 77 deletions(-)
```diff
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index edcbf77852c3..50a5ff70814a 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -2943,41 +2943,32 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
  * Requeue a request back to the block request queue
  * only works for block requests
  */
-static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
+static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
 {
-	struct dasd_block *block = cqr->block;
 	struct request *req;
 
-	if (!block)
-		return -EINVAL;
 	/*
 	 * If the request is an ERP request there is nothing to requeue.
 	 * This will be done with the remaining original request.
 	 */
 	if (cqr->refers)
-		return 0;
+		return;
 	spin_lock_irq(&cqr->dq->lock);
 	req = (struct request *) cqr->callback_data;
 	blk_mq_requeue_request(req, true);
 	spin_unlock_irq(&cqr->dq->lock);
-	return 0;
+	return;
 }
 
-/*
- * Go through all request on the dasd_block request queue, cancel them
- * on the respective dasd_device, and return them to the generic
- * block layer.
- */
-static int dasd_flush_block_queue(struct dasd_block *block)
+static int _dasd_requests_to_flushqueue(struct dasd_block *block,
+					struct list_head *flush_queue)
 {
 	struct dasd_ccw_req *cqr, *n;
-	int rc, i;
-	struct list_head flush_queue;
 	unsigned long flags;
+	int rc, i;
 
-	INIT_LIST_HEAD(&flush_queue);
-	spin_lock_bh(&block->queue_lock);
+	spin_lock_irqsave(&block->queue_lock, flags);
 	rc = 0;
 restart:
 	list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
@@ -2992,13 +2983,32 @@ restart:
 		 * is returned from the dasd_device layer.
 		 */
 		cqr->callback = _dasd_wake_block_flush_cb;
-		for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
-			list_move_tail(&cqr->blocklist, &flush_queue);
+		for (i = 0; cqr; cqr = cqr->refers, i++)
+			list_move_tail(&cqr->blocklist, flush_queue);
 		if (i > 1)
 			/* moved more than one request - need to restart */
 			goto restart;
 	}
-	spin_unlock_bh(&block->queue_lock);
+	spin_unlock_irqrestore(&block->queue_lock, flags);
+
+	return rc;
+}
+
+/*
+ * Go through all request on the dasd_block request queue, cancel them
+ * on the respective dasd_device, and return them to the generic
+ * block layer.
+ */
+static int dasd_flush_block_queue(struct dasd_block *block)
+{
+	struct dasd_ccw_req *cqr, *n;
+	struct list_head flush_queue;
+	unsigned long flags;
+	int rc;
+
+	INIT_LIST_HEAD(&flush_queue);
+	rc = _dasd_requests_to_flushqueue(block, &flush_queue);
+
 	/* Now call the callback function of flushed requests */
 restart_cb:
 	list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
@@ -3881,75 +3891,36 @@ EXPORT_SYMBOL_GPL(dasd_generic_space_avail);
  */
 int dasd_generic_requeue_all_requests(struct dasd_device *device)
 {
+	struct dasd_block *block = device->block;
 	struct list_head requeue_queue;
 	struct dasd_ccw_req *cqr, *n;
-	struct dasd_ccw_req *refers;
 	int rc;
 
-	INIT_LIST_HEAD(&requeue_queue);
-	spin_lock_irq(get_ccwdev_lock(device->cdev));
-	rc = 0;
-	list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
-		/* Check status and move request to flush_queue */
-		if (cqr->status == DASD_CQR_IN_IO) {
-			rc = device->discipline->term_IO(cqr);
-			if (rc) {
-				/* unable to terminate requeust */
-				dev_err(&device->cdev->dev,
-					"Unable to terminate request %p "
-					"on suspend\n", cqr);
-				spin_unlock_irq(get_ccwdev_lock(device->cdev));
-				dasd_put_device(device);
-				return rc;
-			}
-		}
-		list_move_tail(&cqr->devlist, &requeue_queue);
-	}
-	spin_unlock_irq(get_ccwdev_lock(device->cdev));
-
-	list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
-		wait_event(dasd_flush_wq,
-			   (cqr->status != DASD_CQR_CLEAR_PENDING));
+	if (!block)
+		return 0;
 
-		/*
-		 * requeue requests to blocklayer will only work
-		 * for block device requests
-		 */
-		if (_dasd_requeue_request(cqr))
-			continue;
+	INIT_LIST_HEAD(&requeue_queue);
+	rc = _dasd_requests_to_flushqueue(block, &requeue_queue);
 
-		/* remove requests from device and block queue */
-		list_del_init(&cqr->devlist);
-		while (cqr->refers != NULL) {
-			refers = cqr->refers;
-			/* remove the request from the block queue */
-			list_del(&cqr->blocklist);
-			/* free the finished erp request */
-			dasd_free_erp_request(cqr, cqr->memdev);
-			cqr = refers;
+	/* Now call the callback function of flushed requests */
+restart_cb:
+	list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
+		wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
+		/* Process finished ERP request. */
+		if (cqr->refers) {
+			spin_lock_bh(&block->queue_lock);
+			__dasd_process_erp(block->base, cqr);
+			spin_unlock_bh(&block->queue_lock);
+			/* restart list_for_xx loop since dasd_process_erp
+			 * might remove multiple elements
+			 */
+			goto restart_cb;
 		}
-
-		/*
-		 * _dasd_requeue_request already checked for a valid
-		 * blockdevice, no need to check again
-		 * all erp requests (cqr->refers) have a cqr->block
-		 * pointer copy from the original cqr
-		 */
+		_dasd_requeue_request(cqr);
 		list_del_init(&cqr->blocklist);
 		cqr->block->base->discipline->free_cp(
 			cqr, (struct request *) cqr->callback_data);
 	}
-
-	/*
-	 * if requests remain then they are internal request
-	 * and go back to the device queue
-	 */
-	if (!list_empty(&requeue_queue)) {
-		/* move freeze_queue to start of the ccw_queue */
-		spin_lock_irq(get_ccwdev_lock(device->cdev));
-		list_splice_tail(&requeue_queue, &device->ccw_queue);
-		spin_unlock_irq(get_ccwdev_lock(device->cdev));
-	}
 	dasd_schedule_device_bh(device);
 	return rc;
 }
```
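The dasd change itself is easier to follow than the raw hunks suggest: the queue-draining loop that dasd_flush_block_queue() and dasd_generic_requeue_all_requests() used to open-code separately is factored into _dasd_requests_to_flushqueue(), which moves requests onto a caller-private flush list under the queue lock and restarts its walk whenever an ERP chain (cqr->refers) moves more than one node, because the iterator's saved "next" pointer may itself have been moved. The callbacks then run outside the lock. Below is a minimal userspace sketch of that drain-and-restart pattern, assuming hand-rolled stand-ins for the kernel's <linux/list.h> helpers and a pthread mutex in place of the irqsave spinlock; it is an illustration of the pattern, not the driver code.

```c
#include <stddef.h>
#include <stdio.h>
#include <pthread.h>

/* Simplified stand-ins for the kernel's <linux/list.h> primitives. */
struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *n, struct list_head *head)
{
	n->prev = head->prev;
	n->next = head;
	head->prev->next = n;
	head->prev = n;
}

static void list_move_tail(struct list_head *n, struct list_head *head)
{
	n->prev->next = n->next;	/* unlink from current list */
	n->next->prev = n->prev;
	list_add_tail(n, head);		/* re-add at the tail of @head */
}

/* Toy counterpart of struct dasd_ccw_req: list linkage plus an ERP chain. */
struct cqr {
	struct list_head blocklist;
	struct cqr *refers;	/* original request this ERP request refers to */
	int id;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Drain @queue onto @flush_queue under the lock, mirroring
 * _dasd_requests_to_flushqueue(): moving one entry can pull its whole
 * ->refers chain along, so restart the walk whenever more than one node
 * moved - the iterator's saved "next" may no longer be on @queue.
 */
static void requests_to_flushqueue(struct list_head *queue,
				   struct list_head *flush_queue)
{
	struct list_head *pos, *n;
	struct cqr *cqr;
	int i;

	pthread_mutex_lock(&queue_lock);
restart:
	for (pos = queue->next, n = pos->next; pos != queue;
	     pos = n, n = pos->next) {
		cqr = container_of(pos, struct cqr, blocklist);
		for (i = 0; cqr; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, flush_queue);
		if (i > 1)	/* moved more than one request - restart */
			goto restart;
	}
	pthread_mutex_unlock(&queue_lock);
}

int main(void)
{
	struct cqr orig = { .refers = NULL, .id = 1 };
	struct cqr erp = { .refers = &orig, .id = 2 };
	struct list_head queue, flush_queue, *pos;

	INIT_LIST_HEAD(&queue);
	INIT_LIST_HEAD(&flush_queue);
	list_add_tail(&orig.blocklist, &queue);
	list_add_tail(&erp.blocklist, &queue);

	requests_to_flushqueue(&queue, &flush_queue);

	/* The callbacks now run outside queue_lock, as in the driver. */
	for (pos = flush_queue.next; pos != &flush_queue; pos = pos->next)
		printf("flushed cqr %d\n",
		       container_of(pos, struct cqr, blocklist)->id);
	return 0;
}
```

Keeping the drain under a single irqsave-style critical section and running the callbacks afterwards is what lets both callers share the helper: the flush list is private to the caller, so no lock needs to be held while each request's callback or requeue runs.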