author     Kirill Tkhai <ktkhai@virtuozzo.com>       2018-11-08 12:05:25 +0300
committer  Miklos Szeredi <mszeredi@redhat.com>      2019-02-13 15:15:13 +0300
commit     217316a601016d0d2913290070e6338576ae73f6
tree       018240bc0794d5702b9eaca6a7b392bff21eb5fd  /fs/fuse
parent     8da6e9183275c837e7d2401b3018ff81f657a19a
download   linux-217316a601016d0d2913290070e6338576ae73f6.tar.xz
fuse: Optimize request_end() by not taking fiq->waitq.lock
We take the global fiq->waitq.lock every time we enter this function, but interrupted requests are only a small subset of all requests. This patch optimizes request_end() so that it takes the lock only when it is really needed. queue_interrupt() needs a small change for that: after the request is linked to the interrupt list, we execute smp_mb() and check FR_FINISHED again. If the FR_FINISHED bit has appeared in the meantime, we remove the request and leave the function.

Signed-off-by: Kirill Tkhai <ktkhai@virtuozzo.com>
Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
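The barrier pairing described above is the classic store-load (Dekker-style) ordering problem: request_end() sets FR_FINISHED and then reads the interrupt list, while queue_interrupt() adds to the interrupt list and then reads FR_FINISHED. As long as each side has a full barrier between its write and its read, at least one of the two is guaranteed to observe the other's write, so a queued interrupt entry can never be missed by both. Below is a minimal user-space sketch of that pattern, using C11 atomics in place of test_and_set_bit()/smp_mb(); the names (finished, queued, finisher, interrupter) are illustrative only and do not come from the kernel source.

/* Build: cc -std=c11 -pthread barrier_pairing.c */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <pthread.h>

static atomic_bool finished;   /* stands in for FR_FINISHED                    */
static atomic_bool queued;     /* stands in for "req is on fiq->interrupts"    */
static atomic_int  cleanups;   /* how many sides decided to clean up the entry */

/* Mirrors request_end(): set the finished flag (full barrier), then look at the list. */
static void *finisher(void *arg)
{
	(void)arg;
	/* a seq_cst read-modify-write acts like test_and_set_bit(): write + full barrier */
	if (!atomic_exchange(&finished, true)) {
		if (atomic_load(&queued))                /* "!list_empty(&req->intr_entry)"   */
			atomic_fetch_add(&cleanups, 1);  /* would take the lock and delete it */
	}
	return NULL;
}

/* Mirrors queue_interrupt(): link to the list, smp_mb(), re-check the finished flag. */
static void *interrupter(void *arg)
{
	(void)arg;
	atomic_store(&queued, true);                     /* "list_add_tail(...)"   */
	atomic_thread_fence(memory_order_seq_cst);       /* the added smp_mb()     */
	if (atomic_load(&finished))                      /* re-check FR_FINISHED   */
		atomic_fetch_add(&cleanups, 1);          /* undo our own queueing  */
	return NULL;
}

int main(void)
{
	for (int i = 0; i < 100000; i++) {
		atomic_store(&finished, false);
		atomic_store(&queued, false);
		atomic_store(&cleanups, 0);

		pthread_t a, b;
		pthread_create(&a, NULL, finisher, NULL);
		pthread_create(&b, NULL, interrupter, NULL);
		pthread_join(a, NULL);
		pthread_join(b, NULL);

		/* Without the barriers both loads could read a stale 'false' and the
		 * queued entry would leak; with them, at least one side cleans up. */
		if (atomic_load(&cleanups) == 0) {
			puts("entry leaked - ordering violated");
			return 1;
		}
	}
	puts("ok: at least one side always saw the other");
	return 0;
}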
Diffstat (limited to 'fs/fuse')
-rw-r--r--   fs/fuse/dev.c   28
1 file changed, 20 insertions, 8 deletions
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 11246e7965e8..74171d568621 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -431,10 +431,16 @@ static void request_end(struct fuse_conn *fc, struct fuse_req *req)
 
 	if (test_and_set_bit(FR_FINISHED, &req->flags))
 		goto put_request;
-
-	spin_lock(&fiq->waitq.lock);
-	list_del_init(&req->intr_entry);
-	spin_unlock(&fiq->waitq.lock);
+	/*
+	 * test_and_set_bit() implies smp_mb() between bit
+	 * changing and below intr_entry check. Pairs with
+	 * smp_mb() from queue_interrupt().
+	 */
+	if (!list_empty(&req->intr_entry)) {
+		spin_lock(&fiq->waitq.lock);
+		list_del_init(&req->intr_entry);
+		spin_unlock(&fiq->waitq.lock);
+	}
 	WARN_ON(test_bit(FR_PENDING, &req->flags));
 	WARN_ON(test_bit(FR_SENT, &req->flags));
 	if (test_bit(FR_BACKGROUND, &req->flags)) {
@@ -473,12 +479,18 @@ put_request:
 static void queue_interrupt(struct fuse_iqueue *fiq, struct fuse_req *req)
 {
 	spin_lock(&fiq->waitq.lock);
-	if (test_bit(FR_FINISHED, &req->flags)) {
-		spin_unlock(&fiq->waitq.lock);
-		return;
-	}
 	if (list_empty(&req->intr_entry)) {
 		list_add_tail(&req->intr_entry, &fiq->interrupts);
+		/*
+		 * Pairs with smp_mb() implied by test_and_set_bit()
+		 * from request_end().
+		 */
+		smp_mb();
+		if (test_bit(FR_FINISHED, &req->flags)) {
+			list_del_init(&req->intr_entry);
+			spin_unlock(&fiq->waitq.lock);
+			return;
+		}
 		wake_up_locked(&fiq->waitq);
 		kill_fasync(&fiq->fasync, SIGIO, POLL_IN);
 	}