author    Adrian Hunter <adrian.hunter@intel.com>    2016-11-29 13:09:14 +0300
committer Ulf Hansson <ulf.hansson@linaro.org>       2016-12-05 12:31:06 +0300
commit    c09949cff5eb408c30f154207ebdc706d94fe1f3 (patch)
tree      7159e0870e289c8c7e734c2107b23a48689feb7e /drivers/mmc
parent    64e29e42a61b8b531eb77f363ddb8e507dfd35ed (diff)
mmc: queue: Factor out mmc_queue_reqs_free_bufs()
In preparation for supporting a queue of requests, factor out
mmc_queue_reqs_free_bufs().

Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Harjani Ritesh <riteshh@codeaurora.org>
Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/card/queue.c | 65
1 file changed, 26 insertions(+), 39 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 280708d804b9..8ba82cf5feff 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -252,6 +252,27 @@ static int mmc_queue_alloc_sgs(struct mmc_queue *mq, int max_segs)
return ret;
}
+static void mmc_queue_reqs_free_bufs(struct mmc_queue *mq)
+{
+ struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+ struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
+
+ kfree(mqrq_cur->bounce_sg);
+ mqrq_cur->bounce_sg = NULL;
+ kfree(mqrq_prev->bounce_sg);
+ mqrq_prev->bounce_sg = NULL;
+
+ kfree(mqrq_cur->sg);
+ mqrq_cur->sg = NULL;
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
+
+ kfree(mqrq_prev->sg);
+ mqrq_prev->sg = NULL;
+ kfree(mqrq_prev->bounce_buf);
+ mqrq_prev->bounce_buf = NULL;
+}
+
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
@@ -268,8 +289,6 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
u64 limit = BLK_BOUNCE_HIGH;
bool bounce = false;
int ret;
- struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
- struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
@@ -279,8 +298,8 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (!mq->queue)
return -ENOMEM;
- mq->mqrq_cur = mqrq_cur;
- mq->mqrq_prev = mqrq_prev;
+ mq->mqrq_cur = &mq->mqrq[0];
+ mq->mqrq_prev = &mq->mqrq[1];
mq->queue->queuedata = mq;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
@@ -336,27 +355,13 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
if (IS_ERR(mq->thread)) {
ret = PTR_ERR(mq->thread);
- goto free_bounce_sg;
+ goto cleanup_queue;
}
return 0;
- free_bounce_sg:
- kfree(mqrq_cur->bounce_sg);
- mqrq_cur->bounce_sg = NULL;
- kfree(mqrq_prev->bounce_sg);
- mqrq_prev->bounce_sg = NULL;
cleanup_queue:
- kfree(mqrq_cur->sg);
- mqrq_cur->sg = NULL;
- kfree(mqrq_cur->bounce_buf);
- mqrq_cur->bounce_buf = NULL;
-
- kfree(mqrq_prev->sg);
- mqrq_prev->sg = NULL;
- kfree(mqrq_prev->bounce_buf);
- mqrq_prev->bounce_buf = NULL;
-
+ mmc_queue_reqs_free_bufs(mq);
blk_cleanup_queue(mq->queue);
return ret;
}
@@ -365,8 +370,6 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
unsigned long flags;
- struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
- struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
/* Make sure the queue isn't suspended, as that will deadlock */
mmc_queue_resume(mq);
@@ -380,23 +383,7 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
- kfree(mqrq_cur->bounce_sg);
- mqrq_cur->bounce_sg = NULL;
-
- kfree(mqrq_cur->sg);
- mqrq_cur->sg = NULL;
-
- kfree(mqrq_cur->bounce_buf);
- mqrq_cur->bounce_buf = NULL;
-
- kfree(mqrq_prev->bounce_sg);
- mqrq_prev->bounce_sg = NULL;
-
- kfree(mqrq_prev->sg);
- mqrq_prev->sg = NULL;
-
- kfree(mqrq_prev->bounce_buf);
- mqrq_prev->bounce_buf = NULL;
+ mmc_queue_reqs_free_bufs(mq);
mq->card = NULL;
}