Diffstat (limited to 'drivers/mmc/card/block.c')
-rw-r--r--  drivers/mmc/card/block.c  310
1 file changed, 212 insertions(+), 98 deletions(-)
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 4c1a648d00fc..a1cb21f95302 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -94,6 +94,11 @@ struct mmc_blk_data {
unsigned int read_only;
unsigned int part_type;
unsigned int name_idx;
+ unsigned int reset_done;
+#define MMC_BLK_READ BIT(0)
+#define MMC_BLK_WRITE BIT(1)
+#define MMC_BLK_DISCARD BIT(2)
+#define MMC_BLK_SECDISCARD BIT(3)
/*
* Only set in main mmc_blk_data associated
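The new reset_done field is a per-request-type bitmask: mmc_blk_reset() (added further down) sets the bit for a type the first time it attempts a host reset, and mmc_blk_reset_success() clears it once a request of that type completes, so each type gets at most one reset between successes. The following standalone C sketch only models that bookkeeping; the names are illustrative, not the driver's symbols:

#include <stdio.h>

#define BLK_READ  (1u << 0)	/* mirrors MMC_BLK_READ  */
#define BLK_WRITE (1u << 1)	/* mirrors MMC_BLK_WRITE */

static unsigned int reset_done;

/* Allow a reset only once per request type until the next success. */
static int reset_allowed(unsigned int type)
{
	if (reset_done & type)
		return 0;		/* already tried: give up instead */
	reset_done |= type;
	return 1;			/* caller may reset the host once */
}

static void note_success(unsigned int type)
{
	reset_done &= ~type;		/* re-arm the reset for future errors */
}

int main(void)
{
	int first = reset_allowed(BLK_READ);
	int second = reset_allowed(BLK_READ);

	printf("%d %d\n", first, second);		/* prints: 1 0 */
	note_success(BLK_READ);
	printf("%d\n", reset_allowed(BLK_READ));	/* prints: 1 */
	return 0;
}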
@@ -109,11 +114,11 @@ static DEFINE_MUTEX(open_lock);
enum mmc_blk_status {
MMC_BLK_SUCCESS = 0,
MMC_BLK_PARTIAL,
- MMC_BLK_RETRY,
- MMC_BLK_RETRY_SINGLE,
- MMC_BLK_DATA_ERR,
MMC_BLK_CMD_ERR,
+ MMC_BLK_RETRY,
MMC_BLK_ABORT,
+ MMC_BLK_DATA_ERR,
+ MMC_BLK_ECC_ERR,
};
module_param(perdev_minors, int, 0444);
@@ -291,7 +296,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
struct mmc_card *card;
struct mmc_command cmd = {0};
struct mmc_data data = {0};
- struct mmc_request mrq = {0};
+ struct mmc_request mrq = {NULL};
struct scatterlist sg;
int err;
@@ -442,19 +447,24 @@ static inline int mmc_blk_part_switch(struct mmc_card *card,
{
int ret;
struct mmc_blk_data *main_md = mmc_get_drvdata(card);
+
if (main_md->part_curr == md->part_type)
return 0;
if (mmc_card_mmc(card)) {
- card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
- card->ext_csd.part_config |= md->part_type;
+ u8 part_config = card->ext_csd.part_config;
+
+ part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+ part_config |= md->part_type;
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_PART_CONFIG, card->ext_csd.part_config,
+ EXT_CSD_PART_CONFIG, part_config,
card->ext_csd.part_time);
if (ret)
return ret;
-}
+
+ card->ext_csd.part_config = part_config;
+ }
main_md->part_curr = md->part_type;
return 0;
@@ -466,7 +476,7 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
u32 result;
__be32 *blocks;
- struct mmc_request mrq = {0};
+ struct mmc_request mrq = {NULL};
struct mmc_command cmd = {0};
struct mmc_data data = {0};
unsigned int timeout_us;
@@ -616,7 +626,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
* Otherwise we don't understand what happened, so abort.
*/
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
- struct mmc_blk_request *brq)
+ struct mmc_blk_request *brq, int *ecc_err)
{
bool prev_cmd_status_valid = true;
u32 status, stop_status = 0;
@@ -641,6 +651,12 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
if (err)
return ERR_ABORT;
+ /* Flag ECC errors */
+ if ((status & R1_CARD_ECC_FAILED) ||
+ (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
+ (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
+ *ecc_err = 1;
+
/*
* Check the current card state. If it is in some data transfer
* mode, tell it to stop (and hopefully transition back to TRAN.)
@@ -658,6 +674,8 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
*/
if (err)
return ERR_ABORT;
+ if (stop_status & R1_CARD_ECC_FAILED)
+ *ecc_err = 1;
}
/* Check for set block count errors */
@@ -670,6 +688,10 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
prev_cmd_status_valid, status);
+ /* Data errors */
+ if (!brq->stop.error)
+ return ERR_CONTINUE;
+
/* Now for stop errors. These aren't fatal to the transfer. */
pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
req->rq_disk->disk_name, brq->stop.error,
@@ -686,12 +708,45 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
return ERR_CONTINUE;
}
+static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
+ int type)
+{
+ int err;
+
+ if (md->reset_done & type)
+ return -EEXIST;
+
+ md->reset_done |= type;
+ err = mmc_hw_reset(host);
+ /* Ensure we switch back to the correct partition */
+ if (err != -EOPNOTSUPP) {
+ struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
+ int part_err;
+
+ main_md->part_curr = main_md->part_type;
+ part_err = mmc_blk_part_switch(host->card, md);
+ if (part_err) {
+ /*
+ * We have failed to get back into the correct
+ * partition, so we need to abort the whole request.
+ */
+ return -ENODEV;
+ }
+ }
+ return err;
+}
+
+static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+{
+ md->reset_done &= ~type;
+}
+
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
unsigned int from, nr, arg;
- int err = 0;
+ int err = 0, type = MMC_BLK_DISCARD;
if (!mmc_can_erase(card)) {
err = -EOPNOTSUPP;
@@ -701,11 +756,13 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
- if (mmc_can_trim(card))
+ if (mmc_can_discard(card))
+ arg = MMC_DISCARD_ARG;
+ else if (mmc_can_trim(card))
arg = MMC_TRIM_ARG;
else
arg = MMC_ERASE_ARG;
-
+retry:
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
@@ -718,6 +775,10 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
}
err = mmc_erase(card, from, nr, arg);
out:
+ if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+ goto retry;
+ if (!err)
+ mmc_blk_reset_success(md, type);
spin_lock_irq(&md->lock);
__blk_end_request(req, err, blk_rq_bytes(req));
spin_unlock_irq(&md->lock);
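The discard path (and the secure-discard path below) now retries the erase after an -EIO, but only when mmc_blk_reset() actually reset the host, which it permits at most once until the next successful request of that type. A minimal standalone sketch of that retry shape; do_erase() is an invented stand-in for mmc_erase(), and the single flag stands in for the reset_done bookkeeping:

#include <errno.h>
#include <stdio.h>

static int attempts;

/* Stand-in for mmc_erase(): the first attempt fails with -EIO. */
static int do_erase(void)
{
	return attempts++ ? 0 : -EIO;
}

int main(void)
{
	int err, reset_tried = 0;

retry:
	err = do_erase();
	/* In the driver, mmc_blk_reset() performs the "already tried?" check. */
	if (err == -EIO && !reset_tried) {
		reset_tried = 1;	/* the driver would call mmc_hw_reset() here */
		goto retry;
	}
	printf("err=%d after %d attempt(s)\n", err, attempts);
	return 0;
}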
@@ -731,13 +792,20 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
unsigned int from, nr, arg;
- int err = 0;
+ int err = 0, type = MMC_BLK_SECDISCARD;
- if (!mmc_can_secure_erase_trim(card)) {
+ if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
err = -EOPNOTSUPP;
goto out;
}
+ /* The sanitize operation is supported only from eMMC v4.5 onwards */
+ if (mmc_can_sanitize(card)) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_SANITIZE_START, 1, 0);
+ goto out;
+ }
+
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
@@ -745,7 +813,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
arg = MMC_SECURE_TRIM1_ARG;
else
arg = MMC_SECURE_ERASE_ARG;
-
+retry:
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
INAND_CMD38_ARG_EXT_CSD,
@@ -769,6 +837,10 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
}
out:
+ if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+ goto retry;
+ if (!err)
+ mmc_blk_reset_success(md, type);
spin_lock_irq(&md->lock);
__blk_end_request(req, err, blk_rq_bytes(req));
spin_unlock_irq(&md->lock);
@@ -779,16 +851,18 @@ out:
static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ int ret = 0;
+
+ ret = mmc_flush_cache(card);
+ if (ret)
+ ret = -EIO;
- /*
- * No-op, only service this because we need REQ_FUA for reliable
- * writes.
- */
spin_lock_irq(&md->lock);
- __blk_end_request_all(req, 0);
+ __blk_end_request_all(req, ret);
spin_unlock_irq(&md->lock);
- return 1;
+ return ret ? 0 : 1;
}
/*
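With this change a flush request is no longer a no-op: it issues a real cache flush and reports any failure to the block layer as -EIO, while keeping the issue function's convention of returning 1 for a handled request and 0 on failure. A small standalone illustration of that mapping; flush_cache() is an invented stand-in for mmc_flush_cache():

#include <errno.h>
#include <stdio.h>

/* Stand-in for mmc_flush_cache(); returns 0 or a negative errno. */
static int flush_cache(int simulate_failure)
{
	return simulate_failure ? -ETIMEDOUT : 0;
}

/* Report -EIO (not the raw error) to the block layer and return
 * 1 if the request was handled successfully, 0 otherwise. */
static int issue_flush(int simulate_failure, int *blk_err)
{
	int ret = flush_cache(simulate_failure);

	*blk_err = ret ? -EIO : 0;
	return ret ? 0 : 1;
}

int main(void)
{
	int handled, blk_err;

	handled = issue_flush(1, &blk_err);
	printf("handled=%d blk_err=%d\n", handled, blk_err);	/* handled=0 blk_err=-5 */
	return 0;
}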
@@ -825,11 +899,11 @@ static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
static int mmc_blk_err_check(struct mmc_card *card,
struct mmc_async_req *areq)
{
- enum mmc_blk_status ret = MMC_BLK_SUCCESS;
struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
mmc_active);
struct mmc_blk_request *brq = &mq_mrq->brq;
struct request *req = mq_mrq->req;
+ int ecc_err = 0;
/*
* sbc.error indicates a problem with the set block count
@@ -841,8 +915,9 @@ static int mmc_blk_err_check(struct mmc_card *card,
* stop.error indicates a problem with the stop command. Data
* may have been transferred, or may still be transferring.
*/
- if (brq->sbc.error || brq->cmd.error || brq->stop.error) {
- switch (mmc_blk_cmd_recovery(card, req, brq)) {
+ if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
+ brq->data.error) {
+ switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
case ERR_RETRY:
return MMC_BLK_RETRY;
case ERR_ABORT:
@@ -873,7 +948,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
do {
int err = get_card_status(card, &status, 5);
if (err) {
- printk(KERN_ERR "%s: error %d requesting status\n",
+ pr_err("%s: error %d requesting status\n",
req->rq_disk->disk_name, err);
return MMC_BLK_CMD_ERR;
}
@@ -894,23 +969,21 @@ static int mmc_blk_err_check(struct mmc_card *card,
brq->cmd.resp[0], brq->stop.resp[0]);
if (rq_data_dir(req) == READ) {
- if (brq->data.blocks > 1) {
- /* Redo read one sector at a time */
- pr_warning("%s: retrying using single block read\n",
- req->rq_disk->disk_name);
- return MMC_BLK_RETRY_SINGLE;
- }
+ if (ecc_err)
+ return MMC_BLK_ECC_ERR;
return MMC_BLK_DATA_ERR;
} else {
return MMC_BLK_CMD_ERR;
}
}
- if (ret == MMC_BLK_SUCCESS &&
- blk_rq_bytes(req) != brq->data.bytes_xfered)
- ret = MMC_BLK_PARTIAL;
+ if (!brq->data.bytes_xfered)
+ return MMC_BLK_RETRY;
- return ret;
+ if (blk_rq_bytes(req) != brq->data.bytes_xfered)
+ return MMC_BLK_PARTIAL;
+
+ return MMC_BLK_SUCCESS;
}
static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
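The success path of mmc_blk_err_check() now classifies the outcome purely from byte counts: nothing transferred means the request should be retried, a short transfer is completed partially and reissued, and only a full transfer counts as success. A compact, self-contained model of that decision (the names here are hypothetical):

#include <stdio.h>

enum blk_status { BLK_SUCCESS, BLK_PARTIAL, BLK_RETRY };

/* requested/transferred are byte counts, as blk_rq_bytes() versus
 * brq->data.bytes_xfered in the patch. */
static enum blk_status classify(unsigned int requested, unsigned int transferred)
{
	if (!transferred)
		return BLK_RETRY;	/* nothing moved: reissue the request */
	if (transferred != requested)
		return BLK_PARTIAL;	/* complete the good part, resend the rest */
	return BLK_SUCCESS;
}

int main(void)
{
	printf("%d %d %d\n", classify(4096, 4096), classify(4096, 512),
	       classify(4096, 0));	/* prints: 0 1 2 */
	return 0;
}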
@@ -957,13 +1030,20 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
if (brq->data.blocks > card->host->max_blk_count)
brq->data.blocks = card->host->max_blk_count;
- /*
- * After a read error, we redo the request one sector at a time
- * in order to accurately determine which sectors can be read
- * successfully.
- */
- if (disable_multi && brq->data.blocks > 1)
- brq->data.blocks = 1;
+ if (brq->data.blocks > 1) {
+ /*
+ * After a read error, we redo the request one sector
+ * at a time in order to accurately determine which
+ * sectors can be read successfully.
+ */
+ if (disable_multi)
+ brq->data.blocks = 1;
+
+ /* Some controllers can't do multiblock reads due to hw bugs */
+ if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
+ rq_data_dir(req) == READ)
+ brq->data.blocks = 1;
+ }
if (brq->data.blocks > 1 || do_rel_wr) {
/* SPI multiblock writes terminate using a special
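The multi-block clamping is now decided in one place: a request is dropped to a single block either on the retry path after a read error (disable_multi) or, for reads, when the host advertises that it cannot do multi-block transfers. A standalone sketch of that decision; the capability bit value below is illustrative, not the kernel's MMC_CAP2_NO_MULTI_READ definition:

#include <stdbool.h>
#include <stdio.h>

#define CAP2_NO_MULTI_READ (1u << 3)	/* illustrative value only */

static unsigned int clamp_blocks(unsigned int blocks, bool is_read,
				 bool disable_multi, unsigned int host_caps2)
{
	if (blocks > 1) {
		/* Retry path: read back one sector at a time. */
		if (disable_multi)
			blocks = 1;
		/* Host quirk: some controllers cannot do multi-block reads. */
		if ((host_caps2 & CAP2_NO_MULTI_READ) && is_read)
			blocks = 1;
	}
	return blocks;
}

int main(void)
{
	printf("%u\n", clamp_blocks(8, true, false, 0));			/* 8 */
	printf("%u\n", clamp_blocks(8, true, false, CAP2_NO_MULTI_READ));	/* 1 */
	printf("%u\n", clamp_blocks(8, false, true, 0));			/* 1 */
	return 0;
}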
@@ -1049,12 +1129,41 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
mmc_queue_bounce_pre(mqrq);
}
+static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
+ struct mmc_blk_request *brq, struct request *req,
+ int ret)
+{
+ /*
+ * If this is an SD card and we're writing, we can first
+ * mark the known good sectors as ok.
+ *
+ * If the card is not SD, we can still ok written sectors
+ * as reported by the controller (which might be less than
+ * the real number of written sectors, but never more).
+ */
+ if (mmc_card_sd(card)) {
+ u32 blocks;
+
+ blocks = mmc_sd_num_wr_blocks(card);
+ if (blocks != (u32)-1) {
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, 0, blocks << 9);
+ spin_unlock_irq(&md->lock);
+ }
+ } else {
+ spin_lock_irq(&md->lock);
+ ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
+ spin_unlock_irq(&md->lock);
+ }
+ return ret;
+}
+
static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
{
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
- int ret = 1, disable_multi = 0, retry = 0;
+ int ret = 1, disable_multi = 0, retry = 0, type;
enum mmc_blk_status status;
struct mmc_queue_req *mq_rq;
struct request *req;
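The new mmc_blk_cmd_err() helper (the old cmd_err label turned into a function) completes only the sectors known to have reached the medium: for SD cards it asks the card itself how many blocks were written via mmc_sd_num_wr_blocks(), otherwise it trusts the controller's bytes_xfered, which may under-report but never over-reports. A standalone sketch of just that accounting rule; the helpers here are invented stand-ins:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for mmc_sd_num_wr_blocks(): the SD card reports how many
 * blocks it actually wrote; (uint32_t)-1 means the query failed. */
static uint32_t sd_written_blocks(void)
{
	return 3;
}

/* How many bytes are safe to complete toward the block layer? */
static uint64_t bytes_to_complete(int is_sd, uint64_t bytes_xfered)
{
	if (is_sd) {
		uint32_t blocks = sd_written_blocks();

		if (blocks != (uint32_t)-1)
			return (uint64_t)blocks << 9;	/* 512-byte sectors */
		return 0;				/* unknown: complete nothing extra */
	}
	/* Non-SD: the controller's count is a safe lower bound. */
	return bytes_xfered;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)bytes_to_complete(1, 8192));	/* 1536 */
	printf("%llu\n", (unsigned long long)bytes_to_complete(0, 8192));	/* 8192 */
	return 0;
}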
@@ -1076,6 +1185,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
brq = &mq_rq->brq;
req = mq_rq->req;
+ type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
mmc_queue_bounce_post(mq_rq);
switch (status) {
@@ -1084,18 +1194,18 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
/*
* A block was successfully transferred.
*/
+ mmc_blk_reset_success(md, type);
spin_lock_irq(&md->lock);
ret = __blk_end_request(req, 0,
brq->data.bytes_xfered);
spin_unlock_irq(&md->lock);
+ /*
+ * If the blk_end_request function returns non-zero even
+ * though all data has been transferred and no errors
+ * were returned by the host controller, it's a bug.
+ */
if (status == MMC_BLK_SUCCESS && ret) {
- /*
- * The blk_end_request has returned non zero
- * even though all data is transfered and no
- * erros returned by host.
- * If this happen it's a bug.
- */
- printk(KERN_ERR "%s BUG rq_tot %d d_xfer %d\n",
+ pr_err("%s BUG rq_tot %d d_xfer %d\n",
__func__, blk_rq_bytes(req),
brq->data.bytes_xfered);
rqc = NULL;
@@ -1103,16 +1213,36 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
}
break;
case MMC_BLK_CMD_ERR:
- goto cmd_err;
- case MMC_BLK_RETRY_SINGLE:
- disable_multi = 1;
- break;
+ ret = mmc_blk_cmd_err(md, card, brq, req, ret);
+ if (!mmc_blk_reset(md, card->host, type))
+ break;
+ goto cmd_abort;
case MMC_BLK_RETRY:
if (retry++ < 5)
break;
+ /* Fall through */
case MMC_BLK_ABORT:
+ if (!mmc_blk_reset(md, card->host, type))
+ break;
goto cmd_abort;
- case MMC_BLK_DATA_ERR:
+ case MMC_BLK_DATA_ERR: {
+ int err;
+
+ err = mmc_blk_reset(md, card->host, type);
+ if (!err)
+ break;
+ if (err == -ENODEV)
+ goto cmd_abort;
+ /* Fall through */
+ }
+ case MMC_BLK_ECC_ERR:
+ if (brq->data.blocks > 1) {
+ /* Redo read one sector at a time */
+ pr_warning("%s: retrying using single block read\n",
+ req->rq_disk->disk_name);
+ disable_multi = 1;
+ break;
+ }
/*
* After an error, we redo I/O one sector at a
* time, so we only reach here after trying to
@@ -1129,7 +1259,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
if (ret) {
/*
- * In case of a none complete request
+ * In case of an incomplete request
* prepare it again and resend.
*/
mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq);
@@ -1139,30 +1269,6 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
return 1;
- cmd_err:
- /*
- * If this is an SD card and we're writing, we can first
- * mark the known good sectors as ok.
- *
- * If the card is not SD, we can still ok written sectors
- * as reported by the controller (which might be less than
- * the real number of written sectors, but never more).
- */
- if (mmc_card_sd(card)) {
- u32 blocks;
-
- blocks = mmc_sd_num_wr_blocks(card);
- if (blocks != (u32)-1) {
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, blocks << 9);
- spin_unlock_irq(&md->lock);
- }
- } else {
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq->data.bytes_xfered);
- spin_unlock_irq(&md->lock);
- }
-
cmd_abort:
spin_lock_irq(&md->lock);
while (ret)
@@ -1190,6 +1296,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
ret = mmc_blk_part_switch(card, md);
if (ret) {
+ if (req) {
+ spin_lock_irq(&md->lock);
+ __blk_end_request_all(req, -EIO);
+ spin_unlock_irq(&md->lock);
+ }
ret = 0;
goto out;
}
@@ -1374,32 +1485,35 @@ static int mmc_blk_alloc_part(struct mmc_card *card,
string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
cap_str, sizeof(cap_str));
- printk(KERN_INFO "%s: %s %s partition %u %s\n",
+ pr_info("%s: %s %s partition %u %s\n",
part_md->disk->disk_name, mmc_card_id(card),
mmc_card_name(card), part_md->part_type, cap_str);
return 0;
}
+/* MMC physical partitions consist of two boot partitions and
+ * up to four general purpose partitions.
+ * For each partition enabled in EXT_CSD a block device will be allocated
+ * to provide access to the partition.
+ */
+
static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
- int ret = 0;
+ int idx, ret = 0;
if (!mmc_card_mmc(card))
return 0;
- if (card->ext_csd.boot_size) {
- ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT0,
- card->ext_csd.boot_size >> 9,
- true,
- "boot0");
- if (ret)
- return ret;
- ret = mmc_blk_alloc_part(card, md, EXT_CSD_PART_CONFIG_ACC_BOOT1,
- card->ext_csd.boot_size >> 9,
- true,
- "boot1");
- if (ret)
- return ret;
+ for (idx = 0; idx < card->nr_parts; idx++) {
+ if (card->part[idx].size) {
+ ret = mmc_blk_alloc_part(card, md,
+ card->part[idx].part_cfg,
+ card->part[idx].size >> 9,
+ card->part[idx].force_ro,
+ card->part[idx].name);
+ if (ret)
+ return ret;
+ }
}
return ret;
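mmc_blk_alloc_parts() no longer special-cases the two boot partitions; it walks the card->part[] descriptors populated by the core and allocates a block device for every partition with a non-zero size, boot and general purpose alike. A small standalone model of that loop; the descriptor struct and alloc_part() are invented for illustration:

#include <stdio.h>

/* Simplified stand-in for the core's per-partition descriptor. */
struct part_desc {
	const char *name;
	unsigned long long size;	/* bytes; 0 means not configured */
	int force_ro;
};

static int alloc_part(const struct part_desc *p)
{
	printf("allocating %s: %llu sectors%s\n",
	       p->name, p->size >> 9, p->force_ro ? " (ro)" : "");
	return 0;
}

int main(void)
{
	struct part_desc parts[] = {
		{ "boot0", 2ULL << 20, 1 },
		{ "boot1", 2ULL << 20, 1 },
		{ "gp0",   0,          0 },	/* not configured, skipped */
	};
	unsigned int i;

	for (i = 0; i < sizeof(parts) / sizeof(parts[0]); i++) {
		if (!parts[i].size)
			continue;
		if (alloc_part(&parts[i]))
			return 1;
	}
	return 0;
}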
@@ -1415,7 +1529,7 @@ mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
mmc_release_host(card->host);
if (err) {
- printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
+ pr_err("%s: unable to set block size to 512: %d\n",
md->disk->disk_name, err);
return -EINVAL;
}
@@ -1517,7 +1631,7 @@ static int mmc_blk_probe(struct mmc_card *card)
string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
cap_str, sizeof(cap_str));
- printk(KERN_INFO "%s: %s %s %s %s\n",
+ pr_info("%s: %s %s %s %s\n",
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
cap_str, md->read_only ? "(ro)" : "");