From f6b9d0fe5c0573ddd0cbf3071a2003897325c86c Mon Sep 17 00:00:00 2001 From: Bean Huo Date: Thu, 1 Dec 2022 15:04:35 +0100 Subject: scsi: ufs: core: Advanced RPMB detection Check UFS Advanced RPMB LU enablement during ufshcd_lu_init(). Signed-off-by: Bean Huo Signed-off-by: Martin K. Petersen --- include/ufs/ufs.h | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'include/ufs') diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h index 1bba3fead2ce..17e401df674c 100644 --- a/include/ufs/ufs.h +++ b/include/ufs/ufs.h @@ -212,6 +212,28 @@ enum unit_desc_param { UNIT_DESC_PARAM_WB_BUF_ALLOC_UNITS = 0x29, }; +/* RPMB Unit descriptor parameters offsets in bytes*/ +enum rpmb_unit_desc_param { + RPMB_UNIT_DESC_PARAM_LEN = 0x0, + RPMB_UNIT_DESC_PARAM_TYPE = 0x1, + RPMB_UNIT_DESC_PARAM_UNIT_INDEX = 0x2, + RPMB_UNIT_DESC_PARAM_LU_ENABLE = 0x3, + RPMB_UNIT_DESC_PARAM_BOOT_LUN_ID = 0x4, + RPMB_UNIT_DESC_PARAM_LU_WR_PROTECT = 0x5, + RPMB_UNIT_DESC_PARAM_LU_Q_DEPTH = 0x6, + RPMB_UNIT_DESC_PARAM_PSA_SENSITIVE = 0x7, + RPMB_UNIT_DESC_PARAM_MEM_TYPE = 0x8, + RPMB_UNIT_DESC_PARAM_REGION_EN = 0x9, + RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_SIZE = 0xA, + RPMB_UNIT_DESC_PARAM_LOGICAL_BLK_COUNT = 0xB, + RPMB_UNIT_DESC_PARAM_REGION0_SIZE = 0x13, + RPMB_UNIT_DESC_PARAM_REGION1_SIZE = 0x14, + RPMB_UNIT_DESC_PARAM_REGION2_SIZE = 0x15, + RPMB_UNIT_DESC_PARAM_REGION3_SIZE = 0x16, + RPMB_UNIT_DESC_PARAM_PROVISIONING_TYPE = 0x17, + RPMB_UNIT_DESC_PARAM_PHY_MEM_RSRC_CNT = 0x18, +}; + /* Device descriptor parameters offsets in bytes*/ enum device_desc_param { DEVICE_DESC_PARAM_LEN = 0x0, @@ -601,6 +623,8 @@ struct ufs_dev_info { bool b_rpm_dev_flush_capable; u8 b_presrv_uspc_en; + + bool b_advanced_rpmb_en; }; /* -- cgit v1.2.3 From 6ff265fc5ef660499e0edc4641647e99eed3f519 Mon Sep 17 00:00:00 2001 From: Bean Huo Date: Thu, 1 Dec 2022 15:04:37 +0100 Subject: scsi: ufs: core: bsg: Add advanced RPMB support in ufs_bsg Add advanced RPMB support in ufs_bsg: 1. According to the UFS specification, only one RPMB operation can be performed at any time. We ensure this by using the reserved slot and its dev_cmd sync operation protection mechanism. 2. For Advanced RPMB, the RPMB metadata is packaged in an EHS (Extra Header Segment) of the command UPIU, and the corresponding reply EHS (from the device) should also be returned to user space. bsg_job->request and bsg_job->reply allow us to pass the request EHS to the driver and return the reply EHS back to user space. Compared to normal/legacy RPMB, the advantages of advanced RPMB are: 1. The data length in an Advanced RPMB data read/write command can be larger than 4KB, whereas the data length of a single legacy RPMB data transfer is 256 bytes. 2. Every advanced RPMB operation is a single command. With legacy RPMB, reading the write counter value, for example, requires two commands (a SECURITY PROTOCOL OUT followed by a SECURITY PROTOCOL IN). Signed-off-by: Bean Huo Reviewed-by: Avri Altman Signed-off-by: Martin K.
Petersen --- drivers/ufs/core/ufs_bsg.c | 93 +++++++++++++++++++++++++++++++---- drivers/ufs/core/ufshcd.c | 103 +++++++++++++++++++++++++++++++++++++++ include/uapi/scsi/scsi_bsg_ufs.h | 46 ++++++++++++++++- include/ufs/ufs.h | 5 ++ include/ufs/ufshcd.h | 6 ++- include/ufs/ufshci.h | 1 + 6 files changed, 242 insertions(+), 12 deletions(-) (limited to 'include/ufs') diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c index 850a0d798f63..a8e58faa7da2 100644 --- a/drivers/ufs/core/ufs_bsg.c +++ b/drivers/ufs/core/ufs_bsg.c @@ -6,6 +6,7 @@ */ #include +#include #include #include #include "ufs_bsg.h" @@ -68,6 +69,72 @@ out: return 0; } +static int ufs_bsg_exec_advanced_rpmb_req(struct ufs_hba *hba, struct bsg_job *job) +{ + struct ufs_rpmb_request *rpmb_request = job->request; + struct ufs_rpmb_reply *rpmb_reply = job->reply; + struct bsg_buffer *payload = NULL; + enum dma_data_direction dir; + struct scatterlist *sg_list; + int rpmb_req_type; + int sg_cnt; + int ret; + int data_len; + + if (hba->ufs_version < ufshci_version(4, 0) || !hba->dev_info.b_advanced_rpmb_en || + !(hba->capabilities & MASK_EHSLUTRD_SUPPORTED)) + return -EINVAL; + + if (rpmb_request->ehs_req.length != 2 || rpmb_request->ehs_req.ehs_type != 1) + return -EINVAL; + + rpmb_req_type = be16_to_cpu(rpmb_request->ehs_req.meta.req_resp_type); + + switch (rpmb_req_type) { + case UFS_RPMB_WRITE_KEY: + case UFS_RPMB_READ_CNT: + case UFS_RPMB_PURGE_ENABLE: + dir = DMA_NONE; + break; + case UFS_RPMB_WRITE: + case UFS_RPMB_SEC_CONF_WRITE: + dir = DMA_TO_DEVICE; + break; + case UFS_RPMB_READ: + case UFS_RPMB_SEC_CONF_READ: + case UFS_RPMB_PURGE_STATUS_READ: + dir = DMA_FROM_DEVICE; + break; + default: + return -EINVAL; + } + + if (dir != DMA_NONE) { + payload = &job->request_payload; + if (!payload || !payload->payload_len || !payload->sg_cnt) + return -EINVAL; + + sg_cnt = dma_map_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir); + if (unlikely(!sg_cnt)) + return -ENOMEM; + sg_list = payload->sg_list; + data_len = payload->payload_len; + } + + ret = ufshcd_advanced_rpmb_req_handler(hba, &rpmb_request->bsg_request.upiu_req, + &rpmb_reply->bsg_reply.upiu_rsp, &rpmb_request->ehs_req, + &rpmb_reply->ehs_rsp, sg_cnt, sg_list, dir); + + if (dir != DMA_NONE) { + dma_unmap_sg(hba->host->dma_dev, payload->sg_list, payload->sg_cnt, dir); + + if (!ret) + rpmb_reply->bsg_reply.reply_payload_rcv_len = data_len; + } + + return ret; +} + static int ufs_bsg_request(struct bsg_job *job) { struct ufs_bsg_request *bsg_request = job->request; @@ -75,10 +142,11 @@ static int ufs_bsg_request(struct bsg_job *job) struct ufs_hba *hba = shost_priv(dev_to_shost(job->dev->parent)); struct uic_command uc = {}; int msgcode; - uint8_t *desc_buff = NULL; + uint8_t *buff = NULL; int desc_len = 0; enum query_opcode desc_op = UPIU_QUERY_OPCODE_NOP; int ret; + bool rpmb = false; bsg_reply->reply_payload_rcv_len = 0; @@ -88,8 +156,7 @@ static int ufs_bsg_request(struct bsg_job *job) switch (msgcode) { case UPIU_TRANSACTION_QUERY_REQ: desc_op = bsg_request->upiu_req.qr.opcode; - ret = ufs_bsg_alloc_desc_buffer(hba, job, &desc_buff, - &desc_len, desc_op); + ret = ufs_bsg_alloc_desc_buffer(hba, job, &buff, &desc_len, desc_op); if (ret) goto out; fallthrough; @@ -97,25 +164,31 @@ static int ufs_bsg_request(struct bsg_job *job) case UPIU_TRANSACTION_TASK_REQ: ret = ufshcd_exec_raw_upiu_cmd(hba, &bsg_request->upiu_req, &bsg_reply->upiu_rsp, msgcode, - desc_buff, &desc_len, desc_op); + buff, &desc_len, desc_op); if (ret) dev_err(hba->dev, "exe 
raw upiu: error code %d\n", ret); - else if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len) + else if (desc_op == UPIU_QUERY_OPCODE_READ_DESC && desc_len) { bsg_reply->reply_payload_rcv_len = sg_copy_from_buffer(job->request_payload.sg_list, job->request_payload.sg_cnt, - desc_buff, desc_len); + buff, desc_len); + } break; case UPIU_TRANSACTION_UIC_CMD: memcpy(&uc, &bsg_request->upiu_req.uc, UIC_CMD_SIZE); ret = ufshcd_send_uic_cmd(hba, &uc); if (ret) - dev_err(hba->dev, - "send uic cmd: error code %d\n", ret); + dev_err(hba->dev, "send uic cmd: error code %d\n", ret); memcpy(&bsg_reply->upiu_rsp.uc, &uc, UIC_CMD_SIZE); break; + case UPIU_TRANSACTION_ARPMB_CMD: + rpmb = true; + ret = ufs_bsg_exec_advanced_rpmb_req(hba, job); + if (ret) + dev_err(hba->dev, "ARPMB OP failed: error code %d\n", ret); + break; default: ret = -ENOTSUPP; dev_err(hba->dev, "unsupported msgcode 0x%x\n", msgcode); @@ -125,9 +198,9 @@ static int ufs_bsg_request(struct bsg_job *job) out: ufshcd_rpm_put_sync(hba); - kfree(desc_buff); + kfree(buff); bsg_reply->result = ret; - job->reply_len = sizeof(struct ufs_bsg_reply); + job->reply_len = !rpmb ? sizeof(struct ufs_bsg_reply) : sizeof(struct ufs_rpmb_reply); /* complete the job here only if no error */ if (ret == 0) bsg_job_done(job, ret, bsg_reply->reply_payload_rcv_len); diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index bacc94f87ee2..1ecee6507e88 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -56,6 +56,9 @@ /* Query request timeout */ #define QUERY_REQ_TIMEOUT 1500 /* 1.5 seconds */ +/* Advanced RPMB request timeout */ +#define ADVANCED_RPMB_REQ_TIMEOUT 3000 /* 3 seconds */ + /* Task management command timeout */ #define TM_CMD_TIMEOUT 100 /* msecs */ @@ -2954,6 +2957,12 @@ ufshcd_dev_cmd_completion(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) dev_err(hba->dev, "%s: Reject UPIU not fully implemented\n", __func__); break; + case UPIU_TRANSACTION_RESPONSE: + if (hba->dev_cmd.type != DEV_CMD_TYPE_RPMB) { + err = -EINVAL; + dev_err(hba->dev, "%s: unexpected response %x\n", __func__, resp); + } + break; default: err = -EINVAL; dev_err(hba->dev, "%s: Invalid device management cmd response: %x\n", @@ -7006,6 +7015,100 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, return err; } +/** + * ufshcd_advanced_rpmb_req_handler - handle advanced RPMB request + * @hba: per adapter instance + * @req_upiu: upiu request + * @rsp_upiu: upiu reply + * @req_ehs: EHS field which contains Advanced RPMB Request Message + * @rsp_ehs: EHS field which returns Advanced RPMB Response Message + * @sg_cnt: The number of sg lists actually used + * @sg_list: Pointer to SG list when DATA IN/OUT UPIU is required in ARPMB operation + * @dir: DMA direction + * + * Returns zero on success, non-zero on failure + */ +int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu, + struct utp_upiu_req *rsp_upiu, struct ufs_ehs *req_ehs, + struct ufs_ehs *rsp_ehs, int sg_cnt, struct scatterlist *sg_list, + enum dma_data_direction dir) +{ + DECLARE_COMPLETION_ONSTACK(wait); + const u32 tag = hba->reserved_slot; + struct ufshcd_lrb *lrbp; + int err = 0; + int result; + u8 upiu_flags; + u8 *ehs_data; + u16 ehs_len; + + /* Protects use of hba->reserved_slot. 
*/ + ufshcd_hold(hba, false); + mutex_lock(&hba->dev_cmd.lock); + down_read(&hba->clk_scaling_lock); + + lrbp = &hba->lrb[tag]; + WARN_ON(lrbp->cmd); + lrbp->cmd = NULL; + lrbp->task_tag = tag; + lrbp->lun = UFS_UPIU_RPMB_WLUN; + + lrbp->intr_cmd = true; + ufshcd_prepare_lrbp_crypto(NULL, lrbp); + hba->dev_cmd.type = DEV_CMD_TYPE_RPMB; + + /* Advanced RPMB starts from UFS 4.0, so its command type is UTP_CMD_TYPE_UFS_STORAGE */ + lrbp->command_type = UTP_CMD_TYPE_UFS_STORAGE; + + ufshcd_prepare_req_desc_hdr(lrbp, &upiu_flags, dir, 2); + + /* update the task tag and LUN in the request upiu */ + req_upiu->header.dword_0 |= cpu_to_be32(upiu_flags << 16 | UFS_UPIU_RPMB_WLUN << 8 | tag); + + /* copy the UPIU(contains CDB) request as it is */ + memcpy(lrbp->ucd_req_ptr, req_upiu, sizeof(*lrbp->ucd_req_ptr)); + /* Copy EHS, starting with byte32, immediately after the CDB package */ + memcpy(lrbp->ucd_req_ptr + 1, req_ehs, sizeof(*req_ehs)); + + if (dir != DMA_NONE && sg_list) + ufshcd_sgl_to_prdt(hba, lrbp, sg_cnt, sg_list); + + memset(lrbp->ucd_rsp_ptr, 0, sizeof(struct utp_upiu_rsp)); + + hba->dev_cmd.complete = &wait; + + ufshcd_send_command(hba, tag); + + err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT); + + if (!err) { + /* Just copy the upiu response as it is */ + memcpy(rsp_upiu, lrbp->ucd_rsp_ptr, sizeof(*rsp_upiu)); + /* Get the response UPIU result */ + result = ufshcd_get_rsp_upiu_result(lrbp->ucd_rsp_ptr); + + ehs_len = be32_to_cpu(lrbp->ucd_rsp_ptr->header.dword_2) >> 24; + /* + * Since the bLength in EHS indicates the total size of the EHS Header and EHS Data + * in 32 Byte units, the value of the bLength Request/Response for Advanced RPMB + * Message is 02h + */ + if (ehs_len == 2 && rsp_ehs) { + /* + * ucd_rsp_ptr points to a buffer with a length of 512 bytes + * (ALIGNED_UPIU_SIZE = 512), and the EHS data just starts from byte32 + */ + ehs_data = (u8 *)lrbp->ucd_rsp_ptr + EHS_OFFSET_IN_RESPONSE; + memcpy(rsp_ehs, ehs_data, ehs_len * 32); + } + } + + up_read(&hba->clk_scaling_lock); + mutex_unlock(&hba->dev_cmd.lock); + ufshcd_release(hba); + return err ? : result; +} + /** * ufshcd_eh_device_reset_handler() - Reset a single logical unit. 
* @cmd: SCSI command pointer diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h index 64b0cb33e549..276e2772328f 100644 --- a/include/uapi/scsi/scsi_bsg_ufs.h +++ b/include/uapi/scsi/scsi_bsg_ufs.h @@ -14,10 +14,27 @@ */ #define UFS_CDB_SIZE 16 -#define UPIU_TRANSACTION_UIC_CMD 0x1F /* uic commands are 4DW long, per UFSHCI V2.1 paragraph 5.6.1 */ #define UIC_CMD_SIZE (sizeof(__u32) * 4) +enum ufs_bsg_msg_code { + UPIU_TRANSACTION_UIC_CMD = 0x1F, + UPIU_TRANSACTION_ARPMB_CMD, +}; + +/* UFS RPMB Request Message Types */ +enum ufs_rpmb_op_type { + UFS_RPMB_WRITE_KEY = 0x01, + UFS_RPMB_READ_CNT = 0x02, + UFS_RPMB_WRITE = 0x03, + UFS_RPMB_READ = 0x04, + UFS_RPMB_READ_RESP = 0x05, + UFS_RPMB_SEC_CONF_WRITE = 0x06, + UFS_RPMB_SEC_CONF_READ = 0x07, + UFS_RPMB_PURGE_ENABLE = 0x08, + UFS_RPMB_PURGE_STATUS_READ = 0x09, +}; + /** * struct utp_upiu_header - UPIU header structure * @dword_0: UPIU header DW-0 @@ -79,6 +96,23 @@ struct utp_upiu_req { }; }; +struct ufs_arpmb_meta { + __u16 req_resp_type; + __u8 nonce[16]; + __u32 write_counter; + __u16 addr_lun; + __u16 block_count; + __u16 result; +} __attribute__((__packed__)); + +struct ufs_ehs { + __u8 length; + __u8 ehs_type; + __u16 ehssub_type; + struct ufs_arpmb_meta meta; + __u8 mac_key[32]; +} __attribute__((__packed__)); + /* request (CDB) structure of the sg_io_v4 */ struct ufs_bsg_request { __u32 msgcode; @@ -102,4 +136,14 @@ struct ufs_bsg_reply { struct utp_upiu_req upiu_rsp; }; + +struct ufs_rpmb_request { + struct ufs_bsg_request bsg_request; + struct ufs_ehs ehs_req; +}; + +struct ufs_rpmb_reply { + struct ufs_bsg_reply bsg_reply; + struct ufs_ehs ehs_rsp; +}; #endif /* UFS_BSG_H */ diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h index 17e401df674c..0c112195b288 100644 --- a/include/ufs/ufs.h +++ b/include/ufs/ufs.h @@ -49,6 +49,11 @@ */ #define UFS_WB_EXCEED_LIFETIME 0x0B +/* + * In UFS Spec, the Extra Header Segment (EHS) starts from byte 32 in UPIU request/response packet + */ +#define EHS_OFFSET_IN_RESPONSE 32 + /* Well known logical unit id in LUN field of UPIU */ enum { UFS_UPIU_REPORT_LUNS_WLUN = 0x81, diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 5cf81dff60aa..c3dfa8084b5c 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -30,6 +30,7 @@ struct ufs_hba; enum dev_cmd_type { DEV_CMD_TYPE_NOP = 0x0, DEV_CMD_TYPE_QUERY = 0x1, + DEV_CMD_TYPE_RPMB = 0x2, }; enum ufs_event_type { @@ -1201,7 +1202,10 @@ int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, int msgcode, u8 *desc_buff, int *buff_len, enum query_opcode desc_op); - +int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu, + struct utp_upiu_req *rsp_upiu, struct ufs_ehs *ehs_req, + struct ufs_ehs *ehs_rsp, int sg_cnt, + struct scatterlist *sg_list, enum dma_data_direction dir); int ufshcd_wb_toggle(struct ufs_hba *hba, bool enable); int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable); int ufshcd_suspend_prepare(struct device *dev); diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h index f525566a0864..af216296b86e 100644 --- a/include/ufs/ufshci.h +++ b/include/ufs/ufshci.h @@ -63,6 +63,7 @@ enum { enum { MASK_TRANSFER_REQUESTS_SLOTS = 0x0000001F, MASK_TASK_MANAGEMENT_REQUEST_SLOTS = 0x00070000, + MASK_EHSLUTRD_SUPPORTED = 0x00400000, MASK_AUTO_HIBERN8_SUPPORT = 0x00800000, MASK_64_ADDRESSING_SUPPORT = 0x01000000, MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000, -- cgit v1.2.3 From ada1e653a5eae7361d95781ed812caa0c8e07dbb Mon Sep 17 00:00:00 2001 From: Eric 
Biggers Date: Thu, 8 Dec 2022 15:43:58 -0800 Subject: scsi: ufs: core: Allow UFS host drivers to override the sg entry size Modify the UFSHCD core to allow 'struct ufshcd_sg_entry' to be variable-length. The default is the standard length, but variants can override ufs_hba::sg_entry_size with a larger value if there are vendor-specific fields following the standard ones. This is needed to support inline encryption with ufs-exynos (FMP). Cc: Eric Biggers Reviewed-by: Avri Altman Signed-off-by: Eric Biggers [ bvanassche: edited commit message and introduced CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE ] Signed-off-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- drivers/ufs/core/ufshcd.c | 39 ++++++++++++++++++--------------------- drivers/ufs/host/Kconfig | 4 ++++ include/ufs/ufshcd.h | 30 ++++++++++++++++++++++++++++++ include/ufs/ufshci.h | 9 +++++++-- 4 files changed, 59 insertions(+), 23 deletions(-) (limited to 'include/ufs') diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index a7d1cf2377e1..62ee2c1ff83d 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -528,7 +528,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt) prdt_length = le16_to_cpu( lrbp->utr_descriptor_ptr->prd_table_length); if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) - prdt_length /= sizeof(struct ufshcd_sg_entry); + prdt_length /= ufshcd_sg_entry_size(hba); dev_err(hba->dev, "UPIU[%d] - PRDT - %d entries phys@0x%llx\n", @@ -537,7 +537,7 @@ void ufshcd_print_trs(struct ufs_hba *hba, unsigned long bitmap, bool pr_prdt) if (pr_prdt) ufshcd_hex_dump("UPIU PRDT: ", lrbp->ucd_prdt_ptr, - sizeof(struct ufshcd_sg_entry) * prdt_length); + ufshcd_sg_entry_size(hba) * prdt_length); } } @@ -2418,7 +2418,7 @@ int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd) */ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) { - struct ufshcd_sg_entry *prd_table; + struct ufshcd_sg_entry *prd; struct scatterlist *sg; struct scsi_cmnd *cmd; int sg_segments; @@ -2433,13 +2433,12 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) if (hba->quirks & UFSHCD_QUIRK_PRDT_BYTE_GRAN) lrbp->utr_descriptor_ptr->prd_table_length = - cpu_to_le16((sg_segments * - sizeof(struct ufshcd_sg_entry))); + cpu_to_le16(sg_segments * ufshcd_sg_entry_size(hba)); else lrbp->utr_descriptor_ptr->prd_table_length = cpu_to_le16(sg_segments); - prd_table = lrbp->ucd_prdt_ptr; + prd = lrbp->ucd_prdt_ptr; scsi_for_each_sg(cmd, sg, sg_segments, i) { const unsigned int len = sg_dma_len(sg); @@ -2453,9 +2452,10 @@ static int ufshcd_map_sg(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) * indicates 4 bytes, '7' indicates 8 bytes, etc." 
*/ WARN_ONCE(len > 256 * 1024, "len = %#x\n", len); - prd_table[i].size = cpu_to_le32(len - 1); - prd_table[i].addr = cpu_to_le64(sg->dma_address); - prd_table[i].reserved = 0; + prd->size = cpu_to_le32(len - 1); + prd->addr = cpu_to_le64(sg->dma_address); + prd->reserved = 0; + prd = (void *)prd + ufshcd_sg_entry_size(hba); } } else { lrbp->utr_descriptor_ptr->prd_table_length = 0; @@ -2745,10 +2745,11 @@ static void ufshcd_map_queues(struct Scsi_Host *shost) static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) { - struct utp_transfer_cmd_desc *cmd_descp = hba->ucdl_base_addr; + struct utp_transfer_cmd_desc *cmd_descp = (void *)hba->ucdl_base_addr + + i * sizeof_utp_transfer_cmd_desc(hba); struct utp_transfer_req_desc *utrdlp = hba->utrdl_base_addr; dma_addr_t cmd_desc_element_addr = hba->ucdl_dma_addr + - i * sizeof(struct utp_transfer_cmd_desc); + i * sizeof_utp_transfer_cmd_desc(hba); u16 response_offset = offsetof(struct utp_transfer_cmd_desc, response_upiu); u16 prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); @@ -2756,11 +2757,11 @@ static void ufshcd_init_lrb(struct ufs_hba *hba, struct ufshcd_lrb *lrb, int i) lrb->utr_descriptor_ptr = utrdlp + i; lrb->utrd_dma_addr = hba->utrdl_dma_addr + i * sizeof(struct utp_transfer_req_desc); - lrb->ucd_req_ptr = (struct utp_upiu_req *)(cmd_descp + i); + lrb->ucd_req_ptr = (struct utp_upiu_req *)cmd_descp->command_upiu; lrb->ucd_req_dma_addr = cmd_desc_element_addr; - lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp[i].response_upiu; + lrb->ucd_rsp_ptr = (struct utp_upiu_rsp *)cmd_descp->response_upiu; lrb->ucd_rsp_dma_addr = cmd_desc_element_addr + response_offset; - lrb->ucd_prdt_ptr = cmd_descp[i].prd_table; + lrb->ucd_prdt_ptr = (struct ufshcd_sg_entry *)cmd_descp->prd_table; lrb->ucd_prdt_dma_addr = cmd_desc_element_addr + prdt_offset; } @@ -3669,7 +3670,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) size_t utmrdl_size, utrdl_size, ucdl_size; /* Allocate memory for UTP command descriptors */ - ucdl_size = (sizeof(struct utp_transfer_cmd_desc) * hba->nutrs); + ucdl_size = sizeof_utp_transfer_cmd_desc(hba) * hba->nutrs; hba->ucdl_base_addr = dmam_alloc_coherent(hba->dev, ucdl_size, &hba->ucdl_dma_addr, @@ -3763,7 +3764,7 @@ static void ufshcd_host_memory_configure(struct ufs_hba *hba) prdt_offset = offsetof(struct utp_transfer_cmd_desc, prd_table); - cmd_desc_size = sizeof(struct utp_transfer_cmd_desc); + cmd_desc_size = sizeof_utp_transfer_cmd_desc(hba); cmd_desc_dma_addr = hba->ucdl_dma_addr; for (i = 0; i < hba->nutrs; i++) { @@ -9658,6 +9659,7 @@ int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle) hba->dev = dev; hba->dev_ref_clk_freq = REF_CLK_FREQ_INVAL; hba->nop_out_timeout = NOP_OUT_TIMEOUT; + ufshcd_set_sg_entry_size(hba, sizeof(struct ufshcd_sg_entry)); INIT_LIST_HEAD(&hba->clk_list_head); spin_lock_init(&hba->outstanding_lock); @@ -10036,11 +10038,6 @@ static int __init ufshcd_core_init(void) { int ret; - /* Verify that there are no gaps in struct utp_transfer_cmd_desc. */ - static_assert(sizeof(struct utp_transfer_cmd_desc) == - 2 * ALIGNED_UPIU_SIZE + - SG_ALL * sizeof(struct ufshcd_sg_entry)); - ufs_debugfs_init(); ret = scsi_register_driver(&ufs_dev_wlun_template.gendrv); diff --git a/drivers/ufs/host/Kconfig b/drivers/ufs/host/Kconfig index 4cc2dbd79ed0..7f01f453e792 100644 --- a/drivers/ufs/host/Kconfig +++ b/drivers/ufs/host/Kconfig @@ -124,3 +124,7 @@ config SCSI_UFS_EXYNOS Select this if you have UFS host controller on Samsung Exynos SoC. 
If unsure, say N. + +config SCSI_UFS_VARIABLE_SG_ENTRY_SIZE + bool + default y if SCSI_UFS_EXYNOS && SCSI_UFS_CRYPTO diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 5cf81dff60aa..e03f111947b6 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -754,6 +754,7 @@ struct ufs_hba_monitor { * @vops: pointer to variant specific operations * @vps: pointer to variant specific parameters * @priv: pointer to variant specific private data + * @sg_entry_size: size of struct ufshcd_sg_entry (may include variant fields) * @irq: Irq number of the controller * @is_irq_enabled: whether or not the UFS controller interrupt is enabled. * @dev_ref_clk_freq: reference clock frequency @@ -877,6 +878,9 @@ struct ufs_hba { const struct ufs_hba_variant_ops *vops; struct ufs_hba_variant_params *vps; void *priv; +#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE + size_t sg_entry_size; +#endif unsigned int irq; bool is_irq_enabled; enum ufs_ref_clk_freq dev_ref_clk_freq; @@ -980,6 +984,32 @@ struct ufs_hba { bool complete_put; }; +#ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE +static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba) +{ + return hba->sg_entry_size; +} + +static inline void ufshcd_set_sg_entry_size(struct ufs_hba *hba, size_t sg_entry_size) +{ + WARN_ON_ONCE(sg_entry_size < sizeof(struct ufshcd_sg_entry)); + hba->sg_entry_size = sg_entry_size; +} +#else +static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba) +{ + return sizeof(struct ufshcd_sg_entry); +} + +#define ufshcd_set_sg_entry_size(hba, sg_entry_size) \ + ({ (void)(hba); BUILD_BUG_ON(sg_entry_size != sizeof(struct ufshcd_sg_entry)); }) +#endif + +static inline size_t sizeof_utp_transfer_cmd_desc(const struct ufs_hba *hba) +{ + return sizeof(struct utp_transfer_cmd_desc) + SG_ALL * ufshcd_sg_entry_size(hba); +} + /* Returns true if clocks can be gated. Otherwise false */ static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba) { diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h index f525566a0864..e145a478afa2 100644 --- a/include/ufs/ufshci.h +++ b/include/ufs/ufshci.h @@ -422,18 +422,23 @@ struct ufshcd_sg_entry { __le64 addr; __le32 reserved; __le32 size; + /* + * followed by variant-specific fields if + * CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE has been defined. + */ }; /** * struct utp_transfer_cmd_desc - UTP Command Descriptor (UCD) * @command_upiu: Command UPIU Frame address * @response_upiu: Response UPIU Frame address - * @prd_table: Physical Region Descriptor + * @prd_table: Physical Region Descriptor: an array of SG_ALL struct + * ufshcd_sg_entry's. Variant-specific fields may be present after each. */ struct utp_transfer_cmd_desc { u8 command_upiu[ALIGNED_UPIU_SIZE]; u8 response_upiu[ALIGNED_UPIU_SIZE]; - struct ufshcd_sg_entry prd_table[SG_ALL]; + u8 prd_table[]; }; /** -- cgit v1.2.3 From f2a89b071b26b79abbe892ce88c4d674d1f21f63 Mon Sep 17 00:00:00 2001 From: Arthur Simchaev Date: Sun, 11 Dec 2022 15:05:09 +0200 Subject: scsi: ufs: core: Remove redundant desc_size variable from hba Always read the descriptor with QUERY_DESC_MAX_SIZE. According to the spec, the device returns the actual size. Signed-off-by: Arthur Simchaev Reviewed-by: Bean Huo Reviewed-by: Bart Van Assche Signed-off-by: Martin K. 
Petersen --- drivers/ufs/core/ufs_bsg.c | 6 +--- drivers/ufs/core/ufshcd-priv.h | 3 -- drivers/ufs/core/ufshcd.c | 72 ++++++++---------------------------------- drivers/ufs/core/ufshpb.c | 4 +-- include/ufs/ufs.h | 1 - include/ufs/ufshcd.h | 4 --- 6 files changed, 15 insertions(+), 75 deletions(-) (limited to 'include/ufs') diff --git a/drivers/ufs/core/ufs_bsg.c b/drivers/ufs/core/ufs_bsg.c index b99e3f3dc4ef..7eec38c63d9f 100644 --- a/drivers/ufs/core/ufs_bsg.c +++ b/drivers/ufs/core/ufs_bsg.c @@ -21,11 +21,7 @@ static int ufs_bsg_get_query_desc_size(struct ufs_hba *hba, int *desc_len, if (desc_size <= 0) return -EINVAL; - ufshcd_map_desc_id_to_length(hba, desc_id, desc_len); - if (!*desc_len) - return -EINVAL; - - *desc_len = min_t(int, *desc_len, desc_size); + *desc_len = min_t(int, QUERY_DESC_MAX_SIZE, desc_size); return 0; } diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index a9e8e1f5afe7..c52e2f349aaa 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -70,9 +70,6 @@ int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index, int ufshcd_hold(struct ufs_hba *hba, bool async); void ufshcd_release(struct ufs_hba *hba); -void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, - int *desc_length); - int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd); int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba, diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 2e47c69d9b04..bb032bc3948e 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -3368,37 +3368,6 @@ int ufshcd_query_descriptor_retry(struct ufs_hba *hba, return err; } -/** - * ufshcd_map_desc_id_to_length - map descriptor IDN to its length - * @hba: Pointer to adapter instance - * @desc_id: descriptor idn value - * @desc_len: mapped desc length (out) - */ -void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, - int *desc_len) -{ - if (desc_id >= QUERY_DESC_IDN_MAX || desc_id == QUERY_DESC_IDN_RFU_0 || - desc_id == QUERY_DESC_IDN_RFU_1) - *desc_len = 0; - else - *desc_len = hba->desc_size[desc_id]; -} -EXPORT_SYMBOL(ufshcd_map_desc_id_to_length); - -static void ufshcd_update_desc_length(struct ufs_hba *hba, - enum desc_idn desc_id, int desc_index, - unsigned char desc_len) -{ - if (hba->desc_size[desc_id] == QUERY_DESC_MAX_SIZE && - desc_id != QUERY_DESC_IDN_STRING && desc_index != UFS_RPMB_UNIT) - /* For UFS 3.1, the normal unit descriptor is 10 bytes larger - * than the RPMB unit, however, both descriptors share the same - * desc_idn, to cover both unit descriptors with one length, we - * choose the normal unit descriptor length by desc_index. 
- */ - hba->desc_size[desc_id] = desc_len; -} - /** * ufshcd_read_desc_param - read the specified descriptor parameter * @hba: Pointer to adapter instance @@ -3419,20 +3388,13 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, { int ret; u8 *desc_buf; - int buff_len; + int buff_len = QUERY_DESC_MAX_SIZE; bool is_kmalloc = true; /* Safety check */ if (desc_id >= QUERY_DESC_IDN_MAX || !param_size) return -EINVAL; - /* Get the length of descriptor */ - ufshcd_map_desc_id_to_length(hba, desc_id, &buff_len); - if (!buff_len) { - dev_err(hba->dev, "%s: Failed to get desc length\n", __func__); - return -EINVAL; - } - if (param_offset >= buff_len) { dev_err(hba->dev, "%s: Invalid offset 0x%x in descriptor IDN 0x%x, length 0x%x\n", __func__, param_offset, desc_id, buff_len); @@ -3470,7 +3432,6 @@ int ufshcd_read_desc_param(struct ufs_hba *hba, /* Update descriptor length */ buff_len = desc_buf[QUERY_DESC_LENGTH_OFFSET]; - ufshcd_update_desc_length(hba, desc_id, desc_index, buff_len); if (is_kmalloc) { /* Make sure we don't copy more data than available */ @@ -4909,7 +4870,7 @@ static void ufshcd_setup_links(struct ufs_hba *hba, struct scsi_device *sdev) */ static void ufshcd_lu_init(struct ufs_hba *hba, struct scsi_device *sdev) { - int len = hba->desc_size[QUERY_DESC_IDN_UNIT]; + int len = QUERY_DESC_MAX_SIZE; u8 lun = ufshcd_scsi_to_upiu_lun(sdev->lun); u8 lun_qdepth = hba->nutrs; u8 *desc_buf; @@ -7480,25 +7441,24 @@ out: static void ufshcd_set_active_icc_lvl(struct ufs_hba *hba) { int ret; - int buff_len = hba->desc_size[QUERY_DESC_IDN_POWER]; u8 *desc_buf; u32 icc_level; - desc_buf = kmalloc(buff_len, GFP_KERNEL); + desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); if (!desc_buf) return; ret = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_POWER, 0, 0, - desc_buf, buff_len); + desc_buf, QUERY_DESC_MAX_SIZE); if (ret) { dev_err(hba->dev, - "%s: Failed reading power descriptor.len = %d ret = %d", - __func__, buff_len, ret); + "%s: Failed reading power descriptor ret = %d", + __func__, ret); goto out; } icc_level = ufshcd_find_max_sup_active_icc_level(hba, desc_buf, - buff_len); + QUERY_DESC_MAX_SIZE); dev_dbg(hba->dev, "%s: setting icc_level 0x%x", __func__, icc_level); ret = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_WRITE_ATTR, @@ -7715,14 +7675,14 @@ static int ufs_get_device_desc(struct ufs_hba *hba) u8 *desc_buf; struct ufs_dev_info *dev_info = &hba->dev_info; - desc_buf = kmalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); + desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); if (!desc_buf) { err = -ENOMEM; goto out; } err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_DEVICE, 0, 0, desc_buf, - hba->desc_size[QUERY_DESC_IDN_DEVICE]); + QUERY_DESC_MAX_SIZE); if (err) { dev_err(hba->dev, "%s: Failed reading Device Desc. err = %d\n", __func__, err); @@ -7969,18 +7929,16 @@ static void ufshcd_clear_dbg_ufs_stats(struct ufs_hba *hba) static int ufshcd_device_geo_params_init(struct ufs_hba *hba) { int err; - size_t buff_len; u8 *desc_buf; - buff_len = hba->desc_size[QUERY_DESC_IDN_GEOMETRY]; - desc_buf = kmalloc(buff_len, GFP_KERNEL); + desc_buf = kzalloc(QUERY_DESC_MAX_SIZE, GFP_KERNEL); if (!desc_buf) { err = -ENOMEM; goto out; } err = ufshcd_read_desc_param(hba, QUERY_DESC_IDN_GEOMETRY, 0, 0, - desc_buf, buff_len); + desc_buf, QUERY_DESC_MAX_SIZE); if (err) { dev_err(hba->dev, "%s: Failed reading Geometry Desc. 
err = %d\n", __func__, err); @@ -7992,7 +7950,7 @@ static int ufshcd_device_geo_params_init(struct ufs_hba *hba) else if (desc_buf[GEOMETRY_DESC_PARAM_MAX_NUM_LUN] == 0) hba->dev_info.max_lu_supported = 8; - if (hba->desc_size[QUERY_DESC_IDN_GEOMETRY] >= + if (desc_buf[QUERY_DESC_LENGTH_OFFSET] >= GEOMETRY_DESC_PARAM_HPB_MAX_ACTIVE_REGS) ufshpb_get_geo_info(hba, desc_buf); @@ -8077,11 +8035,7 @@ out: static int ufshcd_device_params_init(struct ufs_hba *hba) { bool flag; - int ret, i; - - /* Init device descriptor sizes */ - for (i = 0; i < QUERY_DESC_IDN_MAX; i++) - hba->desc_size[i] = QUERY_DESC_MAX_SIZE; + int ret; /* Init UFS geometry descriptor related parameters */ ret = ufshcd_device_geo_params_init(hba); diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c index be3fb24b93d8..19c9b5d1dcf8 100644 --- a/drivers/ufs/core/ufshpb.c +++ b/drivers/ufs/core/ufshpb.c @@ -2382,12 +2382,10 @@ static int ufshpb_get_lu_info(struct ufs_hba *hba, int lun, { u16 max_active_rgns; u8 lu_enable; - int size; + int size = QUERY_DESC_MAX_SIZE; int ret; char desc_buf[QUERY_DESC_MAX_SIZE]; - ufshcd_map_desc_id_to_length(hba, QUERY_DESC_IDN_UNIT, &size); - ufshcd_rpm_get_sync(hba); ret = ufshcd_query_descriptor_retry(hba, UPIU_QUERY_OPCODE_READ_DESC, QUERY_DESC_IDN_UNIT, lun, 0, diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h index 1bba3fead2ce..2fc71075c5a1 100644 --- a/include/ufs/ufs.h +++ b/include/ufs/ufs.h @@ -38,7 +38,6 @@ #define UFS_UPIU_MAX_UNIT_NUM_ID 0x7F #define UFS_MAX_LUNS (SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID) #define UFS_UPIU_WLUN_ID (1 << 7) -#define UFS_RPMB_UNIT 0xC4 /* WriteBooster buffer is available only for the logical unit from 0 to 7 */ #define UFS_UPIU_MAX_WB_LUN_ID 8 diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 5cf81dff60aa..830ababe9932 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -952,7 +952,6 @@ struct ufs_hba { bool is_urgent_bkops_lvl_checked; struct rw_semaphore clk_scaling_lock; - unsigned char desc_size[QUERY_DESC_IDN_MAX]; atomic_t scsi_block_reqs_cnt; struct device bsg_dev; @@ -1186,9 +1185,6 @@ void ufshcd_release(struct ufs_hba *hba); void ufshcd_clkgate_delay_set(struct device *dev, unsigned long value); -void ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id, - int *desc_length); - u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba); int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg); -- cgit v1.2.3 From c2c38c573a2e6184f9acd10e98305d3cb5f1c62b Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Thu, 22 Dec 2022 19:39:56 +0530 Subject: scsi: ufs: core: Add reinit_notify() callback reinit_notify() callback can be used by the UFS controller drivers to perform changes required for UFSHCD reinit that can happen during max gear switch. Tested-by: Andrew Halaney # Qdrive3/sa8540p-ride Signed-off-by: Manivannan Sadhasivam Reviewed-by: Bart Van Assche Signed-off-by: Martin K. 
Petersen --- drivers/ufs/core/ufshcd-priv.h | 6 ++++++ include/ufs/ufshcd.h | 2 ++ 2 files changed, 8 insertions(+) (limited to 'include/ufs') diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index c52e2f349aaa..8fff72e63e36 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -223,6 +223,12 @@ static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba, hba->vops->config_scaling_param(hba, p, data); } +static inline void ufshcd_vops_reinit_notify(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->reinit_notify) + hba->vops->reinit_notify(hba); +} + extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[]; /** diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index dd5912b4db77..97f007d3b851 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -298,6 +298,7 @@ struct ufs_pwr_mode_info { * @config_scaling_param: called to configure clock scaling parameters * @program_key: program or evict an inline encryption key * @event_notify: called to notify important events + * @reinit_notify: called to notify reinit of UFSHCD during max gear switch */ struct ufs_hba_variant_ops { const char *name; @@ -336,6 +337,7 @@ struct ufs_hba_variant_ops { const union ufs_crypto_cfg_entry *cfg, int slot); void (*event_notify)(struct ufs_hba *hba, enum ufs_event_type evt, void *data); + void (*reinit_notify)(struct ufs_hba *); }; /* clock gating state */ -- cgit v1.2.3 From 96a7141da33207672d7a354c885d65af4f0f9b6c Mon Sep 17 00:00:00 2001 From: Manivannan Sadhasivam Date: Thu, 22 Dec 2022 19:39:57 +0530 Subject: scsi: ufs: core: Add support for reinitializing the UFS device Some platforms, such as Qcom, require the UFS device to be reinitialized after switching to the maximum gear speed. Add support for this in the UFS core by introducing a new quirk (UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) and performing the reinitialization when the quirk is enabled by the controller driver. Suggested-by: Can Guo Tested-by: Andrew Halaney # Qdrive3/sa8540p-ride Signed-off-by: Manivannan Sadhasivam Reviewed-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- drivers/ufs/core/ufshcd.c | 63 +++++++++++++++++++++++++++++++++++------------ include/ufs/ufshcd.h | 6 +++++ 2 files changed, 53 insertions(+), 16 deletions(-) (limited to 'include/ufs') diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 99ca5b035028..0514669e03be 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -8231,27 +8231,18 @@ out: return ret; } -/** - * ufshcd_probe_hba - probe hba to detect device and initialize it - * @hba: per-adapter instance - * @init_dev_params: whether or not to call ufshcd_device_params_init().
- * - * Execute link-startup and verify device initialization - */ -static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) +static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) { int ret; - unsigned long flags; - ktime_t start = ktime_get(); hba->ufshcd_state = UFSHCD_STATE_RESET; ret = ufshcd_link_startup(hba); if (ret) - goto out; + return ret; if (hba->quirks & UFSHCD_QUIRK_SKIP_PH_CONFIGURATION) - goto out; + return ret; /* Debug counters initialization */ ufshcd_clear_dbg_ufs_stats(hba); @@ -8262,12 +8253,12 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) /* Verify device initialization by sending NOP OUT UPIU */ ret = ufshcd_verify_dev_init(hba); if (ret) - goto out; + return ret; /* Initiate UFS initialization, and waiting until completion */ ret = ufshcd_complete_dev_init(hba); if (ret) - goto out; + return ret; /* * Initialize UFS device parameters used by driver, these @@ -8276,7 +8267,7 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) if (init_dev_params) { ret = ufshcd_device_params_init(hba); if (ret) - goto out; + return ret; } ufshcd_tune_unipro_params(hba); @@ -8297,11 +8288,51 @@ static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) if (ret) { dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n", __func__, ret); + return ret; + } + } + + return 0; +} + +/** + * ufshcd_probe_hba - probe hba to detect device and initialize it + * @hba: per-adapter instance + * @init_dev_params: whether or not to call ufshcd_device_params_init(). + * + * Execute link-startup and verify device initialization + */ +static int ufshcd_probe_hba(struct ufs_hba *hba, bool init_dev_params) +{ + ktime_t start = ktime_get(); + unsigned long flags; + int ret; + + ret = ufshcd_device_init(hba, init_dev_params); + if (ret) + goto out; + + if (hba->quirks & UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH) { + /* Reset the device and controller before doing reinit */ + ufshcd_device_reset(hba); + ufshcd_hba_stop(hba); + ufshcd_vops_reinit_notify(hba); + ret = ufshcd_hba_enable(hba); + if (ret) { + dev_err(hba->dev, "Host controller enable failed\n"); + ufshcd_print_evt_hist(hba); + ufshcd_print_host_state(hba); goto out; } - ufshcd_print_pwr_info(hba); + + /* Reinit the device */ + ret = ufshcd_device_init(hba, init_dev_params); + if (ret) + goto out; } + ufshcd_print_pwr_info(hba); + /* * bActiveICCLevel is volatile for UFS device (as per latest v2.1 spec) * and for removable UFS card as well, hence always set the parameter. diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 97f007d3b851..ff138927676b 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -596,6 +596,12 @@ enum ufshcd_quirks { * auto-hibernate capability but it's FASTAUTO only. */ UFSHCD_QUIRK_HIBERN_FASTAUTO = 1 << 18, + + /* + * This quirk needs to be enabled if the host controller needs + * to reinit the device after switching to maximum gear. 
+ */ + UFSHCD_QUIRK_REINIT_AFTER_MAX_GEAR_SWITCH = 1 << 19, }; enum ufshcd_caps { -- cgit v1.2.3 From f3e57da528127febae7eb03d9c87408d572b0fd8 Mon Sep 17 00:00:00 2001 From: Bean Huo Date: Sun, 8 Jan 2023 23:40:56 +0100 Subject: scsi: core: Fix invisible definition compilation warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In 'include/ufs/ufshcd.h' file, 'enum dma_data_direction' will be used, which is defined in linux/dma-direction.h, however, this header file is not included in ufshcd.h, thus causing the following compilation warning: "warning: ‘enum dma_data_direction’ declared inside parameter list will not be visible outside of this definition or declaration" Fix this warning by including 'linux/dma-direction.h'. Fixes: 6ff265fc5ef6 ("scsi: ufs: core: bsg: Add advanced RPMB support in ufs_bsg") Reported-by: Xiaosen He Reported-by: Bart Van Assche Signed-off-by: Bean Huo Reviewed-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- include/ufs/ufshcd.h | 1 + 1 file changed, 1 insertion(+) (limited to 'include/ufs') diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index ff138927676b..fc7373a1a15e 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include -- cgit v1.2.3 From 6e1d850acff9477ae4c18a73c19ef52841ac2010 Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Fri, 13 Jan 2023 12:48:38 -0800 Subject: scsi: ufs: core: Probe for EXT_IID support Task Tag is limited to 8 bits and this restricts the number of active I/Os to 255. In multi-circular queue mode, this may not be enough. The specification provides EXT_IID which can be used to increase the number of I/Os if the UFS device and UFSHC support it. This patch adds support to probe for EXT_IID support in UFS device and UFSHC. Co-developed-by: Can Guo Signed-off-by: Can Guo Signed-off-by: Asutosh Das Reviewed-by: Bart Van Assche Reviewed-by: Avri Altman Reviewed-by: Manivannan Sadhasivam Reviewed-by: Stanley Chu Signed-off-by: Martin K. Petersen --- drivers/ufs/core/ufshcd.c | 32 ++++++++++++++++++++++++++++++++ include/ufs/ufs.h | 5 +++++ include/ufs/ufshcd.h | 4 ++++ include/ufs/ufshci.h | 7 +++++++ 4 files changed, 48 insertions(+) (limited to 'include/ufs') diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 0514669e03be..6ba05976c139 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -2261,6 +2261,10 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) if (err) dev_err(hba->dev, "crypto setup failed\n"); + hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP); + hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT, + hba->mcq_capabilities); + return err; } @@ -7766,6 +7770,31 @@ static void ufshcd_temp_notif_probe(struct ufs_hba *hba, const u8 *desc_buf) } } +static void ufshcd_ext_iid_probe(struct ufs_hba *hba, u8 *desc_buf) +{ + struct ufs_dev_info *dev_info = &hba->dev_info; + u32 ext_ufs_feature; + u32 ext_iid_en = 0; + int err; + + /* Only UFS-4.0 and above may support EXT_IID */ + if (dev_info->wspecversion < 0x400) + goto out; + + ext_ufs_feature = get_unaligned_be32(desc_buf + + DEVICE_DESC_PARAM_EXT_UFS_FEATURE_SUP); + if (!(ext_ufs_feature & UFS_DEV_EXT_IID_SUP)) + goto out; + + err = ufshcd_query_attr_retry(hba, UPIU_QUERY_OPCODE_READ_ATTR, + QUERY_ATTR_IDN_EXT_IID_EN, 0, 0, &ext_iid_en); + if (err) + dev_err(hba->dev, "failed reading bEXTIIDEn. 
err = %d\n", err); + +out: + dev_info->b_ext_iid_en = ext_iid_en; +} + void ufshcd_fixup_dev_quirks(struct ufs_hba *hba, const struct ufs_dev_quirk *fixups) { @@ -7864,6 +7893,9 @@ static int ufs_get_device_desc(struct ufs_hba *hba) ufshcd_temp_notif_probe(hba, desc_buf); + if (hba->ext_iid_sup) + ufshcd_ext_iid_probe(hba, desc_buf); + /* * ufshcd_read_string_desc returns size of the string * reset the error value diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h index c146de52918d..bea7b2086a4a 100644 --- a/include/ufs/ufs.h +++ b/include/ufs/ufs.h @@ -169,6 +169,7 @@ enum attr_idn { QUERY_ATTR_IDN_AVAIL_WB_BUFF_SIZE = 0x1D, QUERY_ATTR_IDN_WB_BUFF_LIFE_TIME_EST = 0x1E, QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F, + QUERY_ATTR_IDN_EXT_IID_EN = 0x2A, }; /* Descriptor idn for Query requests */ @@ -378,6 +379,7 @@ enum { UFS_DEV_EXT_TEMP_NOTIF = BIT(6), UFS_DEV_HPB_SUPPORT = BIT(7), UFS_DEV_WRITE_BOOSTER_SUP = BIT(8), + UFS_DEV_EXT_IID_SUP = BIT(16), }; #define UFS_DEV_HPB_SUPPORT_VERSION 0x310 @@ -629,6 +631,9 @@ struct ufs_dev_info { u8 b_presrv_uspc_en; bool b_advanced_rpmb_en; + + /* UFS EXT_IID Enable */ + bool b_ext_iid_en; }; /* diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index fc7373a1a15e..8f2080f9fdb5 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -757,6 +757,7 @@ struct ufs_hba_monitor { * @outstanding_lock: Protects @outstanding_reqs. * @outstanding_reqs: Bits representing outstanding transfer requests * @capabilities: UFS Controller Capabilities + * @mcq_capabilities: UFS Multi Circular Queue capabilities * @nutrs: Transfer Request Queue depth supported by controller * @nutmrs: Task Management Queue depth supported by controller * @reserved_slot: Used to submit device commands. Protected by @dev_cmd.lock. @@ -841,6 +842,7 @@ struct ufs_hba_monitor { * device * @complete_put: whether or not to call ufshcd_rpm_put() from inside * ufshcd_resume_complete() + * @ext_iid_sup: is EXT_IID is supported by UFSHC */ struct ufs_hba { void __iomem *mmio_base; @@ -882,6 +884,7 @@ struct ufs_hba { u32 capabilities; int nutrs; + u32 mcq_capabilities; int nutmrs; u32 reserved_slot; u32 ufs_version; @@ -991,6 +994,7 @@ struct ufs_hba { #endif u32 luns_avail; bool complete_put; + bool ext_iid_sup; }; #ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h index 9346efea3eb3..f41bc7b03092 100644 --- a/include/ufs/ufshci.h +++ b/include/ufs/ufshci.h @@ -22,6 +22,7 @@ enum { /* UFSHCI Registers */ enum { REG_CONTROLLER_CAPABILITIES = 0x00, + REG_MCQCAP = 0x04, REG_UFS_VERSION = 0x08, REG_CONTROLLER_DEV_ID = 0x10, REG_CONTROLLER_PROD_ID = 0x14, @@ -69,6 +70,12 @@ enum { MASK_OUT_OF_ORDER_DATA_DELIVERY_SUPPORT = 0x02000000, MASK_UIC_DME_TEST_MODE_SUPPORT = 0x04000000, MASK_CRYPTO_SUPPORT = 0x10000000, + MASK_MCQ_SUPPORT = 0x40000000, +}; + +/* MCQ capability mask */ +enum { + MASK_EXT_IID_SUPPORT = 0x00000400, }; #define UFS_MASK(mask, offset) ((mask) << (offset)) -- cgit v1.2.3 From 305a357d3595d39be7c001f72e135bc94cbd85da Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Fri, 13 Jan 2023 12:48:39 -0800 Subject: scsi: ufs: core: Introduce multi-circular queue capability Add support to check for MCQ capability in the UFSHC. Add a module parameter to disable MCQ if needed. Co-developed-by: Can Guo Signed-off-by: Can Guo Signed-off-by: Asutosh Das Reviewed-by: Bart Van Assche Reviewed-by: Manivannan Sadhasivam Reviewed-by: Stanley Chu Signed-off-by: Martin K. 
Petersen --- drivers/ufs/core/ufshcd.c | 26 ++++++++++++++++++++++++++ include/ufs/ufshcd.h | 2 ++ 2 files changed, 28 insertions(+) (limited to 'include/ufs') diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 6ba05976c139..64e04619326f 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -92,6 +92,28 @@ /* Polling time to wait for fDeviceInit */ #define FDEVICEINIT_COMPL_TIMEOUT 1500 /* millisecs */ +/* UFSHC 4.0 compliant HC support this mode, refer param_set_mcq_mode() */ +static bool use_mcq_mode = true; + +static int param_set_mcq_mode(const char *val, const struct kernel_param *kp) +{ + int ret; + + ret = param_set_bool(val, kp); + if (ret) + return ret; + + return 0; +} + +static const struct kernel_param_ops mcq_mode_ops = { + .set = param_set_mcq_mode, + .get = param_get_bool, +}; + +module_param_cb(use_mcq_mode, &mcq_mode_ops, &use_mcq_mode, 0644); +MODULE_PARM_DESC(use_mcq_mode, "Control MCQ mode for controllers starting from UFSHCI 4.0. 1 - enable MCQ, 0 - disable MCQ. MCQ is enabled by default"); + #define ufshcd_toggle_vreg(_dev, _vreg, _on) \ ({ \ int _ret; \ @@ -2261,6 +2283,10 @@ static inline int ufshcd_hba_capabilities(struct ufs_hba *hba) if (err) dev_err(hba->dev, "crypto setup failed\n"); + hba->mcq_sup = FIELD_GET(MASK_MCQ_SUPPORT, hba->capabilities); + if (!hba->mcq_sup) + return err; + hba->mcq_capabilities = ufshcd_readl(hba, REG_MCQCAP); hba->ext_iid_sup = FIELD_GET(MASK_EXT_IID_SUPPORT, hba->mcq_capabilities); diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 8f2080f9fdb5..514687d2de54 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -843,6 +843,7 @@ struct ufs_hba_monitor { * @complete_put: whether or not to call ufshcd_rpm_put() from inside * ufshcd_resume_complete() * @ext_iid_sup: is EXT_IID is supported by UFSHC + * @mcq_sup: is mcq supported by UFSHC */ struct ufs_hba { void __iomem *mmio_base; @@ -995,6 +996,7 @@ struct ufs_hba { u32 luns_avail; bool complete_put; bool ext_iid_sup; + bool mcq_sup; }; #ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE -- cgit v1.2.3 From 0cab4023ec7b49b18145f74ab8389678d6d58878 Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Fri, 13 Jan 2023 12:48:40 -0800 Subject: scsi: ufs: core: Defer adding host to SCSI if MCQ is supported If MCQ support is present, enabling it after MCQ support has been configured would require reallocating tags and memory. It would also free up the already allocated memory in Single Doorbell Mode. So defer invoking scsi_add_host() until MCQ is configured. Co-developed-by: Can Guo Signed-off-by: Can Guo Signed-off-by: Asutosh Das Reviewed-by: Bart Van Assche Reviewed-by: Manivannan Sadhasivam Signed-off-by: Martin K. 
Petersen --- drivers/ufs/core/ufshcd.c | 24 ++++++++++++++++++++---- include/ufs/ufshcd.h | 1 + 2 files changed, 21 insertions(+), 4 deletions(-) (limited to 'include/ufs') diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 64e04619326f..2cb05a6a0142 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -95,6 +95,11 @@ /* UFSHC 4.0 compliant HC support this mode, refer param_set_mcq_mode() */ static bool use_mcq_mode = true; +static bool is_mcq_supported(struct ufs_hba *hba) +{ + return hba->mcq_sup && use_mcq_mode; +} + static int param_set_mcq_mode(const char *val, const struct kernel_param *kp) { int ret; @@ -8292,6 +8297,7 @@ out: static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) { int ret; + struct Scsi_Host *host = hba->host; hba->ufshcd_state = UFSHCD_STATE_RESET; @@ -8326,6 +8332,14 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) ret = ufshcd_device_params_init(hba); if (ret) return ret; + if (is_mcq_supported(hba) && !hba->scsi_host_added) { + ret = scsi_add_host(host, hba->dev); + if (ret) { + dev_err(hba->dev, "scsi_add_host failed\n"); + return ret; + } + hba->scsi_host_added = true; + } } ufshcd_tune_unipro_params(hba); @@ -9964,10 +9978,12 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq) hba->is_irq_enabled = true; } - err = scsi_add_host(host, hba->dev); - if (err) { - dev_err(hba->dev, "scsi_add_host failed\n"); - goto out_disable; + if (!is_mcq_supported(hba)) { + err = scsi_add_host(host, hba->dev); + if (err) { + dev_err(hba->dev, "scsi_add_host failed\n"); + goto out_disable; + } } hba->tmf_tag_set = (struct blk_mq_tag_set) { diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 514687d2de54..a343bd4bc0eb 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -996,6 +996,7 @@ struct ufs_hba { u32 luns_avail; bool complete_put; bool ext_iid_sup; + bool scsi_host_added; bool mcq_sup; }; -- cgit v1.2.3 From 57b1c0ef89ac9d9e7475df7843aeb7672ebcd197 Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Fri, 13 Jan 2023 12:48:41 -0800 Subject: scsi: ufs: core: mcq: Add support to allocate multiple queues Multi-circular queue (MCQ) has been added in UFSHC v4.0 standard in addition to the Single Doorbell mode. The MCQ mode supports multiple submission and completion queues. Add support to allocate and configure the queues. Add module parameters support to configure the queues. Co-developed-by: Can Guo Signed-off-by: Can Guo Signed-off-by: Asutosh Das Reviewed-by: Bart Van Assche Reviewed-by: Manivannan Sadhasivam Signed-off-by: Martin K. 
Petersen --- drivers/ufs/core/Makefile | 2 +- drivers/ufs/core/ufs-mcq.c | 124 +++++++++++++++++++++++++++++++++++++++++ drivers/ufs/core/ufshcd-priv.h | 1 + drivers/ufs/core/ufshcd.c | 12 ++++ include/ufs/ufshcd.h | 4 ++ 5 files changed, 142 insertions(+), 1 deletion(-) create mode 100644 drivers/ufs/core/ufs-mcq.c (limited to 'include/ufs') diff --git a/drivers/ufs/core/Makefile b/drivers/ufs/core/Makefile index 62f38c5bf857..4d02e0f2de10 100644 --- a/drivers/ufs/core/Makefile +++ b/drivers/ufs/core/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_SCSI_UFSHCD) += ufshcd-core.o -ufshcd-core-y += ufshcd.o ufs-sysfs.o +ufshcd-core-y += ufshcd.o ufs-sysfs.o ufs-mcq.o ufshcd-core-$(CONFIG_DEBUG_FS) += ufs-debugfs.o ufshcd-core-$(CONFIG_SCSI_UFS_BSG) += ufs_bsg.o ufshcd-core-$(CONFIG_SCSI_UFS_CRYPTO) += ufshcd-crypto.o diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c new file mode 100644 index 000000000000..6ed362547806 --- /dev/null +++ b/drivers/ufs/core/ufs-mcq.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2022 Qualcomm Innovation Center. All rights reserved. + * + * Authors: + * Asutosh Das + * Can Guo + */ + +#include +#include +#include +#include +#include "ufshcd-priv.h" + +#define MAX_QUEUE_SUP GENMASK(7, 0) +#define UFS_MCQ_MIN_RW_QUEUES 2 +#define UFS_MCQ_MIN_READ_QUEUES 0 +#define UFS_MCQ_NUM_DEV_CMD_QUEUES 1 +#define UFS_MCQ_MIN_POLL_QUEUES 0 + +static int rw_queue_count_set(const char *val, const struct kernel_param *kp) +{ + return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES, + num_possible_cpus()); +} + +static const struct kernel_param_ops rw_queue_count_ops = { + .set = rw_queue_count_set, + .get = param_get_uint, +}; + +static unsigned int rw_queues; +module_param_cb(rw_queues, &rw_queue_count_ops, &rw_queues, 0644); +MODULE_PARM_DESC(rw_queues, + "Number of interrupt driven I/O queues used for rw. Default value is nr_cpus"); + +static int read_queue_count_set(const char *val, const struct kernel_param *kp) +{ + return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_READ_QUEUES, + num_possible_cpus()); +} + +static const struct kernel_param_ops read_queue_count_ops = { + .set = read_queue_count_set, + .get = param_get_uint, +}; + +static unsigned int read_queues; +module_param_cb(read_queues, &read_queue_count_ops, &read_queues, 0644); +MODULE_PARM_DESC(read_queues, + "Number of interrupt driven read queues used for read. Default value is 0"); + +static int poll_queue_count_set(const char *val, const struct kernel_param *kp) +{ + return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_POLL_QUEUES, + num_possible_cpus()); +} + +static const struct kernel_param_ops poll_queue_count_ops = { + .set = poll_queue_count_set, + .get = param_get_uint, +}; + +static unsigned int poll_queues = 1; +module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644); +MODULE_PARM_DESC(poll_queues, + "Number of poll queues used for r/w. 
Default value is 1"); + +static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba) +{ + int i; + u32 hba_maxq, rem, tot_queues; + struct Scsi_Host *host = hba->host; + + hba_maxq = FIELD_GET(MAX_QUEUE_SUP, hba->mcq_capabilities); + + tot_queues = UFS_MCQ_NUM_DEV_CMD_QUEUES + read_queues + poll_queues + + rw_queues; + + if (hba_maxq < tot_queues) { + dev_err(hba->dev, "Total queues (%d) exceeds HC capacity (%d)\n", + tot_queues, hba_maxq); + return -EOPNOTSUPP; + } + + rem = hba_maxq - UFS_MCQ_NUM_DEV_CMD_QUEUES; + + if (rw_queues) { + hba->nr_queues[HCTX_TYPE_DEFAULT] = rw_queues; + rem -= hba->nr_queues[HCTX_TYPE_DEFAULT]; + } else { + rw_queues = num_possible_cpus(); + } + + if (poll_queues) { + hba->nr_queues[HCTX_TYPE_POLL] = poll_queues; + rem -= hba->nr_queues[HCTX_TYPE_POLL]; + } + + if (read_queues) { + hba->nr_queues[HCTX_TYPE_READ] = read_queues; + rem -= hba->nr_queues[HCTX_TYPE_READ]; + } + + if (!hba->nr_queues[HCTX_TYPE_DEFAULT]) + hba->nr_queues[HCTX_TYPE_DEFAULT] = min3(rem, rw_queues, + num_possible_cpus()); + + for (i = 0; i < HCTX_MAX_TYPES; i++) + host->nr_hw_queues += hba->nr_queues[i]; + + hba->nr_hw_queues = host->nr_hw_queues + UFS_MCQ_NUM_DEV_CMD_QUEUES; + return 0; +} + +int ufshcd_mcq_init(struct ufs_hba *hba) +{ + int ret; + + ret = ufshcd_mcq_config_nr_queues(hba); + + return ret; +} diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index 8fff72e63e36..8d93ef675cea 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -61,6 +61,7 @@ int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode, int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res); void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit); +int ufshcd_mcq_init(struct ufs_hba *hba); #define SD_ASCII_STD true #define SD_RAW false diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 2cb05a6a0142..97120867a2a0 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -8294,6 +8294,11 @@ out: return ret; } +static int ufshcd_alloc_mcq(struct ufs_hba *hba) +{ + return ufshcd_mcq_init(hba); +} + static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) { int ret; @@ -8333,6 +8338,13 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) if (ret) return ret; if (is_mcq_supported(hba) && !hba->scsi_host_added) { + ret = ufshcd_alloc_mcq(hba); + if (ret) { + /* Continue with SDB mode */ + use_mcq_mode = false; + dev_err(hba->dev, "MCQ mode is disabled, err=%d\n", + ret); + } ret = scsi_add_host(host, hba->dev); if (ret) { dev_err(hba->dev, "scsi_add_host failed\n"); diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index a343bd4bc0eb..91ca724c0294 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -840,6 +840,8 @@ struct ufs_hba_monitor { * ee_ctrl_mask * @luns_avail: number of regular and well known LUNs supported by the UFS * device + * @nr_hw_queues: number of hardware queues configured + * @nr_queues: number of Queues of different queue types * @complete_put: whether or not to call ufshcd_rpm_put() from inside * ufshcd_resume_complete() * @ext_iid_sup: is EXT_IID is supported by UFSHC @@ -994,6 +996,8 @@ struct ufs_hba { u32 debugfs_ee_rate_limit_ms; #endif u32 luns_avail; + unsigned int nr_hw_queues; + unsigned int nr_queues[HCTX_MAX_TYPES]; bool complete_put; bool ext_iid_sup; bool scsi_host_added; -- cgit v1.2.3 From c263b4ef737e622e2a908c58ca4bb68a89376387 Mon Sep 17 00:00:00 
2001 From: Asutosh Das Date: Fri, 13 Jan 2023 12:48:42 -0800 Subject: scsi: ufs: core: mcq: Configure resource regions Define the MCQ resources and add support to ioremap the resource regions. Co-developed-by: Can Guo Signed-off-by: Can Guo Signed-off-by: Asutosh Das Reviewed-by: Manivannan Sadhasivam Reviewed-by: Bart Van Assche Reviewed-by: Stanley Chu Signed-off-by: Martin K. Petersen --- drivers/ufs/core/ufs-mcq.c | 3 ++ drivers/ufs/core/ufshcd-priv.h | 8 ++++ drivers/ufs/host/ufs-qcom.c | 101 +++++++++++++++++++++++++++++++++++++++++ include/ufs/ufshcd.h | 30 ++++++++++++ 4 files changed, 142 insertions(+) (limited to 'include/ufs') diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 6ed362547806..65c00373af0e 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -119,6 +119,9 @@ int ufshcd_mcq_init(struct ufs_hba *hba) int ret; ret = ufshcd_mcq_config_nr_queues(hba); + if (ret) + return ret; + ret = ufshcd_vops_mcq_config_resource(hba); return ret; } diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index 8d93ef675cea..e3bc4ce274e3 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -230,6 +230,14 @@ static inline void ufshcd_vops_reinit_notify(struct ufs_hba *hba) hba->vops->reinit_notify(hba); } +static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->mcq_config_resource) + return hba->vops->mcq_config_resource(hba); + + return -EOPNOTSUPP; +} + extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[]; /** diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 5e7ba3b6a59d..2a1434ff0501 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -22,6 +22,12 @@ #include #include +#define MCQ_QCFGPTR_MASK GENMASK(7, 0) +#define MCQ_QCFGPTR_UNIT 0x200 +#define MCQ_SQATTR_OFFSET(c) \ + ((((c) >> 16) & MCQ_QCFGPTR_MASK) * MCQ_QCFGPTR_UNIT) +#define MCQ_QCFG_SIZE 0x40 + enum { TSTBUS_UAWM, TSTBUS_UARM, @@ -1396,6 +1402,100 @@ static void ufs_qcom_reinit_notify(struct ufs_hba *hba) phy_power_off(host->generic_phy); } +/* Resources */ +static const struct ufshcd_res_info ufs_res_info[RES_MAX] = { + {.name = "ufs_mem",}, + {.name = "mcq",}, + /* Submission Queue DAO */ + {.name = "mcq_sqd",}, + /* Submission Queue Interrupt Status */ + {.name = "mcq_sqis",}, + /* Completion Queue DAO */ + {.name = "mcq_cqd",}, + /* Completion Queue Interrupt Status */ + {.name = "mcq_cqis",}, + /* MCQ vendor specific */ + {.name = "mcq_vs",}, +}; + +static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba) +{ + struct platform_device *pdev = to_platform_device(hba->dev); + struct ufshcd_res_info *res; + struct resource *res_mem, *res_mcq; + int i, ret = 0; + + memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info)); + + for (i = 0; i < RES_MAX; i++) { + res = &hba->res[i]; + res->resource = platform_get_resource_byname(pdev, + IORESOURCE_MEM, + res->name); + if (!res->resource) { + dev_info(hba->dev, "Resource %s not provided\n", res->name); + if (i == RES_UFS) + return -ENOMEM; + continue; + } else if (i == RES_UFS) { + res_mem = res->resource; + res->base = hba->mmio_base; + continue; + } + + res->base = devm_ioremap_resource(hba->dev, res->resource); + if (IS_ERR(res->base)) { + dev_err(hba->dev, "Failed to map res %s, err=%d\n", + res->name, (int)PTR_ERR(res->base)); + res->base = NULL; + ret = PTR_ERR(res->base); + return ret; + } + } + + /* MCQ resource provided in DT */ + res = &hba->res[RES_MCQ]; + /* Bail if 
MCQ resource is provided */ + if (res->base) + goto out; + + /* Explicitly allocate MCQ resource from ufs_mem */ + res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL); + if (!res_mcq) + return ret; + + res_mcq->start = res_mem->start + + MCQ_SQATTR_OFFSET(hba->mcq_capabilities); + res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1; + res_mcq->flags = res_mem->flags; + res_mcq->name = "mcq"; + + ret = insert_resource(&iomem_resource, res_mcq); + if (ret) { + dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n", + ret); + goto insert_res_err; + } + + res->base = devm_ioremap_resource(hba->dev, res_mcq); + if (IS_ERR(res->base)) { + dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n", + (int)PTR_ERR(res->base)); + ret = PTR_ERR(res->base); + goto ioremap_err; + } + +out: + hba->mcq_base = res->base; + return 0; +ioremap_err: + res->base = NULL; + remove_resource(res_mcq); +insert_res_err: + devm_kfree(hba->dev, res_mcq); + return ret; +} + /* * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations * @@ -1420,6 +1520,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .config_scaling_param = ufs_qcom_config_scaling_param, .program_key = ufs_qcom_ice_program_key, .reinit_notify = ufs_qcom_reinit_notify, + .mcq_config_resource = ufs_qcom_mcq_config_resource, }; /** diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 91ca724c0294..81c7494cecad 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -300,6 +300,7 @@ struct ufs_pwr_mode_info { * @program_key: program or evict an inline encryption key * @event_notify: called to notify important events * @reinit_notify: called to notify reinit of UFSHCD during max gear switch + * @mcq_config_resource: called to configure MCQ platform resources */ struct ufs_hba_variant_ops { const char *name; @@ -339,6 +340,7 @@ struct ufs_hba_variant_ops { void (*event_notify)(struct ufs_hba *hba, enum ufs_event_type evt, void *data); void (*reinit_notify)(struct ufs_hba *); + int (*mcq_config_resource)(struct ufs_hba *hba); }; /* clock gating state */ @@ -733,6 +735,30 @@ struct ufs_hba_monitor { bool enabled; }; +/** + * struct ufshcd_res_info_t - MCQ related resource regions + * + * @name: resource name + * @resource: pointer to resource region + * @base: register base address + */ +struct ufshcd_res_info { + const char *name; + struct resource *resource; + void __iomem *base; +}; + +enum ufshcd_res { + RES_UFS, + RES_MCQ, + RES_MCQ_SQD, + RES_MCQ_SQIS, + RES_MCQ_CQD, + RES_MCQ_CQIS, + RES_MCQ_VS, + RES_MAX, +}; + /** * struct ufs_hba - per adapter private structure * @mmio_base: UFSHCI base register address @@ -846,6 +872,8 @@ struct ufs_hba_monitor { * ufshcd_resume_complete() * @ext_iid_sup: is EXT_IID is supported by UFSHC * @mcq_sup: is mcq supported by UFSHC + * @res: array of resource info of MCQ registers + * @mcq_base: Multi circular queue registers base address */ struct ufs_hba { void __iomem *mmio_base; @@ -1002,6 +1030,8 @@ struct ufs_hba { bool ext_iid_sup; bool scsi_host_added; bool mcq_sup; + struct ufshcd_res_info res[RES_MAX]; + void __iomem *mcq_base; }; #ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE -- cgit v1.2.3 From 7224c806876e46cfaf46b1c90da8d5c2e1f2108f Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Fri, 13 Jan 2023 12:48:43 -0800 Subject: scsi: ufs: core: mcq: Calculate queue depth The UFS device defines the supported queuedepth by bqueuedepth which has a max value of 256. 
The HC defines MAC (Max Active Commands) that defines the max number of commands that in flight to the UFS device. Calculate and configure the nutrs based on both these values. Co-developed-by: Can Guo Signed-off-by: Can Guo Signed-off-by: Asutosh Das Reviewed-by: Manivannan Sadhasivam Reviewed-by: Bart Van Assche Reviewed-by: Stanley Chu Signed-off-by: Martin K. Petersen --- drivers/ufs/core/ufs-mcq.c | 35 +++++++++++++++++++++++++++++++++++ drivers/ufs/core/ufshcd-priv.h | 9 +++++++++ drivers/ufs/core/ufshcd.c | 17 ++++++++++++++++- drivers/ufs/host/ufs-qcom.c | 7 +++++++ drivers/ufs/host/ufs-qcom.h | 1 + include/ufs/ufs.h | 2 ++ include/ufs/ufshcd.h | 2 ++ include/ufs/ufshci.h | 1 + 8 files changed, 73 insertions(+), 1 deletion(-) (limited to 'include/ufs') diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 65c00373af0e..2f680ff5cafc 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -19,6 +19,9 @@ #define UFS_MCQ_NUM_DEV_CMD_QUEUES 1 #define UFS_MCQ_MIN_POLL_QUEUES 0 +#define MAX_DEV_CMD_ENTRIES 2 +#define MCQ_CFG_MAC_MASK GENMASK(16, 8) + static int rw_queue_count_set(const char *val, const struct kernel_param *kp) { return param_set_uint_minmax(val, kp, UFS_MCQ_MIN_RW_QUEUES, @@ -67,6 +70,38 @@ module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644); MODULE_PARM_DESC(poll_queues, "Number of poll queues used for r/w. Default value is 1"); +/** + * ufshcd_mcq_decide_queue_depth - decide the queue depth + * @hba - per adapter instance + * + * Returns queue-depth on success, non-zero on error + * + * MAC - Max. Active Command of the Host Controller (HC) + * HC wouldn't send more than this commands to the device. + * It is mandatory to implement get_hba_mac() to enable MCQ mode. + * Calculates and adjusts the queue depth based on the depth + * supported by the HC and ufs device. + */ +int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba) +{ + int mac; + + /* Mandatory to implement get_hba_mac() */ + mac = ufshcd_mcq_vops_get_hba_mac(hba); + if (mac < 0) { + dev_err(hba->dev, "Failed to get mac, err=%d\n", mac); + return mac; + } + + WARN_ON_ONCE(!hba->dev_info.bqueuedepth); + /* + * max. value of bqueuedepth = 256, mac is host dependent. + * It is mandatory for UFS device to define bQueueDepth if + * shared queuing architecture is enabled. 
+ */ + return min_t(int, mac, hba->dev_info.bqueuedepth); +} + static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba) { int i; diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index e3bc4ce274e3..11076c3111c5 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -62,6 +62,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, enum flag_idn idn, u8 index, bool *flag_res); void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit); int ufshcd_mcq_init(struct ufs_hba *hba); +int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba); #define SD_ASCII_STD true #define SD_RAW false @@ -238,6 +239,14 @@ static inline int ufshcd_vops_mcq_config_resource(struct ufs_hba *hba) return -EOPNOTSUPP; } +static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->get_hba_mac) + return hba->vops->get_hba_mac(hba); + + return -EOPNOTSUPP; +} + extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[]; /** diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 97120867a2a0..5c8570c8e100 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -7887,6 +7887,7 @@ static int ufs_get_device_desc(struct ufs_hba *hba) /* getting Specification Version in big endian format */ dev_info->wspecversion = desc_buf[DEVICE_DESC_PARAM_SPEC_VER] << 8 | desc_buf[DEVICE_DESC_PARAM_SPEC_VER + 1]; + dev_info->bqueuedepth = desc_buf[DEVICE_DESC_PARAM_Q_DPTH]; b_ufs_feature_sup = desc_buf[DEVICE_DESC_PARAM_UFS_FEAT]; model_index = desc_buf[DEVICE_DESC_PARAM_PRDCT_NAME]; @@ -8296,7 +8297,21 @@ out: static int ufshcd_alloc_mcq(struct ufs_hba *hba) { - return ufshcd_mcq_init(hba); + int ret; + int old_nutrs = hba->nutrs; + + ret = ufshcd_mcq_decide_queue_depth(hba); + if (ret < 0) + return ret; + + hba->nutrs = ret; + ret = ufshcd_mcq_init(hba); + if (ret) { + hba->nutrs = old_nutrs; + return ret; + } + + return 0; } static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index 2a1434ff0501..edd7225065df 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -1496,6 +1496,12 @@ insert_res_err: return ret; } +static int ufs_qcom_get_hba_mac(struct ufs_hba *hba) +{ + /* Qualcomm HC supports up to 64 */ + return MAX_SUPP_MAC; +} + /* * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations * @@ -1521,6 +1527,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .program_key = ufs_qcom_ice_program_key, .reinit_notify = ufs_qcom_reinit_notify, .mcq_config_resource = ufs_qcom_mcq_config_resource, + .get_hba_mac = ufs_qcom_get_hba_mac, }; /** diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h index f744a9e62002..164e18b6b1fb 100644 --- a/drivers/ufs/host/ufs-qcom.h +++ b/drivers/ufs/host/ufs-qcom.h @@ -16,6 +16,7 @@ #define HBRN8_POLL_TOUT_MS 100 #define DEFAULT_CLK_RATE_HZ 1000000 #define BUS_VECTOR_NAME_LEN 32 +#define MAX_SUPP_MAC 64 #define UFS_HW_VER_MAJOR_MASK GENMASK(31, 28) #define UFS_HW_VER_MINOR_MASK GENMASK(27, 16) diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h index bea7b2086a4a..4e8d6240e589 100644 --- a/include/ufs/ufs.h +++ b/include/ufs/ufs.h @@ -617,6 +617,8 @@ struct ufs_dev_info { u8 *model; u16 wspecversion; u32 clk_gating_wait_us; + /* Stores the depth of queue in UFS device */ + u8 bqueuedepth; /* UFS HPB related flag */ bool hpb_enabled; diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 
81c7494cecad..57854bb0395e 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -301,6 +301,7 @@ struct ufs_pwr_mode_info { * @event_notify: called to notify important events * @reinit_notify: called to notify reinit of UFSHCD during max gear switch * @mcq_config_resource: called to configure MCQ platform resources + * @get_hba_mac: called to get vendor specific mac value, mandatory for mcq mode */ struct ufs_hba_variant_ops { const char *name; @@ -341,6 +342,7 @@ struct ufs_hba_variant_ops { enum ufs_event_type evt, void *data); void (*reinit_notify)(struct ufs_hba *); int (*mcq_config_resource)(struct ufs_hba *hba); + int (*get_hba_mac)(struct ufs_hba *hba); }; /* clock gating state */ diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h index f41bc7b03092..845a82a57f65 100644 --- a/include/ufs/ufshci.h +++ b/include/ufs/ufshci.h @@ -57,6 +57,7 @@ enum { REG_UFS_CCAP = 0x100, REG_UFS_CRYPTOCAP = 0x104, + REG_UFS_MCQ_CFG = 0x380, UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400, }; -- cgit v1.2.3 From 4682abfae2eb3a1c138130cfd6d71411d81aaa00 Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Fri, 13 Jan 2023 12:48:44 -0800 Subject: scsi: ufs: core: mcq: Allocate memory for MCQ mode To read the bqueuedepth, the device descriptor is fetched in Single Doorbell Mode. This allocated memory may not be enough for MCQ mode because the number of tags supported in MCQ mode may be larger than in SDB mode. Hence, release the memory allocated in SDB mode and allocate memory for MCQ mode operation. Define the UFS hardware queue and Completion Queue Entry. Co-developed-by: Can Guo Signed-off-by: Can Guo Signed-off-by: Asutosh Das Reviewed-by: Manivannan Sadhasivam Reviewed-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- drivers/ufs/core/ufs-mcq.c | 59 ++++++++++++++++++++++++++++++++++++++++-- drivers/ufs/core/ufshcd-priv.h | 1 + drivers/ufs/core/ufshcd.c | 48 +++++++++++++++++++++++++++++++--- include/ufs/ufshcd.h | 20 ++++++++++++++ include/ufs/ufshci.h | 22 ++++++++++++++++ 5 files changed, 145 insertions(+), 5 deletions(-) (limited to 'include/ufs') diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 2f680ff5cafc..c77bc54527b2 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -149,14 +149,69 @@ static int ufshcd_mcq_config_nr_queues(struct ufs_hba *hba) return 0; } +int ufshcd_mcq_memory_alloc(struct ufs_hba *hba) +{ + struct ufs_hw_queue *hwq; + size_t utrdl_size, cqe_size; + int i; + + for (i = 0; i < hba->nr_hw_queues; i++) { + hwq = &hba->uhq[i]; + + utrdl_size = sizeof(struct utp_transfer_req_desc) * + hwq->max_entries; + hwq->sqe_base_addr = dmam_alloc_coherent(hba->dev, utrdl_size, + &hwq->sqe_dma_addr, + GFP_KERNEL); + if (!hwq->sqe_dma_addr) { + dev_err(hba->dev, "SQE allocation failed\n"); + return -ENOMEM; + } + + cqe_size = sizeof(struct cq_entry) * hwq->max_entries; + hwq->cqe_base_addr = dmam_alloc_coherent(hba->dev, cqe_size, + &hwq->cqe_dma_addr, + GFP_KERNEL); + if (!hwq->cqe_dma_addr) { + dev_err(hba->dev, "CQE allocation failed\n"); + return -ENOMEM; + } + } + + return 0; +} + + int ufshcd_mcq_init(struct ufs_hba *hba) { - int ret; + struct ufs_hw_queue *hwq; + int ret, i; ret = ufshcd_mcq_config_nr_queues(hba); if (ret) return ret; ret = ufshcd_vops_mcq_config_resource(hba); - return ret; + if (ret) + return ret; + + hba->uhq = devm_kzalloc(hba->dev, + hba->nr_hw_queues * sizeof(struct ufs_hw_queue), + GFP_KERNEL); + if (!hba->uhq) { + dev_err(hba->dev, "ufs hw queue memory allocation failed\n"); + return -ENOMEM; + } + 
+ for (i = 0; i < hba->nr_hw_queues; i++) { + hwq = &hba->uhq[i]; + hwq->max_entries = hba->nutrs; + } + + /* The very first HW queue serves device commands */ + hba->dev_cmd_queue = &hba->uhq[0]; + /* Give dev_cmd_queue the minimal number of entries */ + hba->dev_cmd_queue->max_entries = MAX_DEV_CMD_ENTRIES; + + return 0; } diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index 11076c3111c5..667e601a480f 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -63,6 +63,7 @@ int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode, void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit); int ufshcd_mcq_init(struct ufs_hba *hba); int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba); +int ufshcd_mcq_memory_alloc(struct ufs_hba *hba); #define SD_ASCII_STD true #define SD_RAW false diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 5c8570c8e100..9b4d7a9c81e6 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -3719,6 +3719,14 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) goto out; } + /* + * Skip utmrdl allocation; it may have been + * allocated during first pass and not released during + * MCQ memory allocation. + * See ufshcd_release_sdb_queue() and ufshcd_config_mcq() + */ + if (hba->utmrdl_base_addr) + goto skip_utmrdl; /* * Allocate memory for UTP Task Management descriptors * UFSHCI requires 1024 byte alignment of UTMRD @@ -3735,6 +3743,7 @@ static int ufshcd_memory_alloc(struct ufs_hba *hba) goto out; } +skip_utmrdl: /* Allocate memory for local reference block */ hba->lrb = devm_kcalloc(hba->dev, hba->nutrs, sizeof(struct ufshcd_lrb), @@ -8295,6 +8304,22 @@ out: return ret; } +/* SDB - Single Doorbell */ +static void ufshcd_release_sdb_queue(struct ufs_hba *hba, int nutrs) +{ + size_t ucdl_size, utrdl_size; + + ucdl_size = sizeof(struct utp_transfer_cmd_desc) * nutrs; + dmam_free_coherent(hba->dev, ucdl_size, hba->ucdl_base_addr, + hba->ucdl_dma_addr); + + utrdl_size = sizeof(struct utp_transfer_req_desc) * nutrs; + dmam_free_coherent(hba->dev, utrdl_size, hba->utrdl_base_addr, + hba->utrdl_dma_addr); + + devm_kfree(hba->dev, hba->lrb); +} + static int ufshcd_alloc_mcq(struct ufs_hba *hba) { int ret; @@ -8306,12 +8331,29 @@ static int ufshcd_alloc_mcq(struct ufs_hba *hba) hba->nutrs = ret; ret = ufshcd_mcq_init(hba); - if (ret) { - hba->nutrs = old_nutrs; - return ret; + if (ret) + goto err; + + /* + * Previously allocated memory for nutrs may not be enough in MCQ mode. + * Number of supported tags in MCQ mode may be larger than SDB mode. 
+ */ + if (hba->nutrs != old_nutrs) { + ufshcd_release_sdb_queue(hba, old_nutrs); + ret = ufshcd_memory_alloc(hba); + if (ret) + goto err; + ufshcd_host_memory_configure(hba); } + ret = ufshcd_mcq_memory_alloc(hba); + if (ret) + goto err; + return 0; +err: + hba->nutrs = old_nutrs; + return ret; } static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 57854bb0395e..311113c8e92d 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -876,6 +876,8 @@ enum ufshcd_res { * @mcq_sup: is mcq supported by UFSHC * @res: array of resource info of MCQ registers * @mcq_base: Multi circular queue registers base address + * @uhq: array of supported hardware queues + * @dev_cmd_queue: Queue for issuing device management commands */ struct ufs_hba { void __iomem *mmio_base; @@ -1034,6 +1036,24 @@ struct ufs_hba { bool mcq_sup; struct ufshcd_res_info res[RES_MAX]; void __iomem *mcq_base; + struct ufs_hw_queue *uhq; + struct ufs_hw_queue *dev_cmd_queue; +}; + +/** + * struct ufs_hw_queue - per hardware queue structure + * @sqe_base_addr: submission queue entry base address + * @sqe_dma_addr: submission queue dma address + * @cqe_base_addr: completion queue base address + * @cqe_dma_addr: completion queue dma address + * @max_entries: max number of slots in this hardware queue + */ +struct ufs_hw_queue { + void *sqe_base_addr; + dma_addr_t sqe_dma_addr; + struct cq_entry *cqe_base_addr; + dma_addr_t cqe_dma_addr; + u32 max_entries; }; #ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h index 845a82a57f65..0d621e9e3146 100644 --- a/include/ufs/ufshci.h +++ b/include/ufs/ufshci.h @@ -492,6 +492,28 @@ struct utp_transfer_req_desc { __le16 prd_table_offset; }; +/* MCQ Completion Queue Entry */ +struct cq_entry { + /* DW 0-1 */ + __le64 command_desc_base_addr; + + /* DW 2 */ + __le16 response_upiu_length; + __le16 response_upiu_offset; + + /* DW 3 */ + __le16 prd_table_length; + __le16 prd_table_offset; + + /* DW 4 */ + __le32 status; + + /* DW 5-7 */ + __le32 reserved[3]; +}; + +static_assert(sizeof(struct cq_entry) == 32); + /* * UTMRD structure. */ -- cgit v1.2.3 From 2468da61ea095162067ed408824298ba9c3661c8 Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Fri, 13 Jan 2023 12:48:45 -0800 Subject: scsi: ufs: core: mcq: Configure operation and runtime interface Runtime and operation registers are defined per Submission and Completion queue. The location of these registers is not defined in the spec; meaning the offsets and stride may vary for different HC vendors. Establish the stride, base address, and doorbell address offsets from vendor host driver and program it. Co-developed-by: Can Guo Signed-off-by: Can Guo Signed-off-by: Asutosh Das Reviewed-by: Manivannan Sadhasivam Reviewed-by: Bart Van Assche Signed-off-by: Martin K. 
Petersen --- drivers/ufs/core/ufs-mcq.c | 102 +++++++++++++++++++++++++++++++++++++++++ drivers/ufs/core/ufshcd-priv.h | 11 +++++ drivers/ufs/core/ufshcd.c | 27 +++++++++++ drivers/ufs/host/ufs-qcom.c | 24 ++++++++++ include/ufs/ufshcd.h | 52 +++++++++++++++++++++ include/ufs/ufshci.h | 31 +++++++++++++ 6 files changed, 247 insertions(+) (limited to 'include/ufs') diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index c77bc54527b2..496e2b638c44 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -18,9 +18,13 @@ #define UFS_MCQ_MIN_READ_QUEUES 0 #define UFS_MCQ_NUM_DEV_CMD_QUEUES 1 #define UFS_MCQ_MIN_POLL_QUEUES 0 +#define QUEUE_EN_OFFSET 31 +#define QUEUE_ID_OFFSET 16 #define MAX_DEV_CMD_ENTRIES 2 #define MCQ_CFG_MAC_MASK GENMASK(16, 8) +#define MCQ_QCFG_SIZE 0x40 +#define MCQ_ENTRY_SIZE_IN_DWORD 8 static int rw_queue_count_set(const char *val, const struct kernel_param *kp) { @@ -70,6 +74,24 @@ module_param_cb(poll_queues, &poll_queue_count_ops, &poll_queues, 0644); MODULE_PARM_DESC(poll_queues, "Number of poll queues used for r/w. Default value is 1"); +/** + * ufshcd_mcq_config_mac - Set the #Max Activ Cmds. + * @hba - per adapter instance + * @max_active_cmds - maximum # of active commands to the device at any time. + * + * The controller won't send more than the max_active_cmds to the device at + * any time. + */ +void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds) +{ + u32 val; + + val = ufshcd_readl(hba, REG_UFS_MCQ_CFG); + val &= ~MCQ_CFG_MAC_MASK; + val |= FIELD_PREP(MCQ_CFG_MAC_MASK, max_active_cmds); + ufshcd_writel(hba, val, REG_UFS_MCQ_CFG); +} + /** * ufshcd_mcq_decide_queue_depth - decide the queue depth * @hba - per adapter instance @@ -182,6 +204,80 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba) } +/* Operation and runtime registers configuration */ +#define MCQ_CFG_n(r, i) ((r) + MCQ_QCFG_SIZE * (i)) +#define MCQ_OPR_OFFSET_n(p, i) \ + (hba->mcq_opr[(p)].offset + hba->mcq_opr[(p)].stride * (i)) + +static void __iomem *mcq_opr_base(struct ufs_hba *hba, + enum ufshcd_mcq_opr n, int i) +{ + struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[n]; + + return opr->base + opr->stride * i; +} + +void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba) +{ + struct ufs_hw_queue *hwq; + u16 qsize; + int i; + + for (i = 0; i < hba->nr_hw_queues; i++) { + hwq = &hba->uhq[i]; + hwq->id = i; + qsize = hwq->max_entries * MCQ_ENTRY_SIZE_IN_DWORD - 1; + + /* Submission Queue Lower Base Address */ + ufsmcq_writelx(hba, lower_32_bits(hwq->sqe_dma_addr), + MCQ_CFG_n(REG_SQLBA, i)); + /* Submission Queue Upper Base Address */ + ufsmcq_writelx(hba, upper_32_bits(hwq->sqe_dma_addr), + MCQ_CFG_n(REG_SQUBA, i)); + /* Submission Queue Doorbell Address Offset */ + ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQD, i), + MCQ_CFG_n(REG_SQDAO, i)); + /* Submission Queue Interrupt Status Address Offset */ + ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_SQIS, i), + MCQ_CFG_n(REG_SQISAO, i)); + + /* Completion Queue Lower Base Address */ + ufsmcq_writelx(hba, lower_32_bits(hwq->cqe_dma_addr), + MCQ_CFG_n(REG_CQLBA, i)); + /* Completion Queue Upper Base Address */ + ufsmcq_writelx(hba, upper_32_bits(hwq->cqe_dma_addr), + MCQ_CFG_n(REG_CQUBA, i)); + /* Completion Queue Doorbell Address Offset */ + ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQD, i), + MCQ_CFG_n(REG_CQDAO, i)); + /* Completion Queue Interrupt Status Address Offset */ + ufsmcq_writelx(hba, MCQ_OPR_OFFSET_n(OPR_CQIS, i), + MCQ_CFG_n(REG_CQISAO, i)); + + /* Save the base addresses for 
quicker access */ + hwq->mcq_sq_head = mcq_opr_base(hba, OPR_SQD, i) + REG_SQHP; + hwq->mcq_sq_tail = mcq_opr_base(hba, OPR_SQD, i) + REG_SQTP; + hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP; + hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP; + + /* Enable Tail Entry Push Status interrupt only for non-poll queues */ + if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]) + writel(1, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIE); + + /* Completion Queue Enable|Size to Completion Queue Attribute */ + ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize, + MCQ_CFG_n(REG_CQATTR, i)); + + /* + * Submission Qeueue Enable|Size|Completion Queue ID to + * Submission Queue Attribute + */ + ufsmcq_writel(hba, (1 << QUEUE_EN_OFFSET) | qsize | + (i << QUEUE_ID_OFFSET), + MCQ_CFG_n(REG_SQATTR, i)); + } +} + int ufshcd_mcq_init(struct ufs_hba *hba) { struct ufs_hw_queue *hwq; @@ -195,6 +291,12 @@ int ufshcd_mcq_init(struct ufs_hba *hba) if (ret) return ret; + ret = ufshcd_mcq_vops_op_runtime_config(hba); + if (ret) { + dev_err(hba->dev, "Operation runtime config failed, ret=%d\n", + ret); + return ret; + } hba->uhq = devm_kzalloc(hba->dev, hba->nr_hw_queues * sizeof(struct ufs_hw_queue), GFP_KERNEL); diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index 667e601a480f..4cd9b7b63868 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -64,6 +64,9 @@ void ufshcd_auto_hibern8_update(struct ufs_hba *hba, u32 ahit); int ufshcd_mcq_init(struct ufs_hba *hba); int ufshcd_mcq_decide_queue_depth(struct ufs_hba *hba); int ufshcd_mcq_memory_alloc(struct ufs_hba *hba); +void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba); +void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds); +void ufshcd_mcq_select_mcq_mode(struct ufs_hba *hba); #define SD_ASCII_STD true #define SD_RAW false @@ -248,6 +251,14 @@ static inline int ufshcd_mcq_vops_get_hba_mac(struct ufs_hba *hba) return -EOPNOTSUPP; } +static inline int ufshcd_mcq_vops_op_runtime_config(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->op_runtime_config) + return hba->vops->op_runtime_config(hba); + + return -EOPNOTSUPP; +} + extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[]; /** diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 9b4d7a9c81e6..9d582786dabe 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -43,6 +43,12 @@ #define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\ UTP_TASK_REQ_COMPL |\ UFSHCD_ERROR_MASK) + +#define UFSHCD_ENABLE_MCQ_INTRS (UTP_TASK_REQ_COMPL |\ + UFSHCD_ERROR_MASK |\ + MCQ_CQ_EVENT_STATUS) + + /* UIC command timeout, unit: ms */ #define UIC_CMD_TIMEOUT 500 @@ -8356,6 +8362,20 @@ err: return ret; } +static void ufshcd_config_mcq(struct ufs_hba *hba) +{ + ufshcd_enable_intr(hba, UFSHCD_ENABLE_MCQ_INTRS); + ufshcd_mcq_make_queues_operational(hba); + ufshcd_mcq_config_mac(hba, hba->nutrs); + + hba->host->can_queue = hba->nutrs - UFSHCD_NUM_RESERVED; + hba->reserved_slot = hba->nutrs - UFSHCD_NUM_RESERVED; + dev_info(hba->dev, "MCQ configured, nr_queues=%d, io_queues=%d, read_queue=%d, poll_queues=%d, queue_depth=%d\n", + hba->nr_hw_queues, hba->nr_queues[HCTX_TYPE_DEFAULT], + hba->nr_queues[HCTX_TYPE_READ], hba->nr_queues[HCTX_TYPE_POLL], + hba->nutrs); +} + static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) { int ret; @@ -8376,6 +8396,10 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) /* UniPro link is active now */ 
ufshcd_set_link_active(hba); + /* Reconfigure MCQ upon reset */ + if (is_mcq_enabled(hba) && !init_dev_params) + ufshcd_config_mcq(hba); + /* Verify device initialization by sending NOP OUT UPIU */ ret = ufshcd_verify_dev_init(hba); if (ret) @@ -8409,6 +8433,9 @@ static int ufshcd_device_init(struct ufs_hba *hba, bool init_dev_params) } hba->scsi_host_added = true; } + /* MCQ may be disabled if ufshcd_alloc_mcq() fails */ + if (is_mcq_supported(hba) && use_mcq_mode) + ufshcd_config_mcq(hba); } ufshcd_tune_unipro_params(hba); diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index edd7225065df..e828b876e793 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -1496,6 +1496,29 @@ insert_res_err: return ret; } +static int ufs_qcom_op_runtime_config(struct ufs_hba *hba) +{ + struct ufshcd_res_info *mem_res, *sqdao_res; + struct ufshcd_mcq_opr_info_t *opr; + int i; + + mem_res = &hba->res[RES_UFS]; + sqdao_res = &hba->res[RES_MCQ_SQD]; + + if (!mem_res->base || !sqdao_res->base) + return -EINVAL; + + for (i = 0; i < OPR_MAX; i++) { + opr = &hba->mcq_opr[i]; + opr->offset = sqdao_res->resource->start - + mem_res->resource->start + 0x40 * i; + opr->stride = 0x100; + opr->base = sqdao_res->base + 0x40 * i; + } + + return 0; +} + static int ufs_qcom_get_hba_mac(struct ufs_hba *hba) { /* Qualcomm HC supports up to 64 */ @@ -1528,6 +1551,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .reinit_notify = ufs_qcom_reinit_notify, .mcq_config_resource = ufs_qcom_mcq_config_resource, .get_hba_mac = ufs_qcom_get_hba_mac, + .op_runtime_config = ufs_qcom_op_runtime_config, }; /** diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 311113c8e92d..13a2f17daa8c 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -302,6 +302,7 @@ struct ufs_pwr_mode_info { * @reinit_notify: called to notify reinit of UFSHCD during max gear switch * @mcq_config_resource: called to configure MCQ platform resources * @get_hba_mac: called to get vendor specific mac value, mandatory for mcq mode + * @op_runtime_config: called to config Operation and runtime regs Pointers */ struct ufs_hba_variant_ops { const char *name; @@ -343,6 +344,7 @@ struct ufs_hba_variant_ops { void (*reinit_notify)(struct ufs_hba *); int (*mcq_config_resource)(struct ufs_hba *hba); int (*get_hba_mac)(struct ufs_hba *hba); + int (*op_runtime_config)(struct ufs_hba *hba); }; /* clock gating state */ @@ -761,6 +763,27 @@ enum ufshcd_res { RES_MAX, }; +/** + * struct ufshcd_mcq_opr_info_t - Operation and Runtime registers + * + * @offset: Doorbell Address Offset + * @stride: Steps proportional to queue [0...31] + * @base: base address + */ +struct ufshcd_mcq_opr_info_t { + unsigned long offset; + unsigned long stride; + void __iomem *base; +}; + +enum ufshcd_mcq_opr { + OPR_SQD, + OPR_SQIS, + OPR_CQD, + OPR_CQIS, + OPR_MAX, +}; + /** * struct ufs_hba - per adapter private structure * @mmio_base: UFSHCI base register address @@ -874,6 +897,7 @@ enum ufshcd_res { * ufshcd_resume_complete() * @ext_iid_sup: is EXT_IID is supported by UFSHC * @mcq_sup: is mcq supported by UFSHC + * @mcq_enabled: is mcq ready to accept requests * @res: array of resource info of MCQ registers * @mcq_base: Multi circular queue registers base address * @uhq: array of supported hardware queues @@ -1034,28 +1058,46 @@ struct ufs_hba { bool ext_iid_sup; bool scsi_host_added; bool mcq_sup; + bool mcq_enabled; struct ufshcd_res_info res[RES_MAX]; void __iomem *mcq_base; struct ufs_hw_queue *uhq; struct 
ufs_hw_queue *dev_cmd_queue; + struct ufshcd_mcq_opr_info_t mcq_opr[OPR_MAX]; }; /** * struct ufs_hw_queue - per hardware queue structure + * @mcq_sq_head: base address of submission queue head pointer + * @mcq_sq_tail: base address of submission queue tail pointer + * @mcq_cq_head: base address of completion queue head pointer + * @mcq_cq_tail: base address of completion queue tail pointer * @sqe_base_addr: submission queue entry base address * @sqe_dma_addr: submission queue dma address * @cqe_base_addr: completion queue base address * @cqe_dma_addr: completion queue dma address * @max_entries: max number of slots in this hardware queue + * @id: hardware queue ID */ struct ufs_hw_queue { + void __iomem *mcq_sq_head; + void __iomem *mcq_sq_tail; + void __iomem *mcq_cq_head; + void __iomem *mcq_cq_tail; + void *sqe_base_addr; dma_addr_t sqe_dma_addr; struct cq_entry *cqe_base_addr; dma_addr_t cqe_dma_addr; u32 max_entries; + u32 id; }; +static inline bool is_mcq_enabled(struct ufs_hba *hba) +{ + return hba->mcq_enabled; +} + #ifdef CONFIG_SCSI_UFS_VARIABLE_SG_ENTRY_SIZE static inline size_t ufshcd_sg_entry_size(const struct ufs_hba *hba) { @@ -1137,6 +1179,16 @@ static inline bool ufshcd_enable_wb_if_scaling_up(struct ufs_hba *hba) return hba->caps & UFSHCD_CAP_WB_WITH_CLK_SCALING; } +#define ufsmcq_writel(hba, val, reg) \ + writel((val), (hba)->mcq_base + (reg)) +#define ufsmcq_readl(hba, reg) \ + readl((hba)->mcq_base + (reg)) + +#define ufsmcq_writelx(hba, val, reg) \ + writel_relaxed((val), (hba)->mcq_base + (reg)) +#define ufsmcq_readlx(hba, reg) \ + readl_relaxed((hba)->mcq_base + (reg)) + #define ufshcd_writel(hba, val, reg) \ writel((val), (hba)->mmio_base + (reg)) #define ufshcd_readl(hba, reg) \ diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h index 0d621e9e3146..b6ae5718387c 100644 --- a/include/ufs/ufshci.h +++ b/include/ufs/ufshci.h @@ -57,6 +57,7 @@ enum { REG_UFS_CCAP = 0x100, REG_UFS_CRYPTOCAP = 0x104, + REG_UFS_MEM_CFG = 0x300, REG_UFS_MCQ_CFG = 0x380, UFSHCI_CRYPTO_REG_SPACE_SIZE = 0x400, }; @@ -79,6 +80,35 @@ enum { MASK_EXT_IID_SUPPORT = 0x00000400, }; +enum { + REG_SQATTR = 0x0, + REG_SQLBA = 0x4, + REG_SQUBA = 0x8, + REG_SQDAO = 0xC, + REG_SQISAO = 0x10, + + REG_CQATTR = 0x20, + REG_CQLBA = 0x24, + REG_CQUBA = 0x28, + REG_CQDAO = 0x2C, + REG_CQISAO = 0x30, +}; + +enum { + REG_SQHP = 0x0, + REG_SQTP = 0x4, +}; + +enum { + REG_CQHP = 0x0, + REG_CQTP = 0x4, +}; + +enum { + REG_CQIS = 0x0, + REG_CQIE = 0x4, +}; + #define UFS_MASK(mask, offset) ((mask) << (offset)) /* UFS Version 08h */ @@ -135,6 +165,7 @@ static inline u32 ufshci_version(u32 major, u32 minor) #define CONTROLLER_FATAL_ERROR 0x10000 #define SYSTEM_BUS_FATAL_ERROR 0x20000 #define CRYPTO_ENGINE_FATAL_ERROR 0x40000 +#define MCQ_CQ_EVENT_STATUS 0x100000 #define UFSHCD_UIC_HIBERN8_MASK (UIC_HIBERNATE_ENTER |\ UIC_HIBERNATE_EXIT) -- cgit v1.2.3 From 22a2d563de1425ea294e9abfa104dbf20c83a28a Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Fri, 13 Jan 2023 12:48:47 -0800 Subject: scsi: ufs: core: Prepare ufshcd_send_command() for MCQ Add support to send commands using multiple submission queues in MCQ mode. Modify the functions that use ufshcd_send_command(). Co-developed-by: Can Guo Signed-off-by: Can Guo Signed-off-by: Asutosh Das Reviewed-by: Bart Van Assche Reviewed-by: Manivannan Sadhasivam Reviewed-by: Stanley Chu Signed-off-by: Martin K. 
Petersen --- drivers/ufs/core/ufs-mcq.c | 1 + drivers/ufs/core/ufshcd-priv.h | 10 ++++++++++ drivers/ufs/core/ufshcd.c | 38 +++++++++++++++++++++++++++----------- include/ufs/ufshcd.h | 5 +++++ 4 files changed, 43 insertions(+), 11 deletions(-) (limited to 'include/ufs') diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index 8bf222fb4b06..68158259b223 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -309,6 +309,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba) for (i = 0; i < hba->nr_hw_queues; i++) { hwq = &hba->uhq[i]; hwq->max_entries = hba->nutrs; + spin_lock_init(&hwq->sq_lock); } /* The very first HW queue serves device commands */ diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index 4cd9b7b63868..013111fca0f7 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -335,4 +335,14 @@ static inline bool ufs_is_valid_unit_desc_lun(struct ufs_dev_info *dev_info, u8 return lun == UFS_UPIU_RPMB_WLUN || (lun < dev_info->max_lu_supported); } +static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q) +{ + u32 mask = q->max_entries - 1; + u32 val; + + q->sq_tail_slot = (q->sq_tail_slot + 1) & mask; + val = q->sq_tail_slot * sizeof(struct utp_transfer_req_desc); + writel(val, q->mcq_sq_tail); +} + #endif /* _UFSHCD_PRIV_H_ */ diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index a492c88ccecb..655903b1d8f7 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -2185,9 +2185,11 @@ static void ufshcd_update_monitor(struct ufs_hba *hba, const struct ufshcd_lrb * * ufshcd_send_command - Send SCSI or device management commands * @hba: per adapter instance * @task_tag: Task tag of the command + * @hwq: pointer to hardware queue instance */ static inline -void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) +void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag, + struct ufs_hw_queue *hwq) { struct ufshcd_lrb *lrbp = &hba->lrb[task_tag]; unsigned long flags; @@ -2201,12 +2203,24 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag) if (unlikely(ufshcd_should_inform_monitor(hba, lrbp))) ufshcd_start_monitor(hba, lrbp); - spin_lock_irqsave(&hba->outstanding_lock, flags); - if (hba->vops && hba->vops->setup_xfer_req) - hba->vops->setup_xfer_req(hba, task_tag, !!lrbp->cmd); - __set_bit(task_tag, &hba->outstanding_reqs); - ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL); - spin_unlock_irqrestore(&hba->outstanding_lock, flags); + if (is_mcq_enabled(hba)) { + int utrd_size = sizeof(struct utp_transfer_req_desc); + + spin_lock(&hwq->sq_lock); + memcpy(hwq->sqe_base_addr + (hwq->sq_tail_slot * utrd_size), + lrbp->utr_descriptor_ptr, utrd_size); + ufshcd_inc_sq_tail(hwq); + spin_unlock(&hwq->sq_lock); + } else { + spin_lock_irqsave(&hba->outstanding_lock, flags); + if (hba->vops && hba->vops->setup_xfer_req) + hba->vops->setup_xfer_req(hba, lrbp->task_tag, + !!lrbp->cmd); + __set_bit(lrbp->task_tag, &hba->outstanding_reqs); + ufshcd_writel(hba, 1 << lrbp->task_tag, + REG_UTP_TRANSFER_REQ_DOOR_BELL); + spin_unlock_irqrestore(&hba->outstanding_lock, flags); + } } /** @@ -2836,6 +2850,7 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) int tag = scsi_cmd_to_rq(cmd)->tag; struct ufshcd_lrb *lrbp; int err = 0; + struct ufs_hw_queue *hwq = NULL; WARN_ONCE(tag < 0 || tag >= hba->nutrs, "Invalid tag %d\n", tag); @@ -2920,7 +2935,7 @@ static int ufshcd_queuecommand(struct Scsi_Host 
*host, struct scsi_cmnd *cmd) goto out; } - ufshcd_send_command(hba, tag); + ufshcd_send_command(hba, tag, hwq); out: rcu_read_unlock(); @@ -3121,10 +3136,11 @@ static int ufshcd_exec_dev_cmd(struct ufs_hba *hba, goto out; hba->dev_cmd.complete = &wait; + hba->dev_cmd.cqe = NULL; ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); - ufshcd_send_command(hba, tag); + ufshcd_send_command(hba, tag, hba->dev_cmd_queue); err = ufshcd_wait_for_dev_cmd(hba, lrbp, timeout); ufshcd_add_query_upiu_trace(hba, err ? UFS_QUERY_ERR : UFS_QUERY_COMP, (struct utp_upiu_req *)lrbp->ucd_rsp_ptr); @@ -6938,7 +6954,7 @@ static int ufshcd_issue_devman_upiu_cmd(struct ufs_hba *hba, ufshcd_add_query_upiu_trace(hba, UFS_QUERY_SEND, lrbp->ucd_req_ptr); - ufshcd_send_command(hba, tag); + ufshcd_send_command(hba, tag, hba->dev_cmd_queue); /* * ignore the returning value here - ufshcd_check_query_response is * bound to fail since dev_cmd.query and dev_cmd.type were left empty. @@ -7104,7 +7120,7 @@ int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *r hba->dev_cmd.complete = &wait; - ufshcd_send_command(hba, tag); + ufshcd_send_command(hba, tag, hba->dev_cmd_queue); err = ufshcd_wait_for_dev_cmd(hba, lrbp, ADVANCED_RPMB_REQ_TIMEOUT); diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 13a2f17daa8c..019b8cf23b29 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -224,6 +224,7 @@ struct ufs_dev_cmd { struct mutex lock; struct completion *complete; struct ufs_query query; + struct cq_entry *cqe; }; /** @@ -1078,6 +1079,8 @@ struct ufs_hba { * @cqe_dma_addr: completion queue dma address * @max_entries: max number of slots in this hardware queue * @id: hardware queue ID + * @sq_tp_slot: current slot to which SQ tail pointer is pointing + * @sq_lock: serialize submission queue access */ struct ufs_hw_queue { void __iomem *mcq_sq_head; @@ -1091,6 +1094,8 @@ struct ufs_hw_queue { dma_addr_t cqe_dma_addr; u32 max_entries; u32 id; + u32 sq_tail_slot; + spinlock_t sq_lock; }; static inline bool is_mcq_enabled(struct ufs_hba *hba) -- cgit v1.2.3 From f87b2c41822aad09aadac31b8ba22c0c0e639eee Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Fri, 13 Jan 2023 12:48:50 -0800 Subject: scsi: ufs: mcq: Add completion support of a CQE Add support for completing requests from Completion Queue. Some host controllers support vendor specific registers that provide a bitmap of all CQs which have at least one completed CQE. Add this support. The MCQ specification doesn't provide the Task Tag or its equivalent in the Completion Queue Entry. So use an indirect method to find the Task Tag from the Completion Queue Entry. Co-developed-by: Can Guo Signed-off-by: Can Guo Signed-off-by: Asutosh Das Reviewed-by: Bart Van Assche Reviewed-by: Manivannan Sadhasivam Reviewed-by: Stanley Chu Signed-off-by: Martin K. 
Petersen --- drivers/ufs/core/ufs-mcq.c | 61 ++++++++++++++++++++++++++++++++++++++++++ drivers/ufs/core/ufshcd-priv.h | 43 +++++++++++++++++++++++++++++ drivers/ufs/core/ufshcd.c | 37 +++++++++++++++++++++++++ drivers/ufs/host/ufs-qcom.c | 14 ++++++++++ drivers/ufs/host/ufs-qcom.h | 4 +++ include/ufs/ufshcd.h | 7 +++++ include/ufs/ufshci.h | 3 +++ 7 files changed, 169 insertions(+) (limited to 'include/ufs') diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index be84bcef2044..cd10d59c77e0 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -25,6 +25,7 @@ #define MCQ_CFG_MAC_MASK GENMASK(16, 8) #define MCQ_QCFG_SIZE 0x40 #define MCQ_ENTRY_SIZE_IN_DWORD 8 +#define CQE_UCD_BA GENMASK_ULL(63, 7) static int rw_queue_count_set(const char *val, const struct kernel_param *kp) { @@ -236,6 +237,63 @@ static void __iomem *mcq_opr_base(struct ufs_hba *hba, return opr->base + opr->stride * i; } +u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i) +{ + return readl(mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS); +} + +void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i) +{ + writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS); +} + +/* + * Current MCQ specification doesn't provide a Task Tag or its equivalent in + * the Completion Queue Entry. Find the Task Tag using an indirect method. + */ +static int ufshcd_mcq_get_tag(struct ufs_hba *hba, + struct ufs_hw_queue *hwq, + struct cq_entry *cqe) +{ + u64 addr; + + /* sizeof(struct utp_transfer_cmd_desc) must be a multiple of 128 */ + BUILD_BUG_ON(sizeof(struct utp_transfer_cmd_desc) & GENMASK(6, 0)); + + /* Bits 63:7 UCD base address, 6:5 are reserved, 4:0 is SQ ID */ + addr = (le64_to_cpu(cqe->command_desc_base_addr) & CQE_UCD_BA) - + hba->ucdl_dma_addr; + + return div_u64(addr, sizeof(struct utp_transfer_cmd_desc)); +} + +static void ufshcd_mcq_process_cqe(struct ufs_hba *hba, + struct ufs_hw_queue *hwq) +{ + struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq); + int tag = ufshcd_mcq_get_tag(hba, hwq, cqe); + + ufshcd_compl_one_cqe(hba, tag, cqe); +} + +unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba, + struct ufs_hw_queue *hwq) +{ + unsigned long completed_reqs = 0; + + ufshcd_mcq_update_cq_tail_slot(hwq); + while (!ufshcd_mcq_is_cq_empty(hwq)) { + ufshcd_mcq_process_cqe(hba, hwq); + ufshcd_mcq_inc_cq_head_slot(hwq); + completed_reqs++; + } + + if (completed_reqs) + ufshcd_mcq_update_cq_head(hwq); + + return completed_reqs; +} + void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba) { struct ufs_hw_queue *hwq; @@ -279,6 +337,9 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba) hwq->mcq_cq_head = mcq_opr_base(hba, OPR_CQD, i) + REG_CQHP; hwq->mcq_cq_tail = mcq_opr_base(hba, OPR_CQD, i) + REG_CQTP; + /* Reinitializing is needed upon HC reset */ + hwq->sq_tail_slot = hwq->cq_tail_slot = hwq->cq_head_slot = 0; + /* Enable Tail Entry Push Status interrupt only for non-poll queues */ if (i < hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]) writel(1, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIE); diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index e87ce19ac896..583fb862013e 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -69,6 +69,10 @@ int ufshcd_mcq_memory_alloc(struct ufs_hba *hba); void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba); void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds); void ufshcd_mcq_select_mcq_mode(struct ufs_hba *hba); +u32 ufshcd_mcq_read_cqis(struct ufs_hba 
*hba, int i); +void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i); +unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba, + struct ufs_hw_queue *hwq); struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba, struct request *req); @@ -264,6 +268,15 @@ static inline int ufshcd_mcq_vops_op_runtime_config(struct ufs_hba *hba) return -EOPNOTSUPP; } +static inline int ufshcd_vops_get_outstanding_cqs(struct ufs_hba *hba, + unsigned long *ocqs) +{ + if (hba->vops && hba->vops->get_outstanding_cqs) + return hba->vops->get_outstanding_cqs(hba, ocqs); + + return -EOPNOTSUPP; +} + extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[]; /** @@ -350,4 +363,34 @@ static inline void ufshcd_inc_sq_tail(struct ufs_hw_queue *q) writel(val, q->mcq_sq_tail); } +static inline void ufshcd_mcq_update_cq_tail_slot(struct ufs_hw_queue *q) +{ + u32 val = readl(q->mcq_cq_tail); + + q->cq_tail_slot = val / sizeof(struct cq_entry); +} + +static inline bool ufshcd_mcq_is_cq_empty(struct ufs_hw_queue *q) +{ + return q->cq_head_slot == q->cq_tail_slot; +} + +static inline void ufshcd_mcq_inc_cq_head_slot(struct ufs_hw_queue *q) +{ + q->cq_head_slot++; + if (q->cq_head_slot == q->max_entries) + q->cq_head_slot = 0; +} + +static inline void ufshcd_mcq_update_cq_head(struct ufs_hw_queue *q) +{ + writel(q->cq_head_slot * sizeof(struct cq_entry), q->mcq_cq_head); +} + +static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q) +{ + struct cq_entry *cqe = q->cqe_base_addr; + + return cqe + q->cq_head_slot; +} #endif /* _UFSHCD_PRIV_H_ */ diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 34947379320f..3afa07683ecf 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -6683,6 +6683,40 @@ static irqreturn_t ufshcd_tmc_handler(struct ufs_hba *hba) return ret; } +/** + * ufshcd_handle_mcq_cq_events - handle MCQ completion queue events + * @hba: per adapter instance + * + * Returns IRQ_HANDLED if interrupt is handled + */ +static irqreturn_t ufshcd_handle_mcq_cq_events(struct ufs_hba *hba) +{ + struct ufs_hw_queue *hwq; + unsigned long outstanding_cqs; + unsigned int nr_queues; + int i, ret; + u32 events; + + ret = ufshcd_vops_get_outstanding_cqs(hba, &outstanding_cqs); + if (ret) + outstanding_cqs = (1U << hba->nr_hw_queues) - 1; + + /* Exclude the poll queues */ + nr_queues = hba->nr_hw_queues - hba->nr_queues[HCTX_TYPE_POLL]; + for_each_set_bit(i, &outstanding_cqs, nr_queues) { + hwq = &hba->uhq[i]; + + events = ufshcd_mcq_read_cqis(hba, i); + if (events) + ufshcd_mcq_write_cqis(hba, events, i); + + if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS) + ufshcd_mcq_poll_cqe_nolock(hba, hwq); + } + + return IRQ_HANDLED; +} + /** * ufshcd_sl_intr - Interrupt service routine * @hba: per adapter instance @@ -6708,6 +6742,9 @@ static irqreturn_t ufshcd_sl_intr(struct ufs_hba *hba, u32 intr_status) if (intr_status & UTP_TRANSFER_REQ_COMPL) retval |= ufshcd_transfer_req_compl(hba); + if (intr_status & MCQ_CQ_EVENT_STATUS) + retval |= ufshcd_handle_mcq_cq_events(hba); + return retval; } diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c index e828b876e793..19b181447685 100644 --- a/drivers/ufs/host/ufs-qcom.c +++ b/drivers/ufs/host/ufs-qcom.c @@ -1525,6 +1525,19 @@ static int ufs_qcom_get_hba_mac(struct ufs_hba *hba) return MAX_SUPP_MAC; } +static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba, + unsigned long *ocqs) +{ + struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS]; + + if (!mcq_vs_res->base) + return -EINVAL; + + 
*ocqs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS); + + return 0; +} + /* * struct ufs_hba_qcom_vops - UFS QCOM specific variant operations * @@ -1552,6 +1565,7 @@ static const struct ufs_hba_variant_ops ufs_hba_qcom_vops = { .mcq_config_resource = ufs_qcom_mcq_config_resource, .get_hba_mac = ufs_qcom_get_hba_mac, .op_runtime_config = ufs_qcom_op_runtime_config, + .get_outstanding_cqs = ufs_qcom_get_outstanding_cqs, }; /** diff --git a/drivers/ufs/host/ufs-qcom.h b/drivers/ufs/host/ufs-qcom.h index 164e18b6b1fb..8050e214f722 100644 --- a/drivers/ufs/host/ufs-qcom.h +++ b/drivers/ufs/host/ufs-qcom.h @@ -71,6 +71,10 @@ enum { UFS_UFS_DBG_RD_EDTL_RAM = 0x1900, }; +enum { + UFS_MEM_CQIS_VS = 0x8, +}; + #define UFS_CNTLR_2_x_x_VEN_REGS_OFFSET(x) (0x000 + x) #define UFS_CNTLR_3_x_x_VEN_REGS_OFFSET(x) (0x400 + x) diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 019b8cf23b29..0dcb104a6713 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -304,6 +304,7 @@ struct ufs_pwr_mode_info { * @mcq_config_resource: called to configure MCQ platform resources * @get_hba_mac: called to get vendor specific mac value, mandatory for mcq mode * @op_runtime_config: called to config Operation and runtime regs Pointers + * @get_outstanding_cqs: called to get outstanding completion queues */ struct ufs_hba_variant_ops { const char *name; @@ -346,6 +347,8 @@ struct ufs_hba_variant_ops { int (*mcq_config_resource)(struct ufs_hba *hba); int (*get_hba_mac)(struct ufs_hba *hba); int (*op_runtime_config)(struct ufs_hba *hba); + int (*get_outstanding_cqs)(struct ufs_hba *hba, + unsigned long *ocqs); }; /* clock gating state */ @@ -1081,6 +1084,8 @@ struct ufs_hba { * @id: hardware queue ID * @sq_tp_slot: current slot to which SQ tail pointer is pointing * @sq_lock: serialize submission queue access + * @cq_tail_slot: current slot to which CQ tail pointer is pointing + * @cq_head_slot: current slot to which CQ head pointer is pointing */ struct ufs_hw_queue { void __iomem *mcq_sq_head; @@ -1096,6 +1101,8 @@ struct ufs_hw_queue { u32 id; u32 sq_tail_slot; spinlock_t sq_lock; + u32 cq_tail_slot; + u32 cq_head_slot; }; static inline bool is_mcq_enabled(struct ufs_hba *hba) diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h index b6ae5718387c..eca89f9f6aac 100644 --- a/include/ufs/ufshci.h +++ b/include/ufs/ufshci.h @@ -263,6 +263,9 @@ enum { /* UTMRLRSR - UTP Task Management Request Run-Stop Register 80h */ #define UTP_TASK_REQ_LIST_RUN_STOP_BIT 0x1 +/* CQISy - CQ y Interrupt Status Register */ +#define UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS 0x1 + /* UICCMD - UIC Command */ #define COMMAND_OPCODE_MASK 0xFF #define GEN_SELECTOR_INDEX_MASK 0xFFFF -- cgit v1.2.3 From ed975065c31c2a0372e13c19e8140b69814a98ba Mon Sep 17 00:00:00 2001 From: Asutosh Das Date: Fri, 13 Jan 2023 12:48:51 -0800 Subject: scsi: ufs: core: mcq: Add completion support in poll Complete CQE requests in poll. Assumption is that several poll completion may happen in different CPUs for the same completion queue. Hence a spin lock protection is added. Co-developed-by: Can Guo Signed-off-by: Can Guo Signed-off-by: Asutosh Das Reviewed-by: Bart Van Assche Reviewed-by: Manivannan Sadhasivam Reviewed-by: Stanley Chu Signed-off-by: Martin K. 
Petersen --- drivers/ufs/core/ufs-mcq.c | 13 +++++++++++++ drivers/ufs/core/ufshcd-priv.h | 2 ++ drivers/ufs/core/ufshcd.c | 7 +++++++ include/ufs/ufshcd.h | 2 ++ 4 files changed, 24 insertions(+) (limited to 'include/ufs') diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c index cd10d59c77e0..e710d19d4c55 100644 --- a/drivers/ufs/core/ufs-mcq.c +++ b/drivers/ufs/core/ufs-mcq.c @@ -294,6 +294,18 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba, return completed_reqs; } +unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba, + struct ufs_hw_queue *hwq) +{ + unsigned long completed_reqs; + + spin_lock(&hwq->cq_lock); + completed_reqs = ufshcd_mcq_poll_cqe_nolock(hba, hwq); + spin_unlock(&hwq->cq_lock); + + return completed_reqs; +} + void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba) { struct ufs_hw_queue *hwq; @@ -390,6 +402,7 @@ int ufshcd_mcq_init(struct ufs_hba *hba) hwq = &hba->uhq[i]; hwq->max_entries = hba->nutrs; spin_lock_init(&hwq->sq_lock); + spin_lock_init(&hwq->cq_lock); } /* The very first HW queue serves device commands */ diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index 583fb862013e..9b630907c4dc 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -75,6 +75,8 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba, struct ufs_hw_queue *hwq); struct ufs_hw_queue *ufshcd_mcq_req_to_hwq(struct ufs_hba *hba, struct request *req); +unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba, + struct ufs_hw_queue *hwq); #define UFSHCD_MCQ_IO_QUEUE_OFFSET 1 #define SD_ASCII_STD true diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 3afa07683ecf..cb1bca4d0c39 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -5461,6 +5461,13 @@ static int ufshcd_poll(struct Scsi_Host *shost, unsigned int queue_num) struct ufs_hba *hba = shost_priv(shost); unsigned long completed_reqs, flags; u32 tr_doorbell; + struct ufs_hw_queue *hwq; + + if (is_mcq_enabled(hba)) { + hwq = &hba->uhq[queue_num + UFSHCD_MCQ_IO_QUEUE_OFFSET]; + + return ufshcd_mcq_poll_cqe_lock(hba, hwq); + } spin_lock_irqsave(&hba->outstanding_lock, flags); tr_doorbell = ufshcd_readl(hba, REG_UTP_TRANSFER_REQ_DOOR_BELL); diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 0dcb104a6713..33973e9f6d6a 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -1086,6 +1086,7 @@ struct ufs_hba { * @sq_lock: serialize submission queue access * @cq_tail_slot: current slot to which CQ tail pointer is pointing * @cq_head_slot: current slot to which CQ head pointer is pointing + * @cq_lock: Synchronize between multiple polling instances */ struct ufs_hw_queue { void __iomem *mcq_sq_head; @@ -1103,6 +1104,7 @@ struct ufs_hw_queue { spinlock_t sq_lock; u32 cq_tail_slot; u32 cq_head_slot; + spinlock_t cq_lock; }; static inline bool is_mcq_enabled(struct ufs_hba *hba) -- cgit v1.2.3 From edb0db05607ce05a5e0df00518b58a811e9f548e Mon Sep 17 00:00:00 2001 From: Can Guo Date: Wed, 14 Dec 2022 19:06:20 -0800 Subject: scsi: ufs: core: Add Event Specific Interrupt configuration vendor specific ops As Event Specific Interrupt message format is not defined in UFSHCI JEDEC specs, and the ESI handling highly depends on how the format is designed, hence add a vendor specific ops such that SoC vendors can configure their own ESI handlers. If ESI vops is not provided or returning error, go with the legacy (central) interrupt way. 
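For context, a minimal sketch (not part of this series, and not any vendor's actual driver) of how an SoC driver might hook the .config_esi vop introduced by this patch. The driver name, ops struct, and capability helper below are hypothetical; the only behavior taken from the patch itself is that an error return makes the core keep the legacy central-interrupt path.

#include <linux/errno.h>
#include <ufs/ufshcd.h>

/* Hypothetical capability check -- stands in for real SoC-specific probing. */
static bool my_soc_has_esi(struct ufs_hba *hba)
{
	return false;
}

/* Hypothetical vendor implementation of the new .config_esi hook. */
static int my_ufs_config_esi(struct ufs_hba *hba)
{
	if (!my_soc_has_esi(hba))
		return -EOPNOTSUPP;	/* core falls back to the central MCQ interrupt */

	/* Program SoC-specific ESI message routing/registers here. */
	return 0;
}

static const struct ufs_hba_variant_ops my_ufs_hba_vops = {
	.name		= "my-ufs",
	.config_esi	= my_ufs_config_esi,
};
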
Signed-off-by: Can Guo Reviewed-by: Bart Van Assche Signed-off-by: Martin K. Petersen --- drivers/ufs/core/ufshcd-priv.h | 8 ++++++++ drivers/ufs/core/ufshcd.c | 5 +++++ include/ufs/ufshcd.h | 2 ++ 3 files changed, 15 insertions(+) (limited to 'include/ufs') diff --git a/drivers/ufs/core/ufshcd-priv.h b/drivers/ufs/core/ufshcd-priv.h index 9b630907c4dc..529f8507a5e4 100644 --- a/drivers/ufs/core/ufshcd-priv.h +++ b/drivers/ufs/core/ufshcd-priv.h @@ -279,6 +279,14 @@ static inline int ufshcd_vops_get_outstanding_cqs(struct ufs_hba *hba, return -EOPNOTSUPP; } +static inline int ufshcd_mcq_vops_config_esi(struct ufs_hba *hba) +{ + if (hba->vops && hba->vops->config_esi) + return hba->vops->config_esi(hba); + + return -EOPNOTSUPP; +} + extern const struct ufs_pm_lvl_states ufs_pm_lvl_states[]; /** diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 54717c497518..064a6d8605c1 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -8449,6 +8449,11 @@ err: static void ufshcd_config_mcq(struct ufs_hba *hba) { + int ret; + + ret = ufshcd_mcq_vops_config_esi(hba); + dev_info(hba->dev, "ESI %sconfigured\n", ret ? "is not " : ""); + ufshcd_enable_intr(hba, UFSHCD_ENABLE_MCQ_INTRS); ufshcd_mcq_make_queues_operational(hba); ufshcd_mcq_config_mac(hba, hba->nutrs); diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 33973e9f6d6a..1285787e2628 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -305,6 +305,7 @@ struct ufs_pwr_mode_info { * @get_hba_mac: called to get vendor specific mac value, mandatory for mcq mode * @op_runtime_config: called to config Operation and runtime regs Pointers * @get_outstanding_cqs: called to get outstanding completion queues + * @config_esi: called to config Event Specific Interrupt */ struct ufs_hba_variant_ops { const char *name; @@ -349,6 +350,7 @@ struct ufs_hba_variant_ops { int (*op_runtime_config)(struct ufs_hba *hba); int (*get_outstanding_cqs)(struct ufs_hba *hba, unsigned long *ocqs); + int (*config_esi)(struct ufs_hba *hba); }; /* clock gating state */ -- cgit v1.2.3 From e02288e0265fe316a16d48ec6dd7b7fd54d66e3e Mon Sep 17 00:00:00 2001 From: Can Guo Date: Wed, 14 Dec 2022 19:06:21 -0800 Subject: scsi: ufs: core: mcq: Add Event Specific Interrupt enable and config functions Add and export two functions to enable ESI and config ESI base addresses. The calls to these exported functions will be added by the next patch in this series. Signed-off-by: Can Guo Reviewed-by: Bart Van Assche Signed-off-by: Martin K. 
From e02288e0265fe316a16d48ec6dd7b7fd54d66e3e Mon Sep 17 00:00:00 2001
From: Can Guo
Date: Wed, 14 Dec 2022 19:06:21 -0800
Subject: scsi: ufs: core: mcq: Add Event Specific Interrupt enable and config functions

Add and export two functions to enable ESI and config ESI base addresses.
The calls to these exported functions will be added by the next patch in
this series.

Signed-off-by: Can Guo
Reviewed-by: Bart Van Assche
Signed-off-by: Martin K. Petersen
---
 drivers/ufs/core/ufs-mcq.c | 16 ++++++++++++++++
 include/ufs/ufshcd.h       |  6 ++++++
 include/ufs/ufshci.h       |  2 ++
 3 files changed, 24 insertions(+)

(limited to 'include/ufs')

diff --git a/drivers/ufs/core/ufs-mcq.c b/drivers/ufs/core/ufs-mcq.c
index e710d19d4c55..dd476f9e797c 100644
--- a/drivers/ufs/core/ufs-mcq.c
+++ b/drivers/ufs/core/ufs-mcq.c
@@ -246,6 +246,7 @@ void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i)
 {
 	writel(val, mcq_opr_base(hba, OPR_CQIS, i) + REG_CQIS);
 }
+EXPORT_SYMBOL_GPL(ufshcd_mcq_write_cqis);
 
 /*
  * Current MCQ specification doesn't provide a Task Tag or its equivalent in
@@ -293,6 +294,7 @@ unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
 
 	return completed_reqs;
 }
+EXPORT_SYMBOL_GPL(ufshcd_mcq_poll_cqe_nolock);
 
 unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
 				       struct ufs_hw_queue *hwq)
@@ -370,6 +372,20 @@ void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba)
 	}
 }
 
+void ufshcd_mcq_enable_esi(struct ufs_hba *hba)
+{
+	ufshcd_writel(hba, ufshcd_readl(hba, REG_UFS_MEM_CFG) | 0x2,
+		      REG_UFS_MEM_CFG);
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_enable_esi);
+
+void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg)
+{
+	ufshcd_writel(hba, msg->address_lo, REG_UFS_ESILBA);
+	ufshcd_writel(hba, msg->address_hi, REG_UFS_ESIUBA);
+}
+EXPORT_SYMBOL_GPL(ufshcd_mcq_config_esi);
+
 int ufshcd_mcq_init(struct ufs_hba *hba)
 {
 	struct Scsi_Host *host = hba->host;
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 1285787e2628..1779238d8a56 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -16,6 +16,7 @@
 #include 
 #include 
 #include 
+#include 
 #include 
 #include 
 #include 
@@ -1241,6 +1242,11 @@ void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
 void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
 void ufshcd_hba_stop(struct ufs_hba *hba);
 void ufshcd_schedule_eh_work(struct ufs_hba *hba);
+void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
+unsigned long ufshcd_mcq_poll_cqe_nolock(struct ufs_hba *hba,
+					 struct ufs_hw_queue *hwq);
+void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
+void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);
 
 /**
  * ufshcd_set_variant - set variant specific data to the hba
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index eca89f9f6aac..11424bb03814 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -59,6 +59,8 @@ enum {
 
 	REG_UFS_MEM_CFG				= 0x300,
 	REG_UFS_MCQ_CFG				= 0x380,
+	REG_UFS_ESILBA				= 0x384,
+	REG_UFS_ESIUBA				= 0x388,
 	UFSHCI_CRYPTO_REG_SPACE_SIZE		= 0x400,
 };
-- cgit v1.2.3
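A rough sketch of how a SoC driver's config_esi hook might combine the two new exports with the kernel's platform-MSI API: allocate one vector per I/O queue, capture the message address in the write_msg callback via ufshcd_mcq_config_esi(), then turn ESI on with ufshcd_mcq_enable_esi(). The example_* names are hypothetical, the vector count is an assumption, and the per-vector request_irq() plumbing is omitted.

#include <linux/msi.h>

static void example_write_esi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct ufs_hba *hba = dev_get_drvdata(msi_desc_to_dev(desc));

	/* All ESI vectors share one base address pair; program it once. */
	if (desc->msi_index == 0)
		ufshcd_mcq_config_esi(hba, msg);
}

static int example_config_esi(struct ufs_hba *hba)
{
	/* Assumption: one ESI vector per I/O queue (queue 0 handles dev commands). */
	unsigned int nr_irqs = hba->nr_hw_queues - 1;
	int ret;

	ret = platform_msi_domain_alloc_irqs(hba->dev, nr_irqs,
					     example_write_esi_msg);
	if (ret)
		return ret;	/* core then falls back to the central interrupt */

	/* request_irq() for each allocated descriptor would go here. */

	ufshcd_mcq_enable_esi(hba);
	return 0;
}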
From 3730bea883cd8f74fd9b2b31d51665c74cf73362 Mon Sep 17 00:00:00 2001
From: Alim Akhtar
Date: Sat, 14 Jan 2023 07:50:10 +0530
Subject: scsi: ufs: ufs: Remove duplicate entry

PA_GRANULARITY is duplicated, delete one of the entries.

Signed-off-by: Alim Akhtar
Reviewed-by: Bart Van Assche
Link: https://lore.kernel.org/r/20230114022010.27088-1-alim.akhtar@samsung.com
Signed-off-by: Martin K. Petersen
---
 include/ufs/unipro.h | 1 -
 1 file changed, 1 deletion(-)

(limited to 'include/ufs')

diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h
index 6c553f98fe57..dc9dd1d23f0f 100644
--- a/include/ufs/unipro.h
+++ b/include/ufs/unipro.h
@@ -141,7 +141,6 @@
 #define PA_SAVECONFIGTIME	0x15A4
 #define PA_RXHSUNTERMCAP	0x15A5
 #define PA_RXLSTERMCAP		0x15A6
-#define PA_GRANULARITY		0x15AA
 #define PA_HIBERN8TIME		0x15A7
 #define PA_LOCALVERINFO		0x15A9
 #define PA_GRANULARITY		0x15AA
-- cgit v1.2.3

From 86bd0c4a2a5dc4265884648cb92c681646509692 Mon Sep 17 00:00:00 2001
From: Bart Van Assche
Date: Thu, 12 Jan 2023 15:42:13 -0800
Subject: scsi: ufs: exynos: Fix DMA alignment for PAGE_SIZE != 4096

The Exynos UFS controller only supports scatter/gather list elements that
are aligned on a 4 KiB boundary. Fix DMA alignment in case
PAGE_SIZE != 4096. Rename UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE into
UFSHCD_QUIRK_4KB_DMA_ALIGNMENT.

Cc: Kiwoong Kim
Fixes: 2b2bfc8aa519 ("scsi: ufs: Introduce a quirk to allow only page-aligned sg entries")
Signed-off-by: Bart Van Assche
Reviewed-by: Alim Akhtar
Tested-by: Alim Akhtar
Signed-off-by: Martin K. Petersen
---
 drivers/ufs/core/ufshcd.c     | 4 ++--
 drivers/ufs/host/ufs-exynos.c | 2 +-
 include/ufs/ufshcd.h          | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

(limited to 'include/ufs')

diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 064a6d8605c1..e626810a9abb 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -5107,8 +5107,8 @@ static int ufshcd_slave_configure(struct scsi_device *sdev)
 	ufshcd_hpb_configure(hba, sdev);
 
 	blk_queue_update_dma_pad(q, PRDT_DATA_BYTE_COUNT_PAD - 1);
-	if (hba->quirks & UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE)
-		blk_queue_update_dma_alignment(q, PAGE_SIZE - 1);
+	if (hba->quirks & UFSHCD_QUIRK_4KB_DMA_ALIGNMENT)
+		blk_queue_update_dma_alignment(q, 4096 - 1);
 	/*
 	 * Block runtime-pm until all consumers are added.
 	 * Refer ufshcd_setup_links().
diff --git a/drivers/ufs/host/ufs-exynos.c b/drivers/ufs/host/ufs-exynos.c
index c3628a8645a5..3cdac89a28b8 100644
--- a/drivers/ufs/host/ufs-exynos.c
+++ b/drivers/ufs/host/ufs-exynos.c
@@ -1673,7 +1673,7 @@ static const struct exynos_ufs_drv_data exynos_ufs_drvs = {
 				  UFSHCD_QUIRK_BROKEN_OCS_FATAL_ERROR |
 				  UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL |
 				  UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING |
-				  UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE,
+				  UFSHCD_QUIRK_4KB_DMA_ALIGNMENT,
 	.opts			= EXYNOS_UFS_OPT_HAS_APB_CLK_CTRL |
 				  EXYNOS_UFS_OPT_BROKEN_AUTO_CLK_CTRL |
 				  EXYNOS_UFS_OPT_BROKEN_RX_SEL_IDX |
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 1779238d8a56..57a5af27522a 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -583,9 +583,9 @@ enum ufshcd_quirks {
 	UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING	= 1 << 13,
 
 	/*
-	 * This quirk allows only sg entries aligned with page size.
+	 * Align DMA SG entries on a 4 KiB boundary.
 	 */
-	UFSHCD_QUIRK_ALIGN_SG_WITH_PAGE_SIZE		= 1 << 14,
+	UFSHCD_QUIRK_4KB_DMA_ALIGNMENT			= 1 << 14,
 
 	/*
 	 * This quirk needs to be enabled if the host controller does not
-- cgit v1.2.3
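As a usage note, the rename does not change how a host driver opts in: any controller with the same 4 KiB scatter/gather restriction sets the quirk from its ->init() variant op. A minimal sketch, with a made-up driver name:

static int example_ufs_init(struct ufs_hba *hba)
{
	/* Controller needs 4 KiB aligned SG elements regardless of PAGE_SIZE. */
	hba->quirks |= UFSHCD_QUIRK_4KB_DMA_ALIGNMENT;

	return 0;
}

static const struct ufs_hba_variant_ops example_ufs_vops = {
	.name	= "example-ufs",
	.init	= example_ufs_init,
};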
From 88441a8d355dcbb86aa69f82934ae1ff0fccfa83 Mon Sep 17 00:00:00 2001
From: Anjana Hari
Date: Thu, 2 Feb 2023 21:40:45 +0530
Subject: scsi: ufs: core: Add hibernation callbacks

Add freeze, thaw, and restore callbacks for hibernate and restore
functionality.

Link: https://lore.kernel.org/r/20230202161045.3956-2-quic_ahari@quicinc.com
Signed-off-by: Anjana Hari
Reviewed-by: Bart Van Assche
Signed-off-by: Martin K. Petersen
---
 drivers/ufs/core/ufshcd.c   | 51 +++++++++++++++++++++++++++++++++++++++++++++
 drivers/ufs/host/ufs-qcom.c |  8 ++++++-
 include/ufs/ufshcd.h        |  4 ++++
 3 files changed, 62 insertions(+), 1 deletion(-)

(limited to 'include/ufs')

diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index ffa71c01a096..0ba275487f49 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -9796,6 +9796,7 @@ static int ufshcd_resume(struct ufs_hba *hba)
 
 	/* enable the host irq as host controller would be active soon */
 	ufshcd_enable_irq(hba);
+
 	goto out;
 
 disable_vreg:
@@ -9959,6 +9960,56 @@ void ufshcd_remove(struct ufs_hba *hba)
 }
 EXPORT_SYMBOL_GPL(ufshcd_remove);
 
+#ifdef CONFIG_PM_SLEEP
+int ufshcd_system_freeze(struct device *dev)
+{
+
+	return ufshcd_system_suspend(dev);
+
+}
+EXPORT_SYMBOL_GPL(ufshcd_system_freeze);
+
+int ufshcd_system_restore(struct device *dev)
+{
+
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	int ret;
+
+	ret = ufshcd_system_resume(dev);
+	if (ret)
+		return ret;
+
+	/* Configure UTRL and UTMRL base address registers */
+	ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
+		      REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+	ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
+		      REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+	ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
+		      REG_UTP_TASK_REQ_LIST_BASE_L);
+	ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
+		      REG_UTP_TASK_REQ_LIST_BASE_H);
+	/*
+	 * Make sure that UTRL and UTMRL base address registers
+	 * are updated with the latest queue addresses. Only after
+	 * updating these addresses, we can queue the new commands.
+	 */
+	mb();
+
+	/* Resuming from hibernate, assume that link was OFF */
+	ufshcd_set_link_off(hba);
+
+	return 0;
+
+}
+EXPORT_SYMBOL_GPL(ufshcd_system_restore);
+
+int ufshcd_system_thaw(struct device *dev)
+{
+	return ufshcd_system_resume(dev);
+}
+EXPORT_SYMBOL_GPL(ufshcd_system_thaw);
+#endif /* CONFIG_PM_SLEEP */
+
 /**
  * ufshcd_dealloc_host - deallocate Host Bus Adapter (HBA)
  * @hba: pointer to Host Bus Adapter (HBA)
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 2ad03021c92f..34fc453f3eb1 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -1710,10 +1710,16 @@ MODULE_DEVICE_TABLE(acpi, ufs_qcom_acpi_match);
 #endif
 
 static const struct dev_pm_ops ufs_qcom_pm_ops = {
-	SET_SYSTEM_SLEEP_PM_OPS(ufshcd_system_suspend, ufshcd_system_resume)
 	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
 	.prepare	 = ufshcd_suspend_prepare,
 	.complete	 = ufshcd_resume_complete,
+#ifdef CONFIG_PM_SLEEP
+	.suspend         = ufshcd_system_suspend,
+	.resume          = ufshcd_system_resume,
+	.freeze          = ufshcd_system_freeze,
+	.restore         = ufshcd_system_restore,
+	.thaw            = ufshcd_system_thaw,
+#endif
 };
 
 static struct platform_driver ufs_qcom_pltform = {
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 57a5af27522a..ed9e3d5addb3 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -1276,8 +1276,12 @@ extern int ufshcd_runtime_resume(struct device *dev);
 #ifdef CONFIG_PM_SLEEP
 extern int ufshcd_system_suspend(struct device *dev);
 extern int ufshcd_system_resume(struct device *dev);
+extern int ufshcd_system_freeze(struct device *dev);
+extern int ufshcd_system_thaw(struct device *dev);
+extern int ufshcd_system_restore(struct device *dev);
 #endif
 extern int ufshcd_shutdown(struct ufs_hba *hba);
+
 extern int ufshcd_dme_configure_adapt(struct ufs_hba *hba,
 				      int agreed_gear,
 				      int adapt_val);
-- cgit v1.2.3
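The qcom hunk above drops SET_SYSTEM_SLEEP_PM_OPS() because that macro would alias .freeze/.restore to the plain suspend/resume helpers, whereas restore from a hibernation image must also reprogram the UTRL/UTMRL base address registers and mark the link off, which only ufshcd_system_restore() does. Another UFS platform glue driver could adopt the callbacks the same way; the sketch below uses a made-up driver name and is illustrative only.

static const struct dev_pm_ops example_ufs_pm_ops = {
	SET_RUNTIME_PM_OPS(ufshcd_runtime_suspend, ufshcd_runtime_resume, NULL)
	.prepare	= ufshcd_suspend_prepare,
	.complete	= ufshcd_resume_complete,
#ifdef CONFIG_PM_SLEEP
	.suspend	= ufshcd_system_suspend,
	.resume		= ufshcd_system_resume,
	.freeze		= ufshcd_system_freeze,
	.restore	= ufshcd_system_restore,
	.thaw		= ufshcd_system_thaw,
#endif
};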