Diffstat (limited to 'drivers/spi/spi.c')
-rw-r--r--  drivers/spi/spi.c | 512
1 file changed, 302 insertions(+), 210 deletions(-)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index a7194f29c200..a2c467d9e92f 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -459,7 +459,7 @@ static void spi_shutdown(struct device *dev)
}
}
-struct bus_type spi_bus_type = {
+const struct bus_type spi_bus_type = {
.name = "spi",
.dev_groups = spi_dev_groups,
.match = spi_match_device,
@@ -584,7 +584,7 @@ struct spi_device *spi_alloc_device(struct spi_controller *ctlr)
return NULL;
}
- spi->master = spi->controller = ctlr;
+ spi->controller = ctlr;
spi->dev.parent = &ctlr->dev;
spi->dev.bus = &spi_bus_type;
spi->dev.release = spidev_release;
@@ -608,23 +608,51 @@ static void spi_dev_set_name(struct spi_device *spi)
spi_get_chipselect(spi, 0));
}
+/*
+ * Zero(0) is a valid physical CS value and can be located at any
+ * logical CS in spi->chip_select[]. If all physical CS entries were
+ * initialized to 0, it would be difficult to differentiate between a
+ * valid physical CS 0 and an unused logical CS whose physical CS
+ * happens to be 0. To avoid this, initialize all CS entries to -1.
+ * All unused logical CS will then have a physical CS value of -1 and
+ * can be ignored when performing physical CS validity checks.
+ */
+#define SPI_INVALID_CS ((s8)-1)
+
+static inline bool is_valid_cs(s8 chip_select)
+{
+ return chip_select != SPI_INVALID_CS;
+}
+
+static inline int spi_dev_check_cs(struct device *dev,
+ struct spi_device *spi, u8 idx,
+ struct spi_device *new_spi, u8 new_idx)
+{
+ u8 cs, cs_new;
+ u8 idx_new;
+
+ cs = spi_get_chipselect(spi, idx);
+ for (idx_new = new_idx; idx_new < SPI_CS_CNT_MAX; idx_new++) {
+ cs_new = spi_get_chipselect(new_spi, idx_new);
+ if (is_valid_cs(cs) && is_valid_cs(cs_new) && cs == cs_new) {
+ dev_err(dev, "chipselect %u already in use\n", cs_new);
+ return -EBUSY;
+ }
+ }
+ return 0;
+}
+
static int spi_dev_check(struct device *dev, void *data)
{
struct spi_device *spi = to_spi_device(dev);
struct spi_device *new_spi = data;
- int idx, nw_idx;
- u8 cs, cs_nw;
+ int status, idx;
if (spi->controller == new_spi->controller) {
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
- cs = spi_get_chipselect(spi, idx);
- for (nw_idx = 0; nw_idx < SPI_CS_CNT_MAX; nw_idx++) {
- cs_nw = spi_get_chipselect(new_spi, nw_idx);
- if (cs != 0xFF && cs_nw != 0xFF && cs == cs_nw) {
- dev_err(dev, "chipselect %d already in use\n", cs_nw);
- return -EBUSY;
- }
- }
+ status = spi_dev_check_cs(dev, spi, idx, new_spi, 0);
+ if (status)
+ return status;
}
}
return 0;
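For illustration, a minimal standalone sketch (not part of this patch) of the sentinel convention described in the comment above SPI_INVALID_CS: unused logical CS slots hold -1, so a real physical CS of 0 stays distinguishable. All names below are hypothetical, not kernel API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CS_CNT_MAX 4
#define INVALID_CS ((int8_t)-1)

static bool cs_is_valid(int8_t cs)
{
	return cs != INVALID_CS;
}

int main(void)
{
	/* All slots start out unused... */
	int8_t chip_select[CS_CNT_MAX] = {
		INVALID_CS, INVALID_CS, INVALID_CS, INVALID_CS
	};

	/* ...then logical CS 0 is mapped to physical CS 0. */
	chip_select[0] = 0;

	for (int i = 0; i < CS_CNT_MAX; i++)
		printf("logical CS %d: %s\n", i,
		       cs_is_valid(chip_select[i]) ? "in use" : "unused");
	return 0;
}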
@@ -640,13 +668,13 @@ static int __spi_add_device(struct spi_device *spi)
{
struct spi_controller *ctlr = spi->controller;
struct device *dev = ctlr->dev.parent;
- int status, idx, nw_idx;
- u8 cs, nw_cs;
+ int status, idx;
+ u8 cs;
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
/* Chipselects are numbered 0..max; validate. */
cs = spi_get_chipselect(spi, idx);
- if (cs != 0xFF && cs >= ctlr->num_chipselect) {
+ if (is_valid_cs(cs) && cs >= ctlr->num_chipselect) {
dev_err(dev, "cs%d >= max %d\n", spi_get_chipselect(spi, idx),
ctlr->num_chipselect);
return -EINVAL;
@@ -658,14 +686,9 @@ static int __spi_add_device(struct spi_device *spi)
* For example, spi->chip_select[0] != spi->chip_select[1] and so on.
*/
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
- cs = spi_get_chipselect(spi, idx);
- for (nw_idx = idx + 1; nw_idx < SPI_CS_CNT_MAX; nw_idx++) {
- nw_cs = spi_get_chipselect(spi, nw_idx);
- if (cs != 0xFF && nw_cs != 0xFF && cs == nw_cs) {
- dev_err(dev, "chipselect %d already in use\n", nw_cs);
- return -EBUSY;
- }
- }
+ status = spi_dev_check_cs(dev, spi, idx, spi, idx + 1);
+ if (status)
+ return status;
}
/* Set the bus ID string */
@@ -691,7 +714,7 @@ static int __spi_add_device(struct spi_device *spi)
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
cs = spi_get_chipselect(spi, idx);
- if (cs != 0xFF)
+ if (is_valid_cs(cs))
spi_set_csgpiod(spi, idx, ctlr->cs_gpiods[cs]);
}
}
@@ -745,6 +768,14 @@ int spi_add_device(struct spi_device *spi)
}
EXPORT_SYMBOL_GPL(spi_add_device);
+static void spi_set_all_cs_unused(struct spi_device *spi)
+{
+ u8 idx;
+
+ for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
+ spi_set_chipselect(spi, idx, SPI_INVALID_CS);
+}
+
/**
* spi_new_device - instantiate one new SPI device
* @ctlr: Controller to which device is connected
@@ -764,7 +795,6 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
{
struct spi_device *proxy;
int status;
- u8 idx;
/*
* NOTE: caller did any chip->bus_num checks necessary.
@@ -780,19 +810,10 @@ struct spi_device *spi_new_device(struct spi_controller *ctlr,
WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
- /*
- * Zero(0) is a valid physical CS value and can be located at any
- * logical CS in the spi->chip_select[]. If all the physical CS
- * are initialized to 0 then It would be difficult to differentiate
- * between a valid physical CS 0 & an unused logical CS whose physical
- * CS can be 0. As a solution to this issue initialize all the CS to 0xFF.
- * Now all the unused logical CS will have 0xFF physical CS value & can be
- * ignore while performing physical CS validity checks.
- */
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
- spi_set_chipselect(proxy, idx, 0xFF);
-
+ /* Use provided chip-select for proxy device */
+ spi_set_all_cs_unused(proxy);
spi_set_chipselect(proxy, 0, chip->chip_select);
+
proxy->max_speed_hz = chip->max_speed_hz;
proxy->mode = chip->mode;
proxy->irq = chip->irq;
@@ -1007,7 +1028,7 @@ static inline bool spi_is_last_cs(struct spi_device *spi)
bool last = false;
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
- if ((spi->cs_index_mask >> idx) & 0x01) {
+ if (spi->cs_index_mask & BIT(idx)) {
if (spi->controller->last_cs[idx] == spi_get_chipselect(spi, idx))
last = true;
}
@@ -1036,7 +1057,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
spi->controller->last_cs_index_mask = spi->cs_index_mask;
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
- spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : -1;
+ spi->controller->last_cs[idx] = enable ? spi_get_chipselect(spi, 0) : SPI_INVALID_CS;
spi->controller->last_cs_mode_high = spi->mode & SPI_CS_HIGH;
if (spi->mode & SPI_CS_HIGH)
@@ -1062,8 +1083,7 @@ static void spi_set_cs(struct spi_device *spi, bool enable, bool force)
* into account.
*/
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++) {
- if (((spi->cs_index_mask >> idx) & 0x01) &&
- spi_get_csgpiod(spi, idx)) {
+ if ((spi->cs_index_mask & BIT(idx)) && spi_get_csgpiod(spi, idx)) {
if (has_acpi_companion(&spi->dev))
gpiod_set_value_cansleep(spi_get_csgpiod(spi, idx),
!enable);
@@ -1751,39 +1771,6 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
trace_spi_message_start(msg);
- /*
- * If an SPI controller does not support toggling the CS line on each
- * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
- * for the CS line, we can emulate the CS-per-word hardware function by
- * splitting transfers into one-word transfers and ensuring that
- * cs_change is set for each transfer.
- */
- if ((msg->spi->mode & SPI_CS_WORD) && (!(ctlr->mode_bits & SPI_CS_WORD) ||
- spi_is_csgpiod(msg->spi))) {
- ret = spi_split_transfers_maxwords(ctlr, msg, 1, GFP_KERNEL);
- if (ret) {
- msg->status = ret;
- spi_finalize_current_message(ctlr);
- return ret;
- }
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- /* Don't change cs_change on the last entry in the list */
- if (list_is_last(&xfer->transfer_list, &msg->transfers))
- break;
- xfer->cs_change = 1;
- }
- } else {
- ret = spi_split_transfers_maxsize(ctlr, msg,
- spi_max_transfer_size(msg->spi),
- GFP_KERNEL | GFP_DMA);
- if (ret) {
- msg->status = ret;
- spi_finalize_current_message(ctlr);
- return ret;
- }
- }
-
if (ctlr->prepare_message) {
ret = ctlr->prepare_message(ctlr, msg);
if (ret) {
@@ -2111,6 +2098,43 @@ struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr)
}
EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
+/*
+ * __spi_unoptimize_message - shared implementation of spi_unoptimize_message()
+ * and spi_maybe_unoptimize_message()
+ * @msg: the message to unoptimize
+ *
+ * Peripheral drivers should use spi_unoptimize_message() and callers inside
+ * core should use spi_maybe_unoptimize_message() rather than calling this
+ * function directly.
+ *
+ * It is not valid to call this on a message that is not currently optimized.
+ */
+static void __spi_unoptimize_message(struct spi_message *msg)
+{
+ struct spi_controller *ctlr = msg->spi->controller;
+
+ if (ctlr->unoptimize_message)
+ ctlr->unoptimize_message(msg);
+
+ spi_res_release(ctlr, msg);
+
+ msg->optimized = false;
+ msg->opt_state = NULL;
+}
+
+/*
+ * spi_maybe_unoptimize_message - unoptimize msg not managed by a peripheral
+ * @msg: the message to unoptimize
+ *
+ * This function is used to unoptimize a message if and only if it was
+ * optimized by the core (via spi_maybe_optimize_message()).
+ */
+static void spi_maybe_unoptimize_message(struct spi_message *msg)
+{
+ if (!msg->pre_optimized && msg->optimized)
+ __spi_unoptimize_message(msg);
+}
+
/**
* spi_finalize_current_message() - the current message is complete
* @ctlr: the controller to return the message to
@@ -2139,15 +2163,6 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
spi_unmap_msg(ctlr, mesg);
- /*
- * In the prepare_messages callback the SPI bus has the opportunity
- * to split a transfer to smaller chunks.
- *
- * Release the split transfers here since spi_map_msg() is done on
- * the split transfers.
- */
- spi_res_release(ctlr, mesg);
-
if (mesg->prepared && ctlr->unprepare_message) {
ret = ctlr->unprepare_message(ctlr, mesg);
if (ret) {
@@ -2158,6 +2173,8 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
mesg->prepared = false;
+ spi_maybe_unoptimize_message(mesg);
+
WRITE_ONCE(ctlr->cur_msg_incomplete, false);
smp_mb(); /* See __spi_pump_transfer_message()... */
if (READ_ONCE(ctlr->cur_msg_need_completion))
@@ -2425,17 +2442,7 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
return -EINVAL;
}
- /*
- * Zero(0) is a valid physical CS value and can be located at any
- * logical CS in the spi->chip_select[]. If all the physical CS
- * are initialized to 0 then It would be difficult to differentiate
- * between a valid physical CS 0 & an unused logical CS whose physical
- * CS can be 0. As a solution to this issue initialize all the CS to 0xFF.
- * Now all the unused logical CS will have 0xFF physical CS value & can be
- * ignore while performing physical CS validity checks.
- */
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
- spi_set_chipselect(spi, idx, 0xFF);
+ spi_set_all_cs_unused(spi);
/* Device address */
rc = of_property_read_variable_u32_array(nc, "reg", &cs[0], 1,
@@ -2459,14 +2466,10 @@ static int of_spi_parse_dt(struct spi_controller *ctlr, struct spi_device *spi,
spi_set_chipselect(spi, idx, cs[idx]);
/*
- * spi->chip_select[i] gives the corresponding physical CS for logical CS i
- * logical CS number is represented by setting the ith bit in spi->cs_index_mask
- * So, for example, if spi->cs_index_mask = 0x01 then logical CS number is 0 and
- * spi->chip_select[0] will give the physical CS.
- * By default spi->chip_select[0] will hold the physical CS number so, set
- * spi->cs_index_mask as 0x01.
+ * By default spi->chip_select[0] will hold the physical CS number,
+ * so set bit 0 in spi->cs_index_mask.
*/
- spi->cs_index_mask = 0x01;
+ spi->cs_index_mask = BIT(0);
/* Device speed */
if (!of_property_read_u32(nc, "spi-max-frequency", &value))
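As a rough illustration of the mapping the comment above refers to (logical CS i is active when bit i of spi->cs_index_mask is set, and spi->chip_select[i] then gives the physical CS), here is a minimal standalone sketch; the sizes and values are illustrative only, not taken from this patch.

#include <stdint.h>
#include <stdio.h>

#define CS_CNT_MAX 4
#define BIT(n) (1U << (n))

int main(void)
{
	/* chip_select[i] holds the physical CS for logical CS i. */
	int8_t chip_select[CS_CNT_MAX] = { 3, -1, -1, -1 };
	/* Only logical CS 0 is in use, hence only bit 0 is set. */
	uint8_t cs_index_mask = BIT(0);

	for (int i = 0; i < CS_CNT_MAX; i++)
		if (cs_index_mask & BIT(i))
			printf("logical CS %d -> physical CS %d\n",
			       i, chip_select[i]);
	return 0;
}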
@@ -2572,7 +2575,6 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
struct spi_controller *ctlr = spi->controller;
struct spi_device *ancillary;
int rc = 0;
- u8 idx;
/* Alloc an spi_device */
ancillary = spi_alloc_device(ctlr);
@@ -2583,33 +2585,18 @@ struct spi_device *spi_new_ancillary_device(struct spi_device *spi,
strscpy(ancillary->modalias, "dummy", sizeof(ancillary->modalias));
- /*
- * Zero(0) is a valid physical CS value and can be located at any
- * logical CS in the spi->chip_select[]. If all the physical CS
- * are initialized to 0 then It would be difficult to differentiate
- * between a valid physical CS 0 & an unused logical CS whose physical
- * CS can be 0. As a solution to this issue initialize all the CS to 0xFF.
- * Now all the unused logical CS will have 0xFF physical CS value & can be
- * ignore while performing physical CS validity checks.
- */
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
- spi_set_chipselect(ancillary, idx, 0xFF);
-
/* Use provided chip-select for ancillary device */
+ spi_set_all_cs_unused(ancillary);
spi_set_chipselect(ancillary, 0, chip_select);
/* Take over SPI mode/speed from SPI main device */
ancillary->max_speed_hz = spi->max_speed_hz;
ancillary->mode = spi->mode;
/*
- * spi->chip_select[i] gives the corresponding physical CS for logical CS i
- * logical CS number is represented by setting the ith bit in spi->cs_index_mask
- * So, for example, if spi->cs_index_mask = 0x01 then logical CS number is 0 and
- * spi->chip_select[0] will give the physical CS.
- * By default spi->chip_select[0] will hold the physical CS number so, set
- * spi->cs_index_mask as 0x01.
+ * By default spi->chip_select[0] will hold the physical CS number,
+ * so set bit 0 in spi->cs_index_mask.
*/
- ancillary->cs_index_mask = 0x01;
+ ancillary->cs_index_mask = BIT(0);
WARN_ON(!mutex_is_locked(&ctlr->add_lock));
@@ -2812,7 +2799,6 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
struct acpi_spi_lookup lookup = {};
struct spi_device *spi;
int ret;
- u8 idx;
if (!ctlr && index == -1)
return ERR_PTR(-EINVAL);
@@ -2848,33 +2834,19 @@ struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
return ERR_PTR(-ENOMEM);
}
- /*
- * Zero(0) is a valid physical CS value and can be located at any
- * logical CS in the spi->chip_select[]. If all the physical CS
- * are initialized to 0 then It would be difficult to differentiate
- * between a valid physical CS 0 & an unused logical CS whose physical
- * CS can be 0. As a solution to this issue initialize all the CS to 0xFF.
- * Now all the unused logical CS will have 0xFF physical CS value & can be
- * ignore while performing physical CS validity checks.
- */
- for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
- spi_set_chipselect(spi, idx, 0xFF);
+ spi_set_all_cs_unused(spi);
+ spi_set_chipselect(spi, 0, lookup.chip_select);
ACPI_COMPANION_SET(&spi->dev, adev);
spi->max_speed_hz = lookup.max_speed_hz;
spi->mode |= lookup.mode;
spi->irq = lookup.irq;
spi->bits_per_word = lookup.bits_per_word;
- spi_set_chipselect(spi, 0, lookup.chip_select);
/*
- * spi->chip_select[i] gives the corresponding physical CS for logical CS i
- * logical CS number is represented by setting the ith bit in spi->cs_index_mask
- * So, for example, if spi->cs_index_mask = 0x01 then logical CS number is 0 and
- * spi->chip_select[0] will give the physical CS.
- * By default spi->chip_select[0] will hold the physical CS number so, set
- * spi->cs_index_mask as 0x01.
+ * By default spi->chip_select[0] will hold the physical CS number,
+ * so set bit 0 in spi->cs_index_mask.
*/
- spi->cs_index_mask = 0x01;
+ spi->cs_index_mask = BIT(0);
return spi;
}
@@ -3372,9 +3344,9 @@ int spi_register_controller(struct spi_controller *ctlr)
goto free_bus_id;
}
- /* Setting last_cs to -1 means no chip selected */
+ /* Setting last_cs to SPI_INVALID_CS means no chip selected */
for (idx = 0; idx < SPI_CS_CNT_MAX; idx++)
- ctlr->last_cs[idx] = -1;
+ ctlr->last_cs[idx] = SPI_INVALID_CS;
status = device_add(&ctlr->dev);
if (status < 0)
@@ -3715,8 +3687,7 @@ static struct spi_replaced_transfers *spi_replace_transfers(
static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
struct spi_message *msg,
struct spi_transfer **xferp,
- size_t maxsize,
- gfp_t gfp)
+ size_t maxsize)
{
struct spi_transfer *xfer = *xferp, *xfers;
struct spi_replaced_transfers *srt;
@@ -3727,7 +3698,7 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
count = DIV_ROUND_UP(xfer->len, maxsize);
/* Create replacement */
- srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, gfp);
+ srt = spi_replace_transfers(msg, xfer, 1, count, NULL, 0, GFP_KERNEL);
if (IS_ERR(srt))
return PTR_ERR(srt);
xfers = srt->inserted_transfers;
@@ -3787,14 +3758,16 @@ static int __spi_split_transfer_maxsize(struct spi_controller *ctlr,
* @ctlr: the @spi_controller for this transfer
* @msg: the @spi_message to transform
* @maxsize: the maximum when to apply this
- * @gfp: GFP allocation flags
+ *
+ * This function allocates resources that are automatically freed during the
+ * spi message unoptimize phase, so this function should only be called from
+ * optimize_message callbacks.
*
* Return: status of transformation
*/
int spi_split_transfers_maxsize(struct spi_controller *ctlr,
struct spi_message *msg,
- size_t maxsize,
- gfp_t gfp)
+ size_t maxsize)
{
struct spi_transfer *xfer;
int ret;
@@ -3809,7 +3782,7 @@ int spi_split_transfers_maxsize(struct spi_controller *ctlr,
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
if (xfer->len > maxsize) {
ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
- maxsize, gfp);
+ maxsize);
if (ret)
return ret;
}
@@ -3827,14 +3800,16 @@ EXPORT_SYMBOL_GPL(spi_split_transfers_maxsize);
* @ctlr: the @spi_controller for this transfer
* @msg: the @spi_message to transform
* @maxwords: the number of words to limit each transfer to
- * @gfp: GFP allocation flags
+ *
+ * This function allocates resources that are automatically freed during the
+ * spi message unoptimize phase, so this function should only be called from
+ * optimize_message callbacks.
*
* Return: status of transformation
*/
int spi_split_transfers_maxwords(struct spi_controller *ctlr,
struct spi_message *msg,
- size_t maxwords,
- gfp_t gfp)
+ size_t maxwords)
{
struct spi_transfer *xfer;
@@ -3852,7 +3827,7 @@ int spi_split_transfers_maxwords(struct spi_controller *ctlr,
maxsize = maxwords * roundup_pow_of_two(BITS_TO_BYTES(xfer->bits_per_word));
if (xfer->len > maxsize) {
ret = __spi_split_transfer_maxsize(ctlr, msg, &xfer,
- maxsize, gfp);
+ maxsize);
if (ret)
return ret;
}
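Per the updated kernel-doc above, the split helpers are now intended to be called from a controller's optimize_message callback, where the resources they allocate are released automatically in the unoptimize phase. Below is a hedged sketch of such a callback; the foo_ name and the 64-byte limit are hypothetical, not from this patch.

#include <linux/spi/spi.h>

/* Hypothetical controller whose FIFO cannot take transfers above 64 bytes. */
static int foo_spi_optimize_message(struct spi_message *msg)
{
	struct spi_controller *ctlr = msg->spi->controller;

	/* Resources allocated here are freed when the message is unoptimized. */
	return spi_split_transfers_maxsize(ctlr, msg, 64);
}

The callback would then be assigned to ctlr->optimize_message before the controller is registered.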
@@ -4204,6 +4179,167 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
return 0;
}
+/*
+ * spi_split_transfers - generic handling of transfer splitting
+ * @msg: the message to split
+ *
+ * Under certain conditions, a SPI controller may not support arbitrary
+ * transfer sizes or other features required by a peripheral. This function
+ * will split the transfers in the message into smaller transfers that are
+ * supported by the controller.
+ *
+ * Controllers with special requirements not covered here can also split
+ * transfers in the optimize_message() callback.
+ *
+ * Context: can sleep
+ * Return: zero on success, else a negative error code
+ */
+static int spi_split_transfers(struct spi_message *msg)
+{
+ struct spi_controller *ctlr = msg->spi->controller;
+ struct spi_transfer *xfer;
+ int ret;
+
+ /*
+ * If an SPI controller does not support toggling the CS line on each
+ * transfer (indicated by the SPI_CS_WORD flag) or we are using a GPIO
+ * for the CS line, we can emulate the CS-per-word hardware function by
+ * splitting transfers into one-word transfers and ensuring that
+ * cs_change is set for each transfer.
+ */
+ if ((msg->spi->mode & SPI_CS_WORD) &&
+ (!(ctlr->mode_bits & SPI_CS_WORD) || spi_is_csgpiod(msg->spi))) {
+ ret = spi_split_transfers_maxwords(ctlr, msg, 1);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /* Don't change cs_change on the last entry in the list */
+ if (list_is_last(&xfer->transfer_list, &msg->transfers))
+ break;
+
+ xfer->cs_change = 1;
+ }
+ } else {
+ ret = spi_split_transfers_maxsize(ctlr, msg,
+ spi_max_transfer_size(msg->spi));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/*
+ * __spi_optimize_message - shared implementation for spi_optimize_message()
+ * and spi_maybe_optimize_message()
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ *
+ * Peripheral drivers will call spi_optimize_message() and the spi core will
+ * call spi_maybe_optimize_message() instead of calling this directly.
+ *
+ * It is not valid to call this on a message that has already been optimized.
+ *
+ * Return: zero on success, else a negative error code
+ */
+static int __spi_optimize_message(struct spi_device *spi,
+ struct spi_message *msg)
+{
+ struct spi_controller *ctlr = spi->controller;
+ int ret;
+
+ ret = __spi_validate(spi, msg);
+ if (ret)
+ return ret;
+
+ ret = spi_split_transfers(msg);
+ if (ret)
+ return ret;
+
+ if (ctlr->optimize_message) {
+ ret = ctlr->optimize_message(msg);
+ if (ret) {
+ spi_res_release(ctlr, msg);
+ return ret;
+ }
+ }
+
+ msg->optimized = true;
+
+ return 0;
+}
+
+/*
+ * spi_maybe_optimize_message - optimize message if it isn't already pre-optimized
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ * Return: zero on success, else a negative error code
+ */
+static int spi_maybe_optimize_message(struct spi_device *spi,
+ struct spi_message *msg)
+{
+ if (msg->pre_optimized)
+ return 0;
+
+ return __spi_optimize_message(spi, msg);
+}
+
+/**
+ * spi_optimize_message - do any one-time validation and setup for a SPI message
+ * @spi: the device that will be used for the message
+ * @msg: the message to optimize
+ *
+ * Peripheral drivers that reuse the same message repeatedly may call this to
+ * perform as much message prep as possible once, rather than repeating it each
+ * time a message transfer is performed, in order to improve throughput and
+ * reduce CPU usage.
+ *
+ * Once a message has been optimized, it cannot be modified with the exception
+ * of updating the contents of any xfer->tx_buf (the pointer can't be changed,
+ * only the data in the memory it points to).
+ *
+ * Calls to this function must be balanced with calls to spi_unoptimize_message()
+ * to avoid leaking resources.
+ *
+ * Context: can sleep
+ * Return: zero on success, else a negative error code
+ */
+int spi_optimize_message(struct spi_device *spi, struct spi_message *msg)
+{
+ int ret;
+
+ ret = __spi_optimize_message(spi, msg);
+ if (ret)
+ return ret;
+
+ /*
+ * This flag indicates that the peripheral driver called spi_optimize_message()
+ * and therefore we shouldn't unoptimize the message automatically when finalizing
+ * it, but rather wait until spi_unoptimize_message() is called by the
+ * peripheral driver.
+ */
+ msg->pre_optimized = true;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_optimize_message);
+
+/**
+ * spi_unoptimize_message - releases any resources allocated by spi_optimize_message()
+ * @msg: the message to unoptimize
+ *
+ * Calls to this function must be balanced with calls to spi_optimize_message().
+ *
+ * Context: can sleep
+ */
+void spi_unoptimize_message(struct spi_message *msg)
+{
+ __spi_unoptimize_message(msg);
+ msg->pre_optimized = false;
+}
+EXPORT_SYMBOL_GPL(spi_unoptimize_message);
+
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_controller *ctlr = spi->controller;
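To show how the newly exported pair is meant to be used by a peripheral driver that reuses one message many times, here is a hedged sketch; the foo_ structure, its fields and buffer size are hypothetical.

#include <linux/spi/spi.h>

struct foo_dev {
	struct spi_device *spi;
	struct spi_transfer xfer;
	struct spi_message msg;
	u8 buf[16];
};

static int foo_setup_message(struct foo_dev *foo)
{
	foo->xfer.tx_buf = foo->buf;
	foo->xfer.len = sizeof(foo->buf);
	spi_message_init_with_transfers(&foo->msg, &foo->xfer, 1);

	/* One-time validation and transfer splitting. */
	return spi_optimize_message(foo->spi, &foo->msg);
}

static int foo_send(struct foo_dev *foo)
{
	/* buf contents may change between calls, but not the pointer or length. */
	return spi_sync(foo->spi, &foo->msg);
}

static void foo_teardown_message(struct foo_dev *foo)
{
	/* Must balance the spi_optimize_message() call in foo_setup_message(). */
	spi_unoptimize_message(&foo->msg);
}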
@@ -4268,8 +4404,8 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
int ret;
unsigned long flags;
- ret = __spi_validate(spi, message);
- if (ret != 0)
+ ret = spi_maybe_optimize_message(spi, message);
+ if (ret)
return ret;
spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
@@ -4281,60 +4417,11 @@ int spi_async(struct spi_device *spi, struct spi_message *message)
spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
- return ret;
-}
-EXPORT_SYMBOL_GPL(spi_async);
-
-/**
- * spi_async_locked - version of spi_async with exclusive bus usage
- * @spi: device with which data will be exchanged
- * @message: describes the data transfers, including completion callback
- * Context: any (IRQs may be blocked, etc)
- *
- * This call may be used in_irq and other contexts which can't sleep,
- * as well as from task contexts which can sleep.
- *
- * The completion callback is invoked in a context which can't sleep.
- * Before that invocation, the value of message->status is undefined.
- * When the callback is issued, message->status holds either zero (to
- * indicate complete success) or a negative error code. After that
- * callback returns, the driver which issued the transfer request may
- * deallocate the associated memory; it's no longer in use by any SPI
- * core or controller driver code.
- *
- * Note that although all messages to a spi_device are handled in
- * FIFO order, messages may go to different devices in other orders.
- * Some device might be higher priority, or have various "hard" access
- * time requirements, for example.
- *
- * On detection of any fault during the transfer, processing of
- * the entire message is aborted, and the device is deselected.
- * Until returning from the associated message completion callback,
- * no other spi_message queued to that device will be processed.
- * (This rule applies equally to all the synchronous transfer calls,
- * which are wrappers around this core asynchronous primitive.)
- *
- * Return: zero on success, else a negative error code.
- */
-static int spi_async_locked(struct spi_device *spi, struct spi_message *message)
-{
- struct spi_controller *ctlr = spi->controller;
- int ret;
- unsigned long flags;
-
- ret = __spi_validate(spi, message);
- if (ret != 0)
- return ret;
-
- spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
-
- ret = __spi_async(spi, message);
-
- spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
+ spi_maybe_unoptimize_message(message);
return ret;
-
}
+EXPORT_SYMBOL_GPL(spi_async);
static void __spi_transfer_message_noqueue(struct spi_controller *ctlr, struct spi_message *msg)
{
@@ -4383,6 +4470,7 @@ static void spi_complete(void *arg)
static int __spi_sync(struct spi_device *spi, struct spi_message *message)
{
DECLARE_COMPLETION_ONSTACK(done);
+ unsigned long flags;
int status;
struct spi_controller *ctlr = spi->controller;
@@ -4391,8 +4479,8 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
return -ESHUTDOWN;
}
- status = __spi_validate(spi, message);
- if (status != 0)
+ status = spi_maybe_optimize_message(spi, message);
+ if (status)
return status;
SPI_STATISTICS_INCREMENT_FIELD(ctlr->pcpu_statistics, spi_sync);
@@ -4426,7 +4514,11 @@ static int __spi_sync(struct spi_device *spi, struct spi_message *message)
*/
message->complete = spi_complete;
message->context = &done;
- status = spi_async_locked(spi, message);
+
+ spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+ status = __spi_async(spi, message);
+ spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
+
if (status == 0) {
wait_for_completion(&done);
status = message->status;