Diffstat (limited to 'drivers/crypto/qat/qat_common/adf_transport.c')
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport.c | 130
1 file changed, 89 insertions(+), 41 deletions(-)
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 2ad774017200..5a7030acdc33 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -54,24 +54,32 @@ static void adf_unreserve_ring(struct adf_etr_bank_data *bank, u32 ring)
static void adf_enable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
spin_lock_bh(&bank->lock);
bank->irq_mask |= (1 << ring);
spin_unlock_bh(&bank->lock);
- WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
- WRITE_CSR_INT_COL_CTL(bank->csr_addr, bank->bank_number,
- bank->irq_coalesc_timer);
+ csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
+ bank->irq_mask);
+ csr_ops->write_csr_int_col_ctl(bank->csr_addr, bank->bank_number,
+ bank->irq_coalesc_timer);
}
static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
{
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
+
spin_lock_bh(&bank->lock);
bank->irq_mask &= ~(1 << ring);
spin_unlock_bh(&bank->lock);
- WRITE_CSR_INT_COL_EN(bank->csr_addr, bank->bank_number, bank->irq_mask);
+ csr_ops->write_csr_int_col_en(bank->csr_addr, bank->bank_number,
+ bank->irq_mask);
}
int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
+
if (atomic_add_return(1, ring->inflights) >
ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size)) {
atomic_dec(ring->inflights);
@@ -84,14 +92,17 @@ int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
ring->tail = adf_modulo(ring->tail +
ADF_MSG_SIZE_TO_BYTES(ring->msg_size),
ADF_RING_SIZE_MODULO(ring->ring_size));
- WRITE_CSR_RING_TAIL(ring->bank->csr_addr, ring->bank->bank_number,
- ring->ring_number, ring->tail);
+ csr_ops->write_csr_ring_tail(ring->bank->csr_addr,
+ ring->bank->bank_number, ring->ring_number,
+ ring->tail);
spin_unlock_bh(&ring->lock);
+
return 0;
}
static int adf_handle_response(struct adf_etr_ring_data *ring)
{
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
u32 msg_counter = 0;
u32 *msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
@@ -105,30 +116,36 @@ static int adf_handle_response(struct adf_etr_ring_data *ring)
msg_counter++;
msg = (u32 *)((uintptr_t)ring->base_addr + ring->head);
}
- if (msg_counter > 0)
- WRITE_CSR_RING_HEAD(ring->bank->csr_addr,
- ring->bank->bank_number,
- ring->ring_number, ring->head);
+ if (msg_counter > 0) {
+ csr_ops->write_csr_ring_head(ring->bank->csr_addr,
+ ring->bank->bank_number,
+ ring->ring_number, ring->head);
+ }
return 0;
}
static void adf_configure_tx_ring(struct adf_etr_ring_data *ring)
{
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
u32 ring_config = BUILD_RING_CONFIG(ring->ring_size);
- WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
- ring->ring_number, ring_config);
+ csr_ops->write_csr_ring_config(ring->bank->csr_addr,
+ ring->bank->bank_number,
+ ring->ring_number, ring_config);
+
}
static void adf_configure_rx_ring(struct adf_etr_ring_data *ring)
{
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
u32 ring_config =
BUILD_RESP_RING_CONFIG(ring->ring_size,
ADF_RING_NEAR_WATERMARK_512,
ADF_RING_NEAR_WATERMARK_0);
- WRITE_CSR_RING_CONFIG(ring->bank->csr_addr, ring->bank->bank_number,
- ring->ring_number, ring_config);
+ csr_ops->write_csr_ring_config(ring->bank->csr_addr,
+ ring->bank->bank_number,
+ ring->ring_number, ring_config);
}
static int adf_init_ring(struct adf_etr_ring_data *ring)
@@ -136,6 +153,7 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
struct adf_etr_bank_data *bank = ring->bank;
struct adf_accel_dev *accel_dev = bank->accel_dev;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
u64 ring_base;
u32 ring_size_bytes =
ADF_SIZE_TO_RING_SIZE_IN_BYTES(ring->ring_size);
@@ -162,9 +180,12 @@ static int adf_init_ring(struct adf_etr_ring_data *ring)
else
adf_configure_rx_ring(ring);
- ring_base = BUILD_RING_BASE_ADDR(ring->dma_addr, ring->ring_size);
- WRITE_CSR_RING_BASE(ring->bank->csr_addr, ring->bank->bank_number,
- ring->ring_number, ring_base);
+ ring_base = csr_ops->build_csr_ring_base_addr(ring->dma_addr,
+ ring->ring_size);
+
+ csr_ops->write_csr_ring_base(ring->bank->csr_addr,
+ ring->bank->bank_number, ring->ring_number,
+ ring_base);
spin_lock_init(&ring->lock);
return 0;
}
@@ -190,6 +211,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
struct adf_etr_ring_data **ring_ptr)
{
struct adf_etr_data *transport_data = accel_dev->transport;
+ u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
struct adf_etr_bank_data *bank;
struct adf_etr_ring_data *ring;
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
@@ -219,7 +241,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
dev_err(&GET_DEV(accel_dev), "Can't get ring number\n");
return -EFAULT;
}
- if (ring_num >= ADF_ETR_MAX_RINGS_PER_BANK) {
+ if (ring_num >= num_rings_per_bank) {
dev_err(&GET_DEV(accel_dev), "Invalid ring number\n");
return -EFAULT;
}
@@ -268,15 +290,17 @@ err:
void adf_remove_ring(struct adf_etr_ring_data *ring)
{
struct adf_etr_bank_data *bank = ring->bank;
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
/* Disable interrupts for the given ring */
adf_disable_ring_irq(bank, ring->ring_number);
/* Clear PCI config space */
- WRITE_CSR_RING_CONFIG(bank->csr_addr, bank->bank_number,
- ring->ring_number, 0);
- WRITE_CSR_RING_BASE(bank->csr_addr, bank->bank_number,
- ring->ring_number, 0);
+
+ csr_ops->write_csr_ring_config(bank->csr_addr, bank->bank_number,
+ ring->ring_number, 0);
+ csr_ops->write_csr_ring_base(bank->csr_addr, bank->bank_number,
+ ring->ring_number, 0);
adf_ring_debugfs_rm(ring);
adf_unreserve_ring(bank, ring->ring_number);
/* Disable HW arbitration for the given ring */
@@ -286,25 +310,30 @@ void adf_remove_ring(struct adf_etr_ring_data *ring)
static void adf_ring_response_handler(struct adf_etr_bank_data *bank)
{
- u32 empty_rings, i;
+ struct adf_accel_dev *accel_dev = bank->accel_dev;
+ u8 num_rings_per_bank = GET_NUM_RINGS_PER_BANK(accel_dev);
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(accel_dev);
+ unsigned long empty_rings;
+ int i;
- empty_rings = READ_CSR_E_STAT(bank->csr_addr, bank->bank_number);
+ empty_rings = csr_ops->read_csr_e_stat(bank->csr_addr,
+ bank->bank_number);
empty_rings = ~empty_rings & bank->irq_mask;
- for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; ++i) {
- if (empty_rings & (1 << i))
- adf_handle_response(&bank->rings[i]);
- }
+ for_each_set_bit(i, &empty_rings, num_rings_per_bank)
+ adf_handle_response(&bank->rings[i]);
}
void adf_response_handler(uintptr_t bank_addr)
{
struct adf_etr_bank_data *bank = (void *)bank_addr;
+ struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);
/* Handle all the responses and reenable IRQs */
adf_ring_response_handler(bank);
- WRITE_CSR_INT_FLAG_AND_COL(bank->csr_addr, bank->bank_number,
- bank->irq_mask);
+
+ csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
+ bank->irq_mask);
}
static inline int adf_get_cfg_int(struct adf_accel_dev *accel_dev,
@@ -343,9 +372,14 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
u32 bank_num, void __iomem *csr_addr)
{
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u8 num_rings_per_bank = hw_data->num_rings_per_bank;
+ struct adf_hw_csr_ops *csr_ops = &hw_data->csr_ops;
+ u32 irq_mask = BIT(num_rings_per_bank) - 1;
struct adf_etr_ring_data *ring;
struct adf_etr_ring_data *tx_ring;
u32 i, coalesc_enabled = 0;
+ unsigned long ring_mask;
+ int size;
memset(bank, 0, sizeof(*bank));
bank->bank_number = bank_num;
@@ -353,6 +387,13 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
bank->accel_dev = accel_dev;
spin_lock_init(&bank->lock);
+ /* Allocate the rings in the bank */
+ size = num_rings_per_bank * sizeof(struct adf_etr_ring_data);
+ bank->rings = kzalloc_node(size, GFP_KERNEL,
+ dev_to_node(&GET_DEV(accel_dev)));
+ if (!bank->rings)
+ return -ENOMEM;
+
/* Enable IRQ coalescing always. This will allow to use
* the optimised flag and coalesc register.
* If it is disabled in the config file just use min time value */
@@ -363,9 +404,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
else
bank->irq_coalesc_timer = ADF_COALESCING_MIN_TIME;
- for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
- WRITE_CSR_RING_CONFIG(csr_addr, bank_num, i, 0);
- WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
+ for (i = 0; i < num_rings_per_bank; i++) {
+ csr_ops->write_csr_ring_config(csr_addr, bank_num, i, 0);
+ csr_ops->write_csr_ring_base(csr_addr, bank_num, i, 0);
+
ring = &bank->rings[i];
if (hw_data->tx_rings_mask & (1 << i)) {
ring->inflights =
@@ -390,15 +432,18 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
goto err;
}
- WRITE_CSR_INT_FLAG(csr_addr, bank_num, ADF_BANK_INT_FLAG_CLEAR_MASK);
- WRITE_CSR_INT_SRCSEL(csr_addr, bank_num);
+ csr_ops->write_csr_int_flag(csr_addr, bank_num, irq_mask);
+ csr_ops->write_csr_int_srcsel(csr_addr, bank_num);
+
return 0;
err:
- for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
+ ring_mask = hw_data->tx_rings_mask;
+ for_each_set_bit(i, &ring_mask, num_rings_per_bank) {
ring = &bank->rings[i];
- if (hw_data->tx_rings_mask & (1 << i))
- kfree(ring->inflights);
+ kfree(ring->inflights);
+ ring->inflights = NULL;
}
+ kfree(bank->rings);
return -ENOMEM;
}
@@ -464,11 +509,12 @@ EXPORT_SYMBOL_GPL(adf_init_etr_data);
static void cleanup_bank(struct adf_etr_bank_data *bank)
{
+ struct adf_accel_dev *accel_dev = bank->accel_dev;
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ u8 num_rings_per_bank = hw_data->num_rings_per_bank;
u32 i;
- for (i = 0; i < ADF_ETR_MAX_RINGS_PER_BANK; i++) {
- struct adf_accel_dev *accel_dev = bank->accel_dev;
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ for (i = 0; i < num_rings_per_bank; i++) {
struct adf_etr_ring_data *ring = &bank->rings[i];
if (bank->ring_mask & (1 << i))
@@ -477,6 +523,7 @@ static void cleanup_bank(struct adf_etr_bank_data *bank)
if (hw_data->tx_rings_mask & (1 << i))
kfree(ring->inflights);
}
+ kfree(bank->rings);
adf_bank_debugfs_rm(bank);
memset(bank, 0, sizeof(*bank));
}
@@ -507,6 +554,7 @@ void adf_cleanup_etr_data(struct adf_accel_dev *accel_dev)
if (etr_data) {
adf_cleanup_etr_handles(accel_dev);
debugfs_remove(etr_data->debug);
+ kfree(etr_data->banks->rings);
kfree(etr_data->banks);
kfree(etr_data);
accel_dev->transport = NULL;
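
For context, a minimal sketch (not part of the patch) of the indirection these hunks switch to: each WRITE_CSR_*/READ_CSR_* macro call is replaced by a call through a per-device table of CSR accessors, so devices with different ring CSR layouts can plug in their own implementations. The member names below are taken from the call sites in this diff and the signatures are inferred from the arguments passed there; the real struct and the GET_CSR_OPS() helper are defined elsewhere in qat_common and may differ in detail.

/*
 * Illustrative sketch only: per-device CSR ops table resolved via
 * GET_CSR_OPS().  Members are limited to those used in this file.
 */
struct adf_hw_csr_ops {
	u64 (*build_csr_ring_base_addr)(dma_addr_t addr, u32 size);
	u32 (*read_csr_e_stat)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_ring_config)(void __iomem *csr_base_addr, u32 bank,
				      u32 ring, u32 value);
	void (*write_csr_ring_base)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, dma_addr_t addr);
	void (*write_csr_ring_head)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, u32 value);
	void (*write_csr_ring_tail)(void __iomem *csr_base_addr, u32 bank,
				    u32 ring, u32 value);
	void (*write_csr_int_flag)(void __iomem *csr_base_addr, u32 bank,
				   u32 value);
	void (*write_csr_int_srcsel)(void __iomem *csr_base_addr, u32 bank);
	void (*write_csr_int_col_en)(void __iomem *csr_base_addr, u32 bank,
				     u32 value);
	void (*write_csr_int_col_ctl)(void __iomem *csr_base_addr, u32 bank,
				      u32 value);
	void (*write_csr_int_flag_and_col)(void __iomem *csr_base_addr,
					   u32 bank, u32 value);
};

/*
 * Presumed accessor: the diff also dereferences &hw_data->csr_ops directly
 * in adf_init_bank(), so the table is assumed to be embedded in the
 * device's hw_device data.
 */
#define GET_CSR_OPS(accel_dev) (&(accel_dev)->hw_device->csr_ops)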