Diffstat (limited to 'drivers/infiniband/hw')
-rw-r--r--  drivers/infiniband/hw/bnxt_re/hw_counters.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 2
-rw-r--r--  drivers/infiniband/hw/efa/efa_verbs.c | 1
-rw-r--r--  drivers/infiniband/hw/hfi1/debugfs.c | 19
-rw-r--r--  drivers/infiniband/hw/hfi1/init.c | 37
-rw-r--r--  drivers/infiniband/hw/hfi1/iowait.h | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/ipoib.h | 6
-rw-r--r--  drivers/infiniband/hw/hfi1/ipoib_tx.c | 104
-rw-r--r--  drivers/infiniband/hw/hfi1/netdev_rx.c | 2
-rw-r--r--  drivers/infiniband/hw/hfi1/qp.c | 7
-rw-r--r--  drivers/infiniband/hw/hfi1/tid_rdma.c | 5
-rw-r--r--  drivers/infiniband/hw/hfi1/verbs_txreq.h | 2
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_device.h | 7
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v1.c | 4
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_hw_v2.c | 51
-rw-r--r--  drivers/infiniband/hw/hns/hns_roce_mr.c | 7
-rw-r--r--  drivers/infiniband/hw/i40iw/Makefile | 1
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw.h | 2
-rw-r--r--  drivers/infiniband/hw/mlx4/qp.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/cq.c | 6
-rw-r--r--  drivers/infiniband/hw/mlx5/devx.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 2
-rw-r--r--  drivers/infiniband/hw/mlx5/odp.c | 27
-rw-r--r--  drivers/infiniband/hw/mlx5/qp.c | 84
-rw-r--r--  drivers/infiniband/hw/mlx5/qpc.c | 8
-rw-r--r--  drivers/infiniband/hw/mlx5/srq_cmd.c | 4
-rw-r--r--  drivers/infiniband/hw/mlx5/wr.c | 2
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c | 10
-rw-r--r--  drivers/infiniband/hw/qedr/main.c | 20
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_iw_cm.c | 13
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c | 97
32 files changed, 322 insertions, 222 deletions
diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c
index 3421a0b15983..5f5408cdf008 100644
--- a/drivers/infiniband/hw/bnxt_re/hw_counters.c
+++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c
@@ -132,7 +132,7 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev,
stats->value[BNXT_RE_RECOVERABLE_ERRORS] =
le64_to_cpu(bnxt_re_stats->tx_bcast_pkts);
stats->value[BNXT_RE_RX_DROPS] =
- le64_to_cpu(bnxt_re_stats->rx_drop_pkts);
+ le64_to_cpu(bnxt_re_stats->rx_error_pkts);
stats->value[BNXT_RE_RX_DISCARDS] =
le64_to_cpu(bnxt_re_stats->rx_discard_pkts);
stats->value[BNXT_RE_RX_PKTS] =
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 30e08bcc9afb..77bc02a9228e 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3282,7 +3282,7 @@ static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
{
- struct in6_addr uninitialized_var(addr);
+ struct in6_addr addr;
struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index b1bb61c65f4f..352b8af1998a 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -754,7 +754,7 @@ skip_cqe:
static int __c4iw_poll_cq_one(struct c4iw_cq *chp, struct c4iw_qp *qhp,
struct ib_wc *wc, struct c4iw_srq *srq)
{
- struct t4_cqe uninitialized_var(cqe);
+ struct t4_cqe cqe;
struct t4_wq *wq = qhp ? &qhp->wq : NULL;
u32 credit = 0;
u8 cqe_flushed;
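
Note: several files in this series (cxgb4, mlx4, mlx5, mthca) drop the kernel's old uninitialized_var() macro, which silenced "may be used uninitialized" warnings by making a variable initialize from itself; it was removed tree-wide because it could just as easily hide real uninitialized reads. A minimal standalone sketch of what the macro did (demo() is a hypothetical example, not kernel code):

/* The macro expanded to a self-assignment, which defeats the compiler's
 * uninitialized-use analysis without emitting any actual store.
 */
#define uninitialized_var(x) x = x

int demo(int flag)
{
        int uninitialized_var(size);    /* old: "int size = size;" hides the warning */

        if (flag)
                size = 4;
        return flag ? size : 0;         /* never actually reads size uninitialized */
}
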
diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c
index fda175836fb6..9e201f169289 100644
--- a/drivers/infiniband/hw/efa/efa_verbs.c
+++ b/drivers/infiniband/hw/efa/efa_verbs.c
@@ -212,6 +212,7 @@ int efa_query_device(struct ib_device *ibdev,
props->max_send_sge = dev_attr->max_sq_sge;
props->max_recv_sge = dev_attr->max_rq_sge;
props->max_sge_rd = dev_attr->max_wr_rdma_sge;
+ props->max_pkeys = 1;
if (udata && udata->outlen) {
resp.max_sq_sge = dev_attr->max_sq_sge;
diff --git a/drivers/infiniband/hw/hfi1/debugfs.c b/drivers/infiniband/hw/hfi1/debugfs.c
index 4633a0ce1a8c..2ced236e1553 100644
--- a/drivers/infiniband/hw/hfi1/debugfs.c
+++ b/drivers/infiniband/hw/hfi1/debugfs.c
@@ -985,15 +985,10 @@ static ssize_t qsfp2_debugfs_read(struct file *file, char __user *buf,
static int __i2c_debugfs_open(struct inode *in, struct file *fp, u32 target)
{
struct hfi1_pportdata *ppd;
- int ret;
ppd = private2ppd(fp);
- ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
- if (ret) /* failed - release the module */
- module_put(THIS_MODULE);
-
- return ret;
+ return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
}
static int i2c1_debugfs_open(struct inode *in, struct file *fp)
@@ -1013,7 +1008,6 @@ static int __i2c_debugfs_release(struct inode *in, struct file *fp, u32 target)
ppd = private2ppd(fp);
release_chip_resource(ppd->dd, i2c_target(target));
- module_put(THIS_MODULE);
return 0;
}
@@ -1031,18 +1025,10 @@ static int i2c2_debugfs_release(struct inode *in, struct file *fp)
static int __qsfp_debugfs_open(struct inode *in, struct file *fp, u32 target)
{
struct hfi1_pportdata *ppd;
- int ret;
-
- if (!try_module_get(THIS_MODULE))
- return -ENODEV;
ppd = private2ppd(fp);
- ret = acquire_chip_resource(ppd->dd, i2c_target(target), 0);
- if (ret) /* failed - release the module */
- module_put(THIS_MODULE);
-
- return ret;
+ return acquire_chip_resource(ppd->dd, i2c_target(target), 0);
}
static int qsfp1_debugfs_open(struct inode *in, struct file *fp)
@@ -1062,7 +1048,6 @@ static int __qsfp_debugfs_release(struct inode *in, struct file *fp, u32 target)
ppd = private2ppd(fp);
release_chip_resource(ppd->dd, i2c_target(target));
- module_put(THIS_MODULE);
return 0;
}
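
Note: the debugfs hunks above finish a cleanup that had gone half-done. Per the shape of the i2c hunk, the try_module_get() had already disappeared from that open path while the module_put() calls remained in the failure and release paths, the classic recipe for a module use-count underflow; the qsfp path drops its remaining get/put pair at the same time. A sketch of the imbalance being removed (acquire_resource()/release_resource() are hypothetical stand-ins):

static int bad_open(struct inode *in, struct file *fp)
{
        int ret = acquire_resource(fp);         /* hypothetical helper */

        if (ret)
                module_put(THIS_MODULE);        /* put without a matching get */
        return ret;
}

static int bad_release(struct inode *in, struct file *fp)
{
        release_resource(fp);                   /* hypothetical helper */
        module_put(THIS_MODULE);                /* likewise unbalanced */
        return 0;
}
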
diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c
index 5eed4360695f..cb7ad1288821 100644
--- a/drivers/infiniband/hw/hfi1/init.c
+++ b/drivers/infiniband/hw/hfi1/init.c
@@ -831,6 +831,29 @@ wq_error:
}
/**
+ * destroy_workqueues - destroy per port workqueues
+ * @dd: the hfi1_ib device
+ */
+static void destroy_workqueues(struct hfi1_devdata *dd)
+{
+ int pidx;
+ struct hfi1_pportdata *ppd;
+
+ for (pidx = 0; pidx < dd->num_pports; ++pidx) {
+ ppd = dd->pport + pidx;
+
+ if (ppd->hfi1_wq) {
+ destroy_workqueue(ppd->hfi1_wq);
+ ppd->hfi1_wq = NULL;
+ }
+ if (ppd->link_wq) {
+ destroy_workqueue(ppd->link_wq);
+ ppd->link_wq = NULL;
+ }
+ }
+}
+
+/**
* enable_general_intr() - Enable the IRQs that will be handled by the
* general interrupt handler.
* @dd: valid devdata
@@ -1103,15 +1126,10 @@ static void shutdown_device(struct hfi1_devdata *dd)
* We can't count on interrupts since we are stopping.
*/
hfi1_quiet_serdes(ppd);
-
- if (ppd->hfi1_wq) {
- destroy_workqueue(ppd->hfi1_wq);
- ppd->hfi1_wq = NULL;
- }
- if (ppd->link_wq) {
- destroy_workqueue(ppd->link_wq);
- ppd->link_wq = NULL;
- }
+ if (ppd->hfi1_wq)
+ flush_workqueue(ppd->hfi1_wq);
+ if (ppd->link_wq)
+ flush_workqueue(ppd->link_wq);
}
sdma_exit(dd);
}
@@ -1756,6 +1774,7 @@ static void remove_one(struct pci_dev *pdev)
* clear dma engines, etc.
*/
shutdown_device(dd);
+ destroy_workqueues(dd);
stop_timers(dd);
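
Note: the init.c hunks split workqueue teardown into two phases. shutdown_device() now only flushes the per-port workqueues, since work may still be pending while the hardware is being quiesced, and the new destroy_workqueues() helper frees them afterwards from remove_one(). Together with the HFI1_SHUTDOWN checks added in qp.c and tid_rdma.c below, which stop new work from being scheduled once shutdown begins, this yields the condensed ordering:

static void remove_one_sketch(struct hfi1_devdata *dd)
{
        shutdown_device(dd);    /* quiesce hardware; flush_workqueue() only */
        destroy_workqueues(dd); /* safe now: nothing can queue new work */
        stop_timers(dd);
}
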
diff --git a/drivers/infiniband/hw/hfi1/iowait.h b/drivers/infiniband/hw/hfi1/iowait.h
index 07847cb72169..d580aa17ae37 100644
--- a/drivers/infiniband/hw/hfi1/iowait.h
+++ b/drivers/infiniband/hw/hfi1/iowait.h
@@ -399,7 +399,7 @@ static inline void iowait_get_priority(struct iowait *w)
* @wait_head: the wait queue
*
* This function is called to insert an iowait struct into a
- * wait queue after a resource (eg, sdma decriptor or pio
+ * wait queue after a resource (eg, sdma descriptor or pio
* buffer) is run out.
*/
static inline void iowait_queue(bool pkts_sent, struct iowait *w,
diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h
index 185c9b02c974..b8c9d0a003fb 100644
--- a/drivers/infiniband/hw/hfi1/ipoib.h
+++ b/drivers/infiniband/hw/hfi1/ipoib.h
@@ -67,6 +67,9 @@ struct hfi1_ipoib_circ_buf {
* @sde: sdma engine
* @tx_list: tx request list
* @sent_txreqs: count of txreqs posted to sdma
+ * @stops: count of stops of queue
+ * @ring_full: ring has been filled
+ * @no_desc: descriptor shortage seen
* @flow: tracks when list needs to be flushed for a flow change
* @q_idx: ipoib Tx queue index
* @pkts_sent: indicator packets have been sent from this queue
@@ -80,6 +83,9 @@ struct hfi1_ipoib_txq {
struct sdma_engine *sde;
struct list_head tx_list;
u64 sent_txreqs;
+ atomic_t stops;
+ atomic_t ring_full;
+ atomic_t no_desc;
union hfi1_ipoib_flow flow;
u8 q_idx;
bool pkts_sent;
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c
index 883cb9d48022..9df292b51a05 100644
--- a/drivers/infiniband/hw/hfi1/ipoib_tx.c
+++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c
@@ -55,23 +55,48 @@ static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed)
return sent - completed;
}
-static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq)
{
- if (unlikely(hfi1_ipoib_txreqs(++txq->sent_txreqs,
- atomic64_read(&txq->complete_txreqs)) >=
- min_t(unsigned int, txq->priv->netdev->tx_queue_len,
- txq->tx_ring.max_items - 1)))
+ return hfi1_ipoib_txreqs(txq->sent_txreqs,
+ atomic64_read(&txq->complete_txreqs));
+}
+
+static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq)
+{
+ if (atomic_inc_return(&txq->stops) == 1)
netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
}
+static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq)
+{
+ if (atomic_dec_and_test(&txq->stops))
+ netif_wake_subqueue(txq->priv->netdev, txq->q_idx);
+}
+
+static uint hfi1_ipoib_ring_hwat(struct hfi1_ipoib_txq *txq)
+{
+ return min_t(uint, txq->priv->netdev->tx_queue_len,
+ txq->tx_ring.max_items - 1);
+}
+
+static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq)
+{
+ return min_t(uint, txq->priv->netdev->tx_queue_len,
+ txq->tx_ring.max_items) >> 1;
+}
+
+static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq)
+{
+ ++txq->sent_txreqs;
+ if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) &&
+ !atomic_xchg(&txq->ring_full, 1))
+ hfi1_ipoib_stop_txq(txq);
+}
+
static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
{
struct net_device *dev = txq->priv->netdev;
- /* If the queue is already running just return */
- if (likely(!__netif_subqueue_stopped(dev, txq->q_idx)))
- return;
-
/* If shutting down just return as queue state is irrelevant */
if (unlikely(dev->reg_state != NETREG_REGISTERED))
return;
@@ -86,11 +111,9 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq)
* Use the minimum of the current tx_queue_len or the rings max txreqs
* to protect against ring overflow.
*/
- if (hfi1_ipoib_txreqs(txq->sent_txreqs,
- atomic64_read(&txq->complete_txreqs))
- < min_t(unsigned int, dev->tx_queue_len,
- txq->tx_ring.max_items) >> 1)
- netif_wake_subqueue(dev, txq->q_idx);
+ if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) &&
+ atomic_xchg(&txq->ring_full, 0))
+ hfi1_ipoib_wake_txq(txq);
}
static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget)
@@ -364,11 +387,12 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev,
if (unlikely(!tx))
return ERR_PTR(-ENOMEM);
- /* so that we can test if the sdma decriptors are there */
+ /* so that we can test if the sdma descriptors are there */
tx->txreq.num_desc = 0;
tx->priv = priv;
tx->txq = txp->txq;
tx->skb = skb;
+ INIT_LIST_HEAD(&tx->txreq.list);
hfi1_ipoib_build_ib_tx_headers(tx, txp);
@@ -469,6 +493,7 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
ret = hfi1_ipoib_submit_tx(txq, tx);
if (likely(!ret)) {
+tx_ok:
trace_sdma_output_ibhdr(tx->priv->dd,
&tx->sdma_hdr.hdr,
ib_is_sc5(txp->flow.sc5));
@@ -478,20 +503,8 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev,
txq->pkts_sent = false;
- if (ret == -EBUSY) {
- list_add_tail(&tx->txreq.list, &txq->tx_list);
-
- trace_sdma_output_ibhdr(tx->priv->dd,
- &tx->sdma_hdr.hdr,
- ib_is_sc5(txp->flow.sc5));
- hfi1_ipoib_check_queue_depth(txq);
- return NETDEV_TX_OK;
- }
-
- if (ret == -ECOMM) {
- hfi1_ipoib_check_queue_depth(txq);
- return NETDEV_TX_OK;
- }
+ if (ret == -EBUSY || ret == -ECOMM)
+ goto tx_ok;
sdma_txclean(priv->dd, &tx->txreq);
dev_kfree_skb_any(skb);
@@ -509,9 +522,17 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev,
struct ipoib_txreq *tx;
/* Has the flow change ? */
- if (txq->flow.as_int != txp->flow.as_int)
- (void)hfi1_ipoib_flush_tx_list(dev, txq);
-
+ if (txq->flow.as_int != txp->flow.as_int) {
+ int ret;
+
+ ret = hfi1_ipoib_flush_tx_list(dev, txq);
+ if (unlikely(ret)) {
+ if (ret == -EBUSY)
+ ++dev->stats.tx_dropped;
+ dev_kfree_skb_any(skb);
+ return NETDEV_TX_OK;
+ }
+ }
tx = hfi1_ipoib_send_dma_common(dev, skb, txp);
if (IS_ERR(tx)) {
int ret = PTR_ERR(tx);
@@ -610,10 +631,14 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde,
return -EAGAIN;
}
- netif_stop_subqueue(txq->priv->netdev, txq->q_idx);
-
- if (list_empty(&txq->wait.list))
+ if (list_empty(&txreq->list))
+ /* came from non-list submit */
+ list_add_tail(&txreq->list, &txq->tx_list);
+ if (list_empty(&txq->wait.list)) {
+ if (!atomic_xchg(&txq->no_desc, 1))
+ hfi1_ipoib_stop_txq(txq);
iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
+ }
write_sequnlock(&sde->waitlock);
return -EBUSY;
@@ -648,9 +673,9 @@ static void hfi1_ipoib_flush_txq(struct work_struct *work)
struct net_device *dev = txq->priv->netdev;
if (likely(dev->reg_state == NETREG_REGISTERED) &&
- likely(__netif_subqueue_stopped(dev, txq->q_idx)) &&
likely(!hfi1_ipoib_flush_tx_list(dev, txq)))
- netif_wake_subqueue(dev, txq->q_idx);
+ if (atomic_xchg(&txq->no_desc, 0))
+ hfi1_ipoib_wake_txq(txq);
}
int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
@@ -704,6 +729,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv)
txq->sde = NULL;
INIT_LIST_HEAD(&txq->tx_list);
atomic64_set(&txq->complete_txreqs, 0);
+ atomic_set(&txq->stops, 0);
+ atomic_set(&txq->ring_full, 0);
+ atomic_set(&txq->no_desc, 0);
txq->q_idx = i;
txq->flow.tx_queue = 0xff;
txq->flow.sc5 = 0xff;
@@ -769,7 +797,7 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq)
atomic64_inc(complete_txreqs);
}
- if (hfi1_ipoib_txreqs(txq->sent_txreqs, atomic64_read(complete_txreqs)))
+ if (hfi1_ipoib_used(txq))
dd_dev_warn(txq->priv->dd,
"txq %d not empty found %llu requests\n",
txq->q_idx,
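
Note: the ipoib_tx.c rework replaces direct netif_stop_subqueue()/netif_wake_subqueue() calls with a counted scheme. Each stop reason (ring_full for the high-water mark, no_desc for sdma descriptor exhaustion) is a 0/1 latch flipped with atomic_xchg() so it fires exactly once, and the shared stops counter stops the subqueue on the first latched reason and wakes it only when the last reason clears. The ring_hwat()/ring_lwat() pair adds hysteresis: stop near ring capacity, wake only once the ring drains below half. A runnable userspace sketch of the same pattern, with C11 atomics standing in for the kernel helpers:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int stops, ring_full, no_desc;

static void stop_txq(void)
{
        if (atomic_fetch_add(&stops, 1) == 0)   /* kernel: atomic_inc_return() == 1 */
                puts("stop queue");
}

static void wake_txq(void)
{
        if (atomic_fetch_sub(&stops, 1) == 1)   /* kernel: atomic_dec_and_test() */
                puts("wake queue");
}

static void reason_set(atomic_int *r)
{
        if (!atomic_exchange(r, 1))             /* latch fires exactly once */
                stop_txq();
}

static void reason_clear(atomic_int *r)
{
        if (atomic_exchange(r, 0))              /* unlatch fires exactly once */
                wake_txq();
}

int main(void)
{
        reason_set(&ring_full);         /* first reason: "stop queue" */
        reason_set(&no_desc);           /* second reason: already stopped */
        reason_clear(&ring_full);       /* one reason still latched */
        reason_clear(&no_desc);         /* last reason: "wake queue" */
        return 0;
}
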
diff --git a/drivers/infiniband/hw/hfi1/netdev_rx.c b/drivers/infiniband/hw/hfi1/netdev_rx.c
index 63688e85e8da..6d263c9749b3 100644
--- a/drivers/infiniband/hw/hfi1/netdev_rx.c
+++ b/drivers/infiniband/hw/hfi1/netdev_rx.c
@@ -373,7 +373,7 @@ void hfi1_netdev_free(struct hfi1_devdata *dd)
{
if (dd->dummy_netdev) {
dd_dev_info(dd, "hfi1 netdev freed\n");
- free_netdev(dd->dummy_netdev);
+ kfree(dd->dummy_netdev);
dd->dummy_netdev = NULL;
}
}
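
Note: the kfree() here matches the allocator. hfi1's dummy netdev is, per the corresponding alloc path (not shown in this diff), carved out of a plain kcalloc rather than created with alloc_netdev_mqs(), so free_netdev(), which tears down state that only alloc_netdev_mqs() sets up, was the wrong release function. Paraphrased shape, with the alloc line reconstructed from context:

/* alloc side, roughly (not part of this diff):
 *   dd->dummy_netdev = kcalloc_node(1, sizeof(struct net_device) +
 *                                   sizeof(struct hfi1_netdev_priv), ...);
 * A kcalloc'd pseudo-netdev never went through alloc_netdev_mqs(),
 * so it must be released with the plain kfree() used above.
 */
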
diff --git a/drivers/infiniband/hw/hfi1/qp.c b/drivers/infiniband/hw/hfi1/qp.c
index b1175c514cd8..356518e17fa6 100644
--- a/drivers/infiniband/hw/hfi1/qp.c
+++ b/drivers/infiniband/hw/hfi1/qp.c
@@ -195,7 +195,7 @@ static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu)
{
/* Constraining 10KB packets to 8KB packets */
if (mtu == (enum ib_mtu)OPA_MTU_10240)
- mtu = OPA_MTU_8192;
+ mtu = (enum ib_mtu)OPA_MTU_8192;
return opa_mtu_enum_to_int((enum opa_mtu)mtu);
}
@@ -367,7 +367,10 @@ bool _hfi1_schedule_send(struct rvt_qp *qp)
struct hfi1_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if (dd->flags & HFI1_SHUTDOWN)
+ return true;
return iowait_schedule(&priv->s_iowait, ppd->hfi1_wq,
priv->s_sde ?
diff --git a/drivers/infiniband/hw/hfi1/tid_rdma.c b/drivers/infiniband/hw/hfi1/tid_rdma.c
index 62b6c1bf267d..9af82ff933d7 100644
--- a/drivers/infiniband/hw/hfi1/tid_rdma.c
+++ b/drivers/infiniband/hw/hfi1/tid_rdma.c
@@ -5406,7 +5406,10 @@ static bool _hfi1_schedule_tid_send(struct rvt_qp *qp)
struct hfi1_ibport *ibp =
to_iport(qp->ibqp.device, qp->port_num);
struct hfi1_pportdata *ppd = ppd_from_ibp(ibp);
- struct hfi1_devdata *dd = dd_from_ibdev(qp->ibqp.device);
+ struct hfi1_devdata *dd = ppd->dd;
+
+ if ((dd->flags & HFI1_SHUTDOWN))
+ return true;
return iowait_tid_schedule(&priv->s_iowait, ppd->hfi1_wq,
priv->s_sde ?
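
Note: _hfi1_schedule_send() and _hfi1_schedule_tid_send() gain the same guard, returning true so the caller treats the send as handled rather than queueing work onto a workqueue that shutdown is about to flush; both also read dd via ppd->dd, which is cheaper than dd_from_ibdev() since ppd is already in hand. The common shape of both hunks:

if (dd->flags & HFI1_SHUTDOWN)
        return true;    /* claim completion; never schedule new work past shutdown */
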
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index bfa6e081cb56..d2d526c5a756 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -91,7 +91,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
tx->mr = NULL;
tx->sde = priv->s_sde;
tx->psc = priv->s_sendcontext;
- /* so that we can test if the sdma decriptors are there */
+ /* so that we can test if the sdma descriptors are there */
tx->txreq.num_desc = 0;
/* Set the header type */
tx->phdr.hdr.hdr_type = priv->hdr_type;
diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 846954eab65c..da9888deff8c 100644
--- a/drivers/infiniband/hw/hns/hns_roce_device.h
+++ b/drivers/infiniband/hw/hns/hns_roce_device.h
@@ -899,13 +899,14 @@ struct hns_roce_hw {
int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr);
void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port,
enum ib_mtu mtu);
- int (*write_mtpt)(void *mb_buf, struct hns_roce_mr *mr,
- unsigned long mtpt_idx);
+ int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+ struct hns_roce_mr *mr, unsigned long mtpt_idx);
int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev,
struct hns_roce_mr *mr, int flags, u32 pdn,
int mr_access_flags, u64 iova, u64 size,
void *mb_buf);
- int (*frmr_write_mtpt)(void *mb_buf, struct hns_roce_mr *mr);
+ int (*frmr_write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf,
+ struct hns_roce_mr *mr);
int (*mw_write_mtpt)(void *mb_buf, struct hns_roce_mw *mw);
void (*write_cqc)(struct hns_roce_dev *hr_dev,
struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
index ef7f8b3177ff..07b4c85d341d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c
@@ -1756,10 +1756,10 @@ static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port,
val);
}
-static int hns_roce_v1_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf,
+ struct hns_roce_mr *mr,
unsigned long mtpt_idx)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 };
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v1_mpt_entry *mpt_entry;
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index d51b332ece5b..d2968594664b 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -914,7 +914,7 @@ static int hns_roce_v2_rst_process_cmd(struct hns_roce_dev *hr_dev)
instance_stage = handle->rinfo.instance_state;
reset_stage = handle->rinfo.reset_state;
reset_cnt = ops->ae_dev_reset_cnt(handle);
- hw_resetting = ops->get_hw_reset_stat(handle);
+ hw_resetting = ops->get_cmdq_stat(handle);
sw_resetting = ops->ae_dev_resetting(handle);
if (reset_cnt != hr_dev->reset_cnt)
@@ -2523,10 +2523,10 @@ static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
return hns_roce_cmq_send(hr_dev, &desc, 1);
}
-static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
+static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
+ struct hns_roce_v2_mpt_entry *mpt_entry,
struct hns_roce_mr *mr)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
u64 pages[HNS_ROCE_V2_MAX_INNER_MTPT_NUM] = { 0 };
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t pbl_ba;
@@ -2565,7 +2565,8 @@ static int set_mtpt_pbl(struct hns_roce_v2_mpt_entry *mpt_entry,
return 0;
}
-static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
+static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
+ void *mb_buf, struct hns_roce_mr *mr,
unsigned long mtpt_idx)
{
struct hns_roce_v2_mpt_entry *mpt_entry;
@@ -2614,7 +2615,7 @@ static int hns_roce_v2_write_mtpt(void *mb_buf, struct hns_roce_mr *mr,
if (mr->type == MR_TYPE_DMA)
return 0;
- ret = set_mtpt_pbl(mpt_entry, mr);
+ ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
return ret;
}
@@ -2660,15 +2661,15 @@ static int hns_roce_v2_rereg_write_mtpt(struct hns_roce_dev *hr_dev,
mr->iova = iova;
mr->size = size;
- ret = set_mtpt_pbl(mpt_entry, mr);
+ ret = set_mtpt_pbl(hr_dev, mpt_entry, mr);
}
return ret;
}
-static int hns_roce_v2_frmr_write_mtpt(void *mb_buf, struct hns_roce_mr *mr)
+static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev,
+ void *mb_buf, struct hns_roce_mr *mr)
{
- struct hns_roce_dev *hr_dev = to_hr_dev(mr->ibmr.device);
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v2_mpt_entry *mpt_entry;
dma_addr_t pbl_ba = 0;
@@ -3933,6 +3934,15 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
return 0;
}
+static inline enum ib_mtu get_mtu(struct ib_qp *ibqp,
+ const struct ib_qp_attr *attr)
+{
+ if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
+ return IB_MTU_4096;
+
+ return attr->path_mtu;
+}
+
static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
struct hns_roce_v2_qp_context *context,
@@ -3944,6 +3954,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
struct ib_device *ibdev = &hr_dev->ib_dev;
dma_addr_t trrl_ba;
dma_addr_t irrl_ba;
+ enum ib_mtu mtu;
u8 port_num;
u64 *mtts;
u8 *dmac;
@@ -4041,23 +4052,23 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
roce_set_field(qpc_mask->byte_52_udpspn_dmac, V2_QPC_BYTE_52_DMAC_M,
V2_QPC_BYTE_52_DMAC_S, 0);
- /* mtu*(2^LP_PKTN_INI) should not bigger than 1 message length 64kb */
+ mtu = get_mtu(ibqp, attr);
+
+ if (attr_mask & IB_QP_PATH_MTU) {
+ roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, mtu);
+ roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
+ V2_QPC_BYTE_24_MTU_S, 0);
+ }
+
+#define MAX_LP_MSG_LEN 65536
+ /* MTU*(2^LP_PKTN_INI) shouldn't be bigger than 64kb */
roce_set_field(context->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S,
- ilog2(hr_dev->caps.max_sq_inline / IB_MTU_4096));
+ ilog2(MAX_LP_MSG_LEN / ib_mtu_enum_to_int(mtu)));
roce_set_field(qpc_mask->byte_56_dqpn_err, V2_QPC_BYTE_56_LP_PKTN_INI_M,
V2_QPC_BYTE_56_LP_PKTN_INI_S, 0);
- if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_UD)
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, IB_MTU_4096);
- else if (attr_mask & IB_QP_PATH_MTU)
- roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, attr->path_mtu);
-
- roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_MTU_M,
- V2_QPC_BYTE_24_MTU_S, 0);
-
roce_set_bit(qpc_mask->byte_108_rx_reqepsn,
V2_QPC_BYTE_108_RX_REQ_PSN_ERR_S, 0);
roce_set_field(qpc_mask->byte_96_rx_reqmsn, V2_QPC_BYTE_96_RX_REQ_MSN_M,
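
Note: the LP_PKTN_INI calculation is now derived from the actual path MTU rather than max_sq_inline. The field stores log2 of the number of MTU-sized packets in one maximum (64 KB) loopback message, with the MTU coming from the new get_mtu() helper (fixed at 4096 for GSI/UD QPs). A standalone worked example, assuming a 4096-byte path MTU:

#include <stdio.h>

#define MAX_LP_MSG_LEN 65536

static unsigned int ilog2(unsigned int v)       /* local stand-in for the kernel macro */
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        unsigned int mtu = 4096;        /* ib_mtu_enum_to_int(IB_MTU_4096) */

        /* 4096 * 2^4 = 64 KB, so LP_PKTN_INI = 4 */
        printf("LP_PKTN_INI = %u\n", ilog2(MAX_LP_MSG_LEN / mtu));
        return 0;
}
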
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 1655fcf21a76..e5df3884b41d 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -120,7 +120,7 @@ static int alloc_mr_pbl(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr,
mr->pbl_hop_num = is_fast ? 1 : hr_dev->caps.pbl_hop_num;
buf_attr.page_shift = is_fast ? PAGE_SHIFT :
- hr_dev->caps.pbl_buf_pg_sz + HNS_HW_PAGE_SHIFT;
+ hr_dev->caps.pbl_buf_pg_sz + PAGE_SHIFT;
buf_attr.region[0].size = length;
buf_attr.region[0].hopnum = mr->pbl_hop_num;
buf_attr.region_count = 1;
@@ -180,9 +180,10 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
}
if (mr->type != MR_TYPE_FRMR)
- ret = hr_dev->hw->write_mtpt(mailbox->buf, mr, mtpt_idx);
+ ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
+ mtpt_idx);
else
- ret = hr_dev->hw->frmr_write_mtpt(mailbox->buf, mr);
+ ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
if (ret) {
dev_err(dev, "Write mtpt fail!\n");
goto err_page;
diff --git a/drivers/infiniband/hw/i40iw/Makefile b/drivers/infiniband/hw/i40iw/Makefile
index 8942f8229945..34da9eba8a7c 100644
--- a/drivers/infiniband/hw/i40iw/Makefile
+++ b/drivers/infiniband/hw/i40iw/Makefile
@@ -1,5 +1,4 @@
# SPDX-License-Identifier: GPL-2.0
-ccflags-y := -I $(srctree)/drivers/net/ethernet/intel/i40e
obj-$(CONFIG_INFINIBAND_I40IW) += i40iw.o
diff --git a/drivers/infiniband/hw/i40iw/i40iw.h b/drivers/infiniband/hw/i40iw/i40iw.h
index 49d92638e0db..25747b85a79c 100644
--- a/drivers/infiniband/hw/i40iw/i40iw.h
+++ b/drivers/infiniband/hw/i40iw/i40iw.h
@@ -45,6 +45,7 @@
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/crc32c.h>
+#include <linux/net/intel/i40e_client.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_pack.h>
@@ -57,7 +58,6 @@
#include "i40iw_d.h"
#include "i40iw_hmc.h"
-#include <i40e_client.h>
#include "i40iw_type.h"
#include "i40iw_p.h"
#include <rdma/i40iw-abi.h>
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index cf51e3cbd969..f9ca6e000a81 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -3541,11 +3541,11 @@ static int _mlx4_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
int nreq;
int err = 0;
unsigned ind;
- int uninitialized_var(size);
- unsigned uninitialized_var(seglen);
+ int size;
+ unsigned seglen;
__be32 dummy;
__be32 *lso_wqe;
- __be32 uninitialized_var(lso_hdr_sz);
+ __be32 lso_hdr_sz;
__be32 blh;
int i;
struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index 0c18cb6a2f14..0133ebb8d740 100644
--- a/drivers/infiniband/hw/mlx5/cq.c
+++ b/drivers/infiniband/hw/mlx5/cq.c
@@ -925,8 +925,8 @@ int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_cq *cq = to_mcq(ibcq);
u32 out[MLX5_ST_SZ_DW(create_cq_out)];
- int uninitialized_var(index);
- int uninitialized_var(inlen);
+ int index;
+ int inlen;
u32 *cqb = NULL;
void *cqc;
int cqe_size;
@@ -1246,7 +1246,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
__be64 *pas;
int page_shift;
int inlen;
- int uninitialized_var(cqe_size);
+ int cqe_size;
unsigned long flags;
if (!MLX5_CAP_GEN(dev->mdev, cq_resize)) {
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index e9cf294f8529..9e3d8b826498 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2490,7 +2490,7 @@ static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
{
struct devx_async_event_file *ev_file = filp->private_data;
struct devx_event_subscription *event_sub;
- struct devx_async_event_data *uninitialized_var(event);
+ struct devx_async_event_data *event;
int ret = 0;
size_t eventsz;
bool omit_data;
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index a973008286fd..fbc45a5e76c5 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -484,7 +484,7 @@ static int mlx5_query_port_roce(struct ib_device *device, u8 port_num,
mdev_port_num);
if (err)
goto out;
- ext = MLX5_CAP_PCAM_FEATURE(dev->mdev, ptys_extended_ethernet);
+ ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);
props->active_width = IB_WIDTH_4X;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 8f4426496dc7..cfd7efab114e 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -601,6 +601,23 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
*/
synchronize_srcu(&dev->odp_srcu);
+ /*
+ * All work on the prefetch list must be completed, xa_erase() prevented
+ * new work from being created.
+ */
+ wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
+
+ /*
+ * At this point it is forbidden for any other thread to enter
+ * pagefault_mr() on this imr. It is already forbidden to call
+ * pagefault_mr() on an implicit child. Due to this additions to
+ * implicit_children are prevented.
+ */
+
+ /*
+ * Block destroy_unused_implicit_child_mr() from incrementing
+ * num_deferred_work.
+ */
xa_lock(&imr->implicit_children);
xa_for_each (&imr->implicit_children, idx, mtt) {
__xa_erase(&imr->implicit_children, idx);
@@ -609,9 +626,8 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
xa_unlock(&imr->implicit_children);
/*
- * num_deferred_work can only be incremented inside the odp_srcu, or
- * under xa_lock while the child is in the xarray. Thus at this point
- * it is only decreasing, and all work holding it is now on the wq.
+ * Wait for any concurrent destroy_unused_implicit_child_mr() to
+ * complete.
*/
wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
@@ -1790,9 +1806,7 @@ static bool init_prefetch_work(struct ib_pd *pd,
work->frags[i].mr =
get_prefetchable_mr(pd, advice, sg_list[i].lkey);
if (!work->frags[i].mr) {
- work->num_sge = i - 1;
- if (i)
- destroy_prefetch_work(work);
+ work->num_sge = i;
return false;
}
@@ -1859,6 +1873,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
srcu_key = srcu_read_lock(&dev->odp_srcu);
if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
srcu_read_unlock(&dev->odp_srcu, srcu_key);
+ destroy_prefetch_work(work);
return -EINVAL;
}
queue_work(system_unbound_wq, &work->work);
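
Note: the odp.c error-path fix is subtle. On a get_prefetchable_mr() failure, init_prefetch_work() used to set num_sge to i - 1, undercounting the filled frags by one (leaking an MR reference), and it destroyed the work itself only when i was non-zero, leaking the work entirely when the very first lookup failed. Now it records exactly i filled frags and leaves destruction to the caller, which runs after leaving the SRCU read section:

/* fixed caller shape, condensed from the hunk above */
if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) {
        srcu_read_unlock(&dev->odp_srcu, srcu_key);
        destroy_prefetch_work(work);    /* frees exactly work->num_sge frags */
        return -EINVAL;
}
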
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 9fbe0583f579..59fce5fac7a3 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -1767,15 +1767,14 @@ err:
}
static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+ struct mlx5_ib_qp *qp,
struct ib_qp_init_attr *init_attr,
- struct mlx5_ib_create_qp *ucmd,
void *qpc)
{
int scqe_sz;
bool allow_scat_cqe = false;
- if (ucmd)
- allow_scat_cqe = ucmd->flags & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
+ allow_scat_cqe = qp->flags_en & MLX5_QP_FLAG_ALLOW_SCATTER_CQE;
if (!allow_scat_cqe && init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
return;
@@ -1854,8 +1853,6 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 *in;
int err;
- mutex_init(&qp->mutex);
-
if (attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
@@ -1863,7 +1860,7 @@ static int create_xrc_tgt_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (!in)
return -ENOMEM;
- if (MLX5_CAP_GEN(mdev, ece_support))
+ if (MLX5_CAP_GEN(mdev, ece_support) && ucmd)
MLX5_SET(create_qp_in, in, ece, ucmd->ece_options);
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
@@ -1939,7 +1936,6 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 *in;
int err;
- mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
@@ -2013,7 +2009,7 @@ static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
}
if ((qp->flags_en & MLX5_QP_FLAG_SCATTER_CQE) &&
(qp->type == MLX5_IB_QPT_DCI || qp->type == IB_QPT_RC))
- configure_requester_scat_cqe(dev, init_attr, ucmd, qpc);
+ configure_requester_scat_cqe(dev, qp, init_attr, qpc);
if (qp->rq.wqe_cnt) {
MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
@@ -2130,7 +2126,6 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
u32 *in;
int err;
- mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
@@ -2342,18 +2337,18 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
unsigned long flags;
int err;
- if (qp->ibqp.rwq_ind_tbl) {
+ if (qp->is_rss) {
destroy_rss_raw_qp_tir(dev, qp);
return;
}
- base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+ base = (qp->type == IB_QPT_RAW_PACKET ||
qp->flags & IB_QP_CREATE_SOURCE_QPN) ?
- &qp->raw_packet_qp.rq.base :
- &qp->trans_qp.base;
+ &qp->raw_packet_qp.rq.base :
+ &qp->trans_qp.base;
if (qp->state != IB_QPS_RESET) {
- if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
+ if (qp->type != IB_QPT_RAW_PACKET &&
!(qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
err = mlx5_core_qp_modify(dev, MLX5_CMD_OP_2RST_QP, 0,
NULL, &base->mqp, NULL);
@@ -2369,8 +2364,8 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
base->mqp.qpn);
}
- get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
- &send_cq, &recv_cq);
+ get_cqs(qp->type, qp->ibqp.send_cq, qp->ibqp.recv_cq, &send_cq,
+ &recv_cq);
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx5_ib_lock_cqs(send_cq, recv_cq);
@@ -2392,7 +2387,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
mlx5_ib_unlock_cqs(send_cq, recv_cq);
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
- if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
+ if (qp->type == IB_QPT_RAW_PACKET ||
qp->flags & IB_QP_CREATE_SOURCE_QPN) {
destroy_raw_packet_qp(dev, qp);
} else {
@@ -2544,13 +2539,18 @@ static void process_vendor_flag(struct mlx5_ib_dev *dev, int *flags, int flag,
return;
}
- if (flag == MLX5_QP_FLAG_SCATTER_CQE) {
+ switch (flag) {
+ case MLX5_QP_FLAG_SCATTER_CQE:
+ case MLX5_QP_FLAG_ALLOW_SCATTER_CQE:
/*
- * We don't return error if this flag was provided,
- * and mlx5 doesn't have right capability.
- */
- *flags &= ~MLX5_QP_FLAG_SCATTER_CQE;
+ * We don't return error if these flags were provided,
+ * and mlx5 doesn't have right capability.
+ */
+ *flags &= ~(MLX5_QP_FLAG_SCATTER_CQE |
+ MLX5_QP_FLAG_ALLOW_SCATTER_CQE);
return;
+ default:
+ break;
}
mlx5_ib_dbg(dev, "Vendor create QP flag 0x%X is not supported\n", flag);
}
@@ -2590,6 +2590,8 @@ static int process_vendor_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SIGNATURE, true, qp);
process_vendor_flag(dev, &flags, MLX5_QP_FLAG_SCATTER_CQE,
MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
+ process_vendor_flag(dev, &flags, MLX5_QP_FLAG_ALLOW_SCATTER_CQE,
+ MLX5_CAP_GEN(mdev, sctr_data_cqe), qp);
if (qp->type == IB_QPT_RAW_PACKET) {
cond = MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) ||
@@ -2669,6 +2671,13 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
if (qp_type == IB_QPT_RAW_PACKET && attr->rwq_ind_tbl)
return (create_flags) ? -EINVAL : 0;
+ process_create_flag(dev, &create_flags, IB_QP_CREATE_NETIF_QP,
+ mlx5_get_flow_namespace(dev->mdev,
+ MLX5_FLOW_NAMESPACE_BYPASS),
+ qp);
+ process_create_flag(dev, &create_flags,
+ IB_QP_CREATE_INTEGRITY_EN,
+ MLX5_CAP_GEN(mdev, sho), qp);
process_create_flag(dev, &create_flags,
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
MLX5_CAP_GEN(mdev, block_lb_mc), qp);
@@ -2874,7 +2883,6 @@ static int mlx5_ib_destroy_dct(struct mlx5_ib_qp *mqp)
static int check_ucmd_data(struct mlx5_ib_dev *dev,
struct mlx5_create_qp_params *params)
{
- struct ib_qp_init_attr *attr = params->attr;
struct ib_udata *udata = params->udata;
size_t size, last;
int ret;
@@ -2886,14 +2894,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
*/
last = sizeof(struct mlx5_ib_create_qp_rss);
else
- /* IB_QPT_RAW_PACKET doesn't have ECE data */
- switch (attr->qp_type) {
- case IB_QPT_RAW_PACKET:
- last = offsetof(struct mlx5_ib_create_qp, ece_options);
- break;
- default:
- last = offsetof(struct mlx5_ib_create_qp, reserved);
- }
+ last = offsetof(struct mlx5_ib_create_qp, reserved);
if (udata->inlen <= last)
return 0;
@@ -2908,7 +2909,7 @@ static int check_ucmd_data(struct mlx5_ib_dev *dev,
if (!ret)
mlx5_ib_dbg(
dev,
- "udata is not cleared, inlen = %lu, ucmd = %lu, last = %lu, size = %lu\n",
+ "udata is not cleared, inlen = %zu, ucmd = %zu, last = %zu, size = %zu\n",
udata->inlen, params->ucmd_size, last, size);
return ret ? 0 : -EINVAL;
}
@@ -2965,6 +2966,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
goto free_ucmd;
}
+ mutex_init(&qp->mutex);
qp->type = type;
if (udata) {
err = process_vendor_flags(dev, qp, params.ucmd, attr);
@@ -3003,10 +3005,19 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attr,
return &qp->ibqp;
destroy_qp:
- if (qp->type == MLX5_IB_QPT_DCT)
+ if (qp->type == MLX5_IB_QPT_DCT) {
mlx5_ib_destroy_dct(qp);
- else
+ } else {
+ /*
+ * These lines below are temp solution till QP allocation
+ * will be moved to be under IB/core responsiblity.
+ */
+ qp->ibqp.send_cq = attr->send_cq;
+ qp->ibqp.recv_cq = attr->recv_cq;
+ qp->ibqp.pd = pd;
destroy_qp_common(dev, qp, udata);
+ }
+
qp = NULL;
free_qp:
kfree(qp);
@@ -4161,8 +4172,6 @@ static int mlx5_ib_modify_dct(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (udata->outlen < min_resp_len)
return -EINVAL;
- resp.response_length = min_resp_len;
-
/*
* If we don't have enough space for the ECE options,
* simply indicate it with resp.response_length.
@@ -4381,8 +4390,7 @@ static void to_rdma_ah_attr(struct mlx5_ib_dev *ibdev,
MLX5_GET(ads, path, src_addr_index),
MLX5_GET(ads, path, hop_limit),
MLX5_GET(ads, path, tclass));
- memcpy(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip),
- MLX5_FLD_SZ_BYTES(ads, rgid_rip));
+ rdma_ah_set_dgid_raw(ah_attr, MLX5_ADDR_OF(ads, path, rgid_rip));
}
}
diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c
index c19d91d6dce8..7c3968ef9cd1 100644
--- a/drivers/infiniband/hw/mlx5/qpc.c
+++ b/drivers/infiniband/hw/mlx5/qpc.c
@@ -346,6 +346,9 @@ static int get_ece_from_mbox(void *out, u16 opcode)
int ece = 0;
switch (opcode) {
+ case MLX5_CMD_OP_INIT2INIT_QP:
+ ece = MLX5_GET(init2init_qp_out, out, ece);
+ break;
case MLX5_CMD_OP_INIT2RTR_QP:
ece = MLX5_GET(init2rtr_qp_out, out, ece);
break;
@@ -355,6 +358,9 @@ static int get_ece_from_mbox(void *out, u16 opcode)
case MLX5_CMD_OP_RTS2RTS_QP:
ece = MLX5_GET(rts2rts_qp_out, out, ece);
break;
+ case MLX5_CMD_OP_RST2INIT_QP:
+ ece = MLX5_GET(rst2init_qp_out, out, ece);
+ break;
default:
break;
}
@@ -406,6 +412,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
return -ENOMEM;
MOD_QP_IN_SET_QPC(rst2init_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(rst2init_qp_in, mbox->in, ece, ece);
break;
case MLX5_CMD_OP_INIT2RTR_QP:
if (MBOX_ALLOC(mbox, init2rtr_qp))
@@ -439,6 +446,7 @@ static int modify_qp_mbox_alloc(struct mlx5_core_dev *dev, u16 opcode, int qpn,
return -ENOMEM;
MOD_QP_IN_SET_QPC(init2init_qp, mbox->in, opcode, qpn,
opt_param_mask, qpc, uid);
+ MLX5_SET(init2init_qp_in, mbox->in, ece, ece);
break;
default:
return -EINVAL;
diff --git a/drivers/infiniband/hw/mlx5/srq_cmd.c b/drivers/infiniband/hw/mlx5/srq_cmd.c
index 6f5eadc4d183..37aaacebd3f2 100644
--- a/drivers/infiniband/hw/mlx5/srq_cmd.c
+++ b/drivers/infiniband/hw/mlx5/srq_cmd.c
@@ -83,11 +83,11 @@ struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
struct mlx5_srq_table *table = &dev->srq_table;
struct mlx5_core_srq *srq;
- xa_lock(&table->array);
+ xa_lock_irq(&table->array);
srq = xa_load(&table->array, srqn);
if (srq)
refcount_inc(&srq->common.refcount);
- xa_unlock(&table->array);
+ xa_unlock_irq(&table->array);
return srq;
}
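
Note: mlx5_cmd_get_srq() now takes the xarray lock with interrupts disabled. That is the standard fix when the same lock is also acquired from interrupt context (here, presumably the SRQ async-event path): a process-context holder using plain xa_lock() could otherwise deadlock against the interrupt arriving on the same CPU. Resulting pattern:

xa_lock_irq(&table->array);
srq = xa_load(&table->array, srqn);
if (srq)
        refcount_inc(&srq->common.refcount);    /* reference taken under the lock */
xa_unlock_irq(&table->array);
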
diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c
index 4d4f8c22b3e6..43880973a512 100644
--- a/drivers/infiniband/hw/mlx5/wr.c
+++ b/drivers/infiniband/hw/mlx5/wr.c
@@ -1281,7 +1281,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
struct mlx5_wqe_xrc_seg *xrc;
struct mlx5_bf *bf;
void *cur_edge;
- int uninitialized_var(size);
+ int size;
unsigned long flags;
unsigned int idx;
int err = 0;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index d04c245359eb..c6e95d0d760a 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1639,8 +1639,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
* without initializing f0 and size0, and they are in fact
* never used uninitialized.
*/
- int uninitialized_var(size0);
- u32 uninitialized_var(f0);
+ int size0;
+ u32 f0;
int ind;
u8 op0 = 0;
@@ -1835,7 +1835,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
* without initializing size0, and it is in fact never used
* uninitialized.
*/
- int uninitialized_var(size0);
+ int size0;
int ind;
void *wqe;
void *prev_wqe;
@@ -1943,8 +1943,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
* without initializing f0 and size0, and they are in fact
* never used uninitialized.
*/
- int uninitialized_var(size0);
- u32 uninitialized_var(f0);
+ int size0;
+ u32 f0;
int ind;
u8 op0 = 0;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index c9eeed25c662..d85f992bac29 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -345,9 +345,14 @@ static void qedr_free_resources(struct qedr_dev *dev)
static int qedr_alloc_resources(struct qedr_dev *dev)
{
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U16,
+ .elem_size = sizeof(struct regpair *),
+ };
struct qedr_cnq *cnq;
__le16 *cons_pi;
- u16 n_entries;
int i, rc;
dev->sgid_tbl = kcalloc(QEDR_MAX_SGID, sizeof(union ib_gid),
@@ -381,7 +386,9 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
dev->sb_start = dev->ops->rdma_get_start_sb(dev->cdev);
/* Allocate CNQ PBLs */
- n_entries = min_t(u32, QED_RDMA_MAX_CNQ_SIZE, QEDR_ROCE_MAX_CNQ_SIZE);
+ params.num_elems = min_t(u32, QED_RDMA_MAX_CNQ_SIZE,
+ QEDR_ROCE_MAX_CNQ_SIZE);
+
for (i = 0; i < dev->num_cnq; i++) {
cnq = &dev->cnq_array[i];
@@ -390,13 +397,8 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
if (rc)
goto err3;
- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_CONSUME,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U16,
- n_entries,
- sizeof(struct regpair *),
- &cnq->pbl, NULL);
+ rc = dev->ops->common->chain_alloc(dev->cdev, &cnq->pbl,
+ &params);
if (rc)
goto err4;
diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
index 792eecd206b6..97fc7dd353b0 100644
--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
@@ -150,8 +150,17 @@ qedr_iw_issue_event(void *context,
if (params->cm_info) {
event.ird = params->cm_info->ird;
event.ord = params->cm_info->ord;
- event.private_data_len = params->cm_info->private_data_len;
- event.private_data = (void *)params->cm_info->private_data;
+ /* Only connect_request and reply have valid private data
+ * the rest of the events this may be left overs from
+ * connection establishment. CONNECT_REQUEST is issued via
+ * qedr_iw_mpa_request
+ */
+ if (event_type == IW_CM_EVENT_CONNECT_REPLY) {
+ event.private_data_len =
+ params->cm_info->private_data_len;
+ event.private_data =
+ (void *)params->cm_info->private_data;
+ }
}
if (ep->cm_id)
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index bd37eafb7cc4..4ce4e2eef6cc 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -895,6 +895,12 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
udata, struct qedr_ucontext, ibucontext);
struct qed_rdma_destroy_cq_out_params destroy_oparams;
struct qed_rdma_destroy_cq_in_params destroy_iparams;
+ struct qed_chain_init_params chain_params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
+ .elem_size = sizeof(union rdma_cqe),
+ };
struct qedr_dev *dev = get_qedr_dev(ibdev);
struct qed_rdma_create_cq_in_params params;
struct qedr_create_cq_ureq ureq = {};
@@ -921,6 +927,7 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
chain_entries = qedr_align_cq_entries(entries);
chain_entries = min_t(int, chain_entries, QEDR_MAX_CQES);
+ chain_params.num_elems = chain_entries;
/* calc db offset. user will add DPI base, kernel will add db addr */
db_offset = DB_ADDR_SHIFT(DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT);
@@ -955,13 +962,8 @@ int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
} else {
cq->cq_type = QEDR_CQ_TYPE_KERNEL;
- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_CONSUME,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- chain_entries,
- sizeof(union rdma_cqe),
- &cq->pbl, NULL);
+ rc = dev->ops->common->chain_alloc(dev->cdev, &cq->pbl,
+ &chain_params);
if (rc)
goto err0;
@@ -1450,6 +1452,12 @@ static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
struct ib_srq_init_attr *init_attr)
{
struct qedr_srq_hwq_info *hw_srq = &srq->hw_srq;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
+ .elem_size = QEDR_SRQ_WQE_ELEM_SIZE,
+ };
dma_addr_t phy_prod_pair_addr;
u32 num_elems;
void *va;
@@ -1468,13 +1476,9 @@ static int qedr_alloc_srq_kernel_params(struct qedr_srq *srq,
hw_srq->virt_prod_pair_addr = va;
num_elems = init_attr->attr.max_wr * RDMA_MAX_SRQ_WQE_SIZE;
- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- num_elems,
- QEDR_SRQ_WQE_ELEM_SIZE,
- &hw_srq->pbl, NULL);
+ params.num_elems = num_elems;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev, &hw_srq->pbl, &params);
if (rc)
goto err0;
@@ -1908,29 +1912,28 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
u32 n_sq_elems, u32 n_rq_elems)
{
struct qed_rdma_create_qp_out_params out_params;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
+ };
int rc;
- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- n_sq_elems,
- QEDR_SQE_ELEMENT_SIZE,
- &qp->sq.pbl, NULL);
+ params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
+ params.num_elems = n_sq_elems;
+ params.elem_size = QEDR_SQE_ELEMENT_SIZE;
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
if (rc)
return rc;
in_params->sq_num_pages = qed_chain_get_page_cnt(&qp->sq.pbl);
in_params->sq_pbl_ptr = qed_chain_get_pbl_phys(&qp->sq.pbl);
- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- n_rq_elems,
- QEDR_RQE_ELEMENT_SIZE,
- &qp->rq.pbl, NULL);
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+ params.num_elems = n_rq_elems;
+ params.elem_size = QEDR_RQE_ELEMENT_SIZE;
+
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
if (rc)
return rc;
@@ -1956,14 +1959,19 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
u32 n_sq_elems, u32 n_rq_elems)
{
struct qed_rdma_create_qp_out_params out_params;
- struct qed_chain_ext_pbl ext_pbl;
+ struct qed_chain_init_params params = {
+ .mode = QED_CHAIN_MODE_PBL,
+ .cnt_type = QED_CHAIN_CNT_TYPE_U32,
+ };
int rc;
in_params->sq_num_pages = QED_CHAIN_PAGE_CNT(n_sq_elems,
QEDR_SQE_ELEMENT_SIZE,
+ QED_CHAIN_PAGE_SIZE,
QED_CHAIN_MODE_PBL);
in_params->rq_num_pages = QED_CHAIN_PAGE_CNT(n_rq_elems,
QEDR_RQE_ELEMENT_SIZE,
+ QED_CHAIN_PAGE_SIZE,
QED_CHAIN_MODE_PBL);
qp->qed_qp = dev->ops->rdma_create_qp(dev->rdma_ctx,
@@ -1973,31 +1981,24 @@ qedr_iwarp_create_kernel_qp(struct qedr_dev *dev,
return -EINVAL;
/* Now we allocate the chain */
- ext_pbl.p_pbl_virt = out_params.sq_pbl_virt;
- ext_pbl.p_pbl_phys = out_params.sq_pbl_phys;
- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- n_sq_elems,
- QEDR_SQE_ELEMENT_SIZE,
- &qp->sq.pbl, &ext_pbl);
+ params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
+ params.num_elems = n_sq_elems;
+ params.elem_size = QEDR_SQE_ELEMENT_SIZE;
+ params.ext_pbl_virt = out_params.sq_pbl_virt;
+ params.ext_pbl_phys = out_params.sq_pbl_phys;
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);
if (rc)
goto err;
- ext_pbl.p_pbl_virt = out_params.rq_pbl_virt;
- ext_pbl.p_pbl_phys = out_params.rq_pbl_phys;
-
- rc = dev->ops->common->chain_alloc(dev->cdev,
- QED_CHAIN_USE_TO_CONSUME_PRODUCE,
- QED_CHAIN_MODE_PBL,
- QED_CHAIN_CNT_TYPE_U32,
- n_rq_elems,
- QEDR_RQE_ELEMENT_SIZE,
- &qp->rq.pbl, &ext_pbl);
+ params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
+ params.num_elems = n_rq_elems;
+ params.elem_size = QEDR_RQE_ELEMENT_SIZE;
+ params.ext_pbl_virt = out_params.rq_pbl_virt;
+ params.ext_pbl_phys = out_params.rq_pbl_phys;
+ rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
if (rc)
goto err;
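
Note: across qedr (main.c, verbs.c), the qed chain_alloc() interface change collapses seven positional arguments plus the optional qed_chain_ext_pbl into a single qed_chain_init_params structure; the iWARP kernel-QP path above shows the payoff, reusing one struct for SQ and RQ with only the per-ring fields rewritten, and folding the external PBL pointers in as ext_pbl_virt/ext_pbl_phys. Condensed shape of the new call sequence:

struct qed_chain_init_params params = {
        .mode     = QED_CHAIN_MODE_PBL,
        .cnt_type = QED_CHAIN_CNT_TYPE_U32,
};

params.intended_use = QED_CHAIN_USE_TO_PRODUCE;
params.num_elems    = n_sq_elems;
params.elem_size    = QEDR_SQE_ELEMENT_SIZE;
rc = dev->ops->common->chain_alloc(dev->cdev, &qp->sq.pbl, &params);

params.intended_use = QED_CHAIN_USE_TO_CONSUME_PRODUCE;
params.num_elems    = n_rq_elems;
params.elem_size    = QEDR_RQE_ELEMENT_SIZE;
rc = dev->ops->common->chain_alloc(dev->cdev, &qp->rq.pbl, &params);
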