From 6b90a6d66b17bfe09351e18c705cb4a2ed147300 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:18 +0200 Subject: IB/Verbs: Implement new callback query_protocol() Add new callback query_protocol() and implement for each HW. Mapping List: node-type link-layer transport protocol nes RNIC ETH IWARP IWARP amso1100 RNIC ETH IWARP IWARP cxgb3 RNIC ETH IWARP IWARP cxgb4 RNIC ETH IWARP IWARP usnic USNIC_UDP ETH USNIC_UDP USNIC_UDP ocrdma IB_CA ETH IB IBOE mlx4 IB_CA IB/ETH IB IB/IBOE mlx5 IB_CA IB IB IB ehca IB_CA IB IB IB ipath IB_CA IB IB IB mthca IB_CA IB IB IB qib IB_CA IB IB IB Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- include/rdma/ib_verbs.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'include/rdma') diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 65994a19e840..080f204273e4 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -81,6 +81,13 @@ enum rdma_transport_type { RDMA_TRANSPORT_USNIC_UDP }; +enum rdma_protocol_type { + RDMA_PROTOCOL_IB, + RDMA_PROTOCOL_IBOE, + RDMA_PROTOCOL_IWARP, + RDMA_PROTOCOL_USNIC_UDP +}; + __attribute_const__ enum rdma_transport_type rdma_node_get_transport(enum rdma_node_type node_type); @@ -1501,6 +1508,8 @@ struct ib_device { int (*query_port)(struct ib_device *device, u8 port_num, struct ib_port_attr *port_attr); + enum rdma_protocol_type (*query_protocol)(struct ib_device *device, + u8 port_num); enum rdma_link_layer (*get_link_layer)(struct ib_device *device, u8 port_num); int (*query_gid)(struct ib_device *device, -- cgit v1.2.3 From de66be94749d75c2f3578f3d01f91d31a8eb85ef Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:19 +0200 Subject: IB/Verbs: Implement raw management helpers Add raw helpers: rdma_protocol_ib rdma_protocol_iboe rdma_protocol_iwarp rdma_ib_or_iboe (transition, clean up later) To help us detect which technology the port supported. 
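Neither of these two patches shows a driver-side implementation of the new callback, so here is a minimal, purely illustrative sketch of what one could look like for a dual-protocol adapter such as mlx4 in the mapping list above. The function name example_query_protocol is hypothetical; real drivers derive the answer from their own port-type state rather than calling back into the core.

/* Illustrative only: report the per-port protocol from the port's link layer. */
static enum rdma_protocol_type example_query_protocol(struct ib_device *device,
						      u8 port_num)
{
	if (rdma_port_get_link_layer(device, port_num) ==
	    IB_LINK_LAYER_ETHERNET)
		return RDMA_PROTOCOL_IBOE;	/* RoCE port */

	return RDMA_PROTOCOL_IB;		/* native IB port */
}

A single-protocol device (for example an iWARP RNIC) would simply return RDMA_PROTOCOL_IWARP unconditionally and assign its function to the new query_protocol member when registering its struct ib_device.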
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- include/rdma/ib_verbs.h | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) (limited to 'include/rdma') diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 080f204273e4..e6dd9846b6c2 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1752,6 +1752,28 @@ int ib_query_port(struct ib_device *device, enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num); +static inline bool rdma_protocol_ib(struct ib_device *device, u8 port_num) +{ + return device->query_protocol(device, port_num) == RDMA_PROTOCOL_IB; +} + +static inline bool rdma_protocol_iboe(struct ib_device *device, u8 port_num) +{ + return device->query_protocol(device, port_num) == RDMA_PROTOCOL_IBOE; +} + +static inline bool rdma_protocol_iwarp(struct ib_device *device, u8 port_num) +{ + return device->query_protocol(device, port_num) == RDMA_PROTOCOL_IWARP; +} + +static inline bool rdma_ib_or_iboe(struct ib_device *device, u8 port_num) +{ + enum rdma_protocol_type pt = device->query_protocol(device, port_num); + + return (pt == RDMA_PROTOCOL_IB || pt == RDMA_PROTOCOL_IBOE); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); -- cgit v1.2.3 From c757dea816407dc472452091e3ea941cef6638a2 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:32 +0200 Subject: IB/Verbs: Use management helper rdma_cap_ib_mad() Introduce helper rdma_cap_ib_mad() to help us check if the port of an IB device support Infiniband Management Datagrams. Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/mad.c | 6 +++--- drivers/infiniband/core/user_mad.c | 6 +++--- include/rdma/ib_verbs.h | 15 +++++++++++++++ 3 files changed, 21 insertions(+), 6 deletions(-) (limited to 'include/rdma') diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 507eb67d14cc..80777cdaaccc 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -3066,7 +3066,7 @@ static void ib_mad_init_device(struct ib_device *device) } for (i = start; i <= end; i++) { - if (!rdma_ib_or_iboe(device, i)) + if (!rdma_cap_ib_mad(device, i)) continue; if (ib_mad_port_open(device, i)) { @@ -3087,7 +3087,7 @@ error_agent: error: while (--i >= start) { - if (!rdma_ib_or_iboe(device, i)) + if (!rdma_cap_ib_mad(device, i)) continue; if (ib_agent_port_close(device, i)) @@ -3111,7 +3111,7 @@ static void ib_mad_remove_device(struct ib_device *device) } for (i = start; i <= end; i++) { - if (!rdma_ib_or_iboe(device, i)) + if (!rdma_cap_ib_mad(device, i)) continue; if (ib_agent_port_close(device, i)) diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index aa8b334c8dce..d451717047db 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -1294,7 +1294,7 @@ static void ib_umad_add_one(struct ib_device *device) umad_dev->end_port = e; for (i = s; i <= e; ++i) { - if (!rdma_ib_or_iboe(device, i)) + if (!rdma_cap_ib_mad(device, i)) continue; umad_dev->port[i - s].umad_dev = umad_dev; @@ -1315,7 +1315,7 @@ static void ib_umad_add_one(struct ib_device *device) err: while (--i >= s) { - if (!rdma_ib_or_iboe(device, i)) + if 
(!rdma_cap_ib_mad(device, i)) continue; ib_umad_kill_port(&umad_dev->port[i - s]); @@ -1333,7 +1333,7 @@ static void ib_umad_remove_one(struct ib_device *device) return; for (i = 0; i <= umad_dev->end_port - umad_dev->start_port; ++i) { - if (rdma_ib_or_iboe(device, i)) + if (rdma_cap_ib_mad(device, i)) ib_umad_kill_port(&umad_dev->port[i]); } diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index e6dd9846b6c2..23ba66e25b7f 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1774,6 +1774,21 @@ static inline bool rdma_ib_or_iboe(struct ib_device *device, u8 port_num) return (pt == RDMA_PROTOCOL_IB || pt == RDMA_PROTOCOL_IBOE); } +/** + * rdma_cap_ib_mad - Check if the port of device has the capability Infiniband + * Management Datagrams. + * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support Infiniband + * Management Datagrams. + */ +static inline bool rdma_cap_ib_mad(struct ib_device *device, u8 port_num) +{ + return rdma_ib_or_iboe(device, port_num); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); -- cgit v1.2.3 From 29541e3add4d03682b78311823a5426e82019cda Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:33 +0200 Subject: IB/Verbs: Use management helper rdma_cap_ib_smi() Introduce helper rdma_cap_ib_smi() to help us check if the port of an IB device support Infiniband Subnet Management Interface. Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/agent.c | 2 +- drivers/infiniband/core/mad.c | 2 +- include/rdma/ib_verbs.h | 15 +++++++++++++++ 3 files changed, 17 insertions(+), 2 deletions(-) (limited to 'include/rdma') diff --git a/drivers/infiniband/core/agent.c b/drivers/infiniband/core/agent.c index 89d4fbcfc8d3..a6fc4d6dc7d7 100644 --- a/drivers/infiniband/core/agent.c +++ b/drivers/infiniband/core/agent.c @@ -156,7 +156,7 @@ int ib_agent_port_open(struct ib_device *device, int port_num) goto error1; } - if (rdma_protocol_ib(device, port_num)) { + if (rdma_cap_ib_smi(device, port_num)) { /* Obtain send only MAD agent for SMI QP */ port_priv->agent[0] = ib_register_mad_agent(device, port_num, IB_QPT_SMI, NULL, 0, diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 80777cdaaccc..e9699c9942a9 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -2938,7 +2938,7 @@ static int ib_mad_port_open(struct ib_device *device, init_mad_qp(port_priv, &port_priv->qp_info[1]); cq_size = mad_sendq_size + mad_recvq_size; - has_smi = rdma_protocol_ib(device, port_num); + has_smi = rdma_cap_ib_smi(device, port_num); if (has_smi) cq_size *= 2; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 23ba66e25b7f..e983e335af21 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1789,6 +1789,21 @@ static inline bool rdma_cap_ib_mad(struct ib_device *device, u8 port_num) return rdma_ib_or_iboe(device, port_num); } +/** + * rdma_cap_ib_smi - Check if the port of device has the capability Infiniband + * Subnet Management Interface. + * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support Infiniband + * Subnet Management Interface. 
+ */ +static inline bool rdma_cap_ib_smi(struct ib_device *device, u8 port_num) +{ + return rdma_protocol_ib(device, port_num); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); -- cgit v1.2.3 From 72219cea8e246a55bff92e5ff6ec21f331a8791e Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:34 +0200 Subject: IB/Verbs: Use management helper rdma_cap_ib_cm() Introduce helper rdma_cap_ib_cm() to help us check if the port of an IB device support Infiniband Communication Manager. Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cm.c | 6 +++--- drivers/infiniband/core/cma.c | 19 +++++++++---------- drivers/infiniband/core/ucm.c | 2 +- include/rdma/ib_verbs.h | 15 +++++++++++++++ 4 files changed, 28 insertions(+), 14 deletions(-) (limited to 'include/rdma') diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index cfcc7f451185..14423c20c55b 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3780,7 +3780,7 @@ static void cm_add_one(struct ib_device *ib_device) set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask); for (i = 1; i <= ib_device->phys_port_cnt; i++) { - if (!rdma_ib_or_iboe(ib_device, i)) + if (!rdma_cap_ib_cm(ib_device, i)) continue; port = kzalloc(sizeof *port, GFP_KERNEL); @@ -3831,7 +3831,7 @@ error1: port_modify.set_port_cap_mask = 0; port_modify.clr_port_cap_mask = IB_PORT_CM_SUP; while (--i) { - if (!rdma_ib_or_iboe(ib_device, i)) + if (!rdma_cap_ib_cm(ib_device, i)) continue; port = cm_dev->port[i-1]; @@ -3863,7 +3863,7 @@ static void cm_remove_one(struct ib_device *ib_device) write_unlock_irqrestore(&cm.device_lock, flags); for (i = 1; i <= ib_device->phys_port_cnt; i++) { - if (!rdma_ib_or_iboe(ib_device, i)) + if (!rdma_cap_ib_cm(ib_device, i)) continue; port = cm_dev->port[i-1]; diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index da7e55f8097f..754a96b66608 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -745,7 +745,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, int ret = 0; id_priv = container_of(id, struct rdma_id_private, id); - if (rdma_ib_or_iboe(id->device, id->port_num)) { + if (rdma_cap_ib_cm(id->device, id->port_num)) { if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); else @@ -1040,7 +1040,7 @@ void rdma_destroy_id(struct rdma_cm_id *id) mutex_unlock(&id_priv->handler_mutex); if (id_priv->cma_dev) { - if (rdma_ib_or_iboe(id_priv->id.device, 1)) { + if (rdma_cap_ib_cm(id_priv->id.device, 1)) { if (id_priv->cm_id.ib) ib_destroy_cm_id(id_priv->cm_id.ib); } else if (rdma_protocol_iwarp(id_priv->id.device, 1)) { @@ -1623,8 +1623,7 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, struct rdma_cm_id *id; int ret; - if (cma_family(id_priv) == AF_IB && - !rdma_ib_or_iboe(cma_dev->device, 1)) + if (cma_family(id_priv) == AF_IB && !rdma_cap_ib_cm(cma_dev->device, 1)) return; id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps, @@ -2015,7 +2014,7 @@ static int cma_bind_loopback(struct rdma_id_private *id_priv) mutex_lock(&lock); list_for_each_entry(cur_dev, &dev_list, list) { if (cma_family(id_priv) == AF_IB && - !rdma_ib_or_iboe(cur_dev->device, 1)) + !rdma_cap_ib_cm(cur_dev->device, 1)) continue; if (!cma_dev) @@ -2524,7 
+2523,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog) id_priv->backlog = backlog; if (id->device) { - if (rdma_ib_or_iboe(id->device, 1)) { + if (rdma_cap_ib_cm(id->device, 1)) { ret = cma_ib_listen(id_priv); if (ret) goto err; @@ -2868,7 +2867,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) id_priv->srq = conn_param->srq; } - if (rdma_ib_or_iboe(id->device, id->port_num)) { + if (rdma_cap_ib_cm(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) ret = cma_resolve_ib_udp(id_priv, conn_param); else @@ -2979,7 +2978,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) id_priv->srq = conn_param->srq; } - if (rdma_ib_or_iboe(id->device, id->port_num)) { + if (rdma_cap_ib_cm(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) { if (conn_param) ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, @@ -3042,7 +3041,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data, if (!id_priv->cm_id.ib) return -EINVAL; - if (rdma_ib_or_iboe(id->device, id->port_num)) { + if (rdma_cap_ib_cm(id->device, id->port_num)) { if (id->qp_type == IB_QPT_UD) ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, 0, private_data, private_data_len); @@ -3069,7 +3068,7 @@ int rdma_disconnect(struct rdma_cm_id *id) if (!id_priv->cm_id.ib) return -EINVAL; - if (rdma_ib_or_iboe(id->device, id->port_num)) { + if (rdma_cap_ib_cm(id->device, id->port_num)) { ret = cma_modify_qp_err(id_priv); if (ret) goto out; diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c index 70e0ccb77fca..62c24b1452b8 100644 --- a/drivers/infiniband/core/ucm.c +++ b/drivers/infiniband/core/ucm.c @@ -1253,7 +1253,7 @@ static void ib_ucm_add_one(struct ib_device *device) dev_t base; struct ib_ucm_device *ucm_dev; - if (!device->alloc_ucontext || !rdma_ib_or_iboe(device, 1)) + if (!device->alloc_ucontext || !rdma_cap_ib_cm(device, 1)) return; ucm_dev = kzalloc(sizeof *ucm_dev, GFP_KERNEL); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index e983e335af21..e349596fd500 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1804,6 +1804,21 @@ static inline bool rdma_cap_ib_smi(struct ib_device *device, u8 port_num) return rdma_protocol_ib(device, port_num); } +/** + * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband + * Communication Manager. + * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support Infiniband + * Communication Manager. + */ +static inline bool rdma_cap_ib_cm(struct ib_device *device, u8 port_num) +{ + return rdma_ib_or_iboe(device, port_num); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); -- cgit v1.2.3 From 042153306d9d08da67459f187d63a68aefd97388 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:35 +0200 Subject: IB/Verbs: Use management helper rdma_cap_iw_cm() Introduce helper rdma_cap_iw_cm() to help us check if the port of an IB device support IWARP Communication Manager. 
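Taken together with rdma_cap_ib_cm() from the previous patch, a consumer can now ask which connection manager a port needs instead of reasoning about transports. A hypothetical sketch of that dispatch (example_cm_kind is not a real kernel function):

static const char *example_cm_kind(struct ib_device *device, u8 port_num)
{
	if (rdma_cap_ib_cm(device, port_num))
		return "ib_cm";		/* IB and RoCE ports use the IB CM */
	if (rdma_cap_iw_cm(device, port_num))
		return "iw_cm";		/* iWARP ports use the iWARP CM */
	return "none";			/* e.g. usNIC: no kernel CM service */
}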
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 14 +++++++------- include/rdma/ib_verbs.h | 15 +++++++++++++++ 2 files changed, 22 insertions(+), 7 deletions(-) (limited to 'include/rdma') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 754a96b66608..3998e8bdfcdd 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -754,7 +754,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, if (qp_attr->qp_state == IB_QPS_RTR) qp_attr->rq_psn = id_priv->seq_num; - } else if (rdma_protocol_iwarp(id->device, id->port_num)) { + } else if (rdma_cap_iw_cm(id->device, id->port_num)) { if (!id_priv->cm_id.iw) { qp_attr->qp_access_flags = 0; *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS; @@ -1043,7 +1043,7 @@ void rdma_destroy_id(struct rdma_cm_id *id) if (rdma_cap_ib_cm(id_priv->id.device, 1)) { if (id_priv->cm_id.ib) ib_destroy_cm_id(id_priv->cm_id.ib); - } else if (rdma_protocol_iwarp(id_priv->id.device, 1)) { + } else if (rdma_cap_iw_cm(id_priv->id.device, 1)) { if (id_priv->cm_id.iw) iw_destroy_cm_id(id_priv->cm_id.iw); } @@ -2527,7 +2527,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog) ret = cma_ib_listen(id_priv); if (ret) goto err; - } else if (rdma_protocol_iwarp(id->device, 1)) { + } else if (rdma_cap_iw_cm(id->device, 1)) { ret = cma_iw_listen(id_priv, backlog); if (ret) goto err; @@ -2872,7 +2872,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) ret = cma_resolve_ib_udp(id_priv, conn_param); else ret = cma_connect_ib(id_priv, conn_param); - } else if (rdma_protocol_iwarp(id->device, id->port_num)) + } else if (rdma_cap_iw_cm(id->device, id->port_num)) ret = cma_connect_iw(id_priv, conn_param); else ret = -ENOSYS; @@ -2994,7 +2994,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) else ret = cma_rep_recv(id_priv); } - } else if (rdma_protocol_iwarp(id->device, id->port_num)) + } else if (rdma_cap_iw_cm(id->device, id->port_num)) ret = cma_accept_iw(id_priv, conn_param); else ret = -ENOSYS; @@ -3049,7 +3049,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data, ret = ib_send_cm_rej(id_priv->cm_id.ib, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0, private_data, private_data_len); - } else if (rdma_protocol_iwarp(id->device, id->port_num)) { + } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = iw_cm_reject(id_priv->cm_id.iw, private_data, private_data_len); } else @@ -3075,7 +3075,7 @@ int rdma_disconnect(struct rdma_cm_id *id) /* Initiate or respond to a disconnect. */ if (ib_send_cm_dreq(id_priv->cm_id.ib, NULL, 0)) ib_send_cm_drep(id_priv->cm_id.ib, NULL, 0); - } else if (rdma_protocol_iwarp(id->device, id->port_num)) { + } else if (rdma_cap_iw_cm(id->device, id->port_num)) { ret = iw_cm_disconnect(id_priv->cm_id.iw, 0); } else ret = -EINVAL; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index e349596fd500..cc92a6489136 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1819,6 +1819,21 @@ static inline bool rdma_cap_ib_cm(struct ib_device *device, u8 port_num) return rdma_ib_or_iboe(device, port_num); } +/** + * rdma_cap_iw_cm - Check if the port of device has the capability IWARP + * Communication Manager. 
+ * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support IWARP + * Communication Manager. + */ +static inline bool rdma_cap_iw_cm(struct ib_device *device, u8 port_num) +{ + return rdma_protocol_iwarp(device, port_num); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); -- cgit v1.2.3 From fe53ba2f0c3de0416422407bab2c1982a2e85b6a Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:36 +0200 Subject: IB/Verbs: Use management helper rdma_cap_ib_sa() Introduce helper rdma_cap_ib_sa() to help us check if the port of an IB device support Infiniband Subnet Administration. Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 4 ++-- drivers/infiniband/core/sa_query.c | 10 +++++----- drivers/infiniband/core/ucma.c | 2 +- include/rdma/ib_verbs.h | 15 +++++++++++++++ 4 files changed, 23 insertions(+), 8 deletions(-) (limited to 'include/rdma') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 3998e8bdfcdd..6d6546063df2 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -940,7 +940,7 @@ static inline int cma_user_data_offset(struct rdma_id_private *id_priv) static void cma_cancel_route(struct rdma_id_private *id_priv) { - if (rdma_protocol_ib(id_priv->id.device, id_priv->id.port_num)) { + if (rdma_cap_ib_sa(id_priv->id.device, id_priv->id.port_num)) { if (id_priv->query) ib_sa_cancel_query(id_priv->query_id, id_priv->query); } @@ -1964,7 +1964,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) return -EINVAL; atomic_inc(&id_priv->refcount); - if (rdma_protocol_ib(id->device, id->port_num)) + if (rdma_cap_ib_sa(id->device, id->port_num)) ret = cma_resolve_ib_route(id_priv, timeout_ms); else if (rdma_protocol_iboe(id->device, id->port_num)) ret = cma_resolve_iboe_route(id_priv); diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index b115c2835264..30aa5e5e08f2 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -450,7 +450,7 @@ static void ib_sa_event(struct ib_event_handler *handler, struct ib_event *event struct ib_sa_port *port = &sa_dev->port[event->element.port_num - sa_dev->start_port]; - if (WARN_ON(!rdma_protocol_ib(handler->device, port->port_num))) + if (WARN_ON(!rdma_cap_ib_sa(handler->device, port->port_num))) return; spin_lock_irqsave(&port->ah_lock, flags); @@ -1173,7 +1173,7 @@ static void ib_sa_add_one(struct ib_device *device) for (i = 0; i <= e - s; ++i) { spin_lock_init(&sa_dev->port[i].ah_lock); - if (!rdma_protocol_ib(device, i + 1)) + if (!rdma_cap_ib_sa(device, i + 1)) continue; sa_dev->port[i].sm_ah = NULL; @@ -1208,7 +1208,7 @@ static void ib_sa_add_one(struct ib_device *device) goto err; for (i = 0; i <= e - s; ++i) { - if (rdma_protocol_ib(device, i + 1)) + if (rdma_cap_ib_sa(device, i + 1)) update_sm_ah(&sa_dev->port[i].update_task); } @@ -1216,7 +1216,7 @@ static void ib_sa_add_one(struct ib_device *device) err: while (--i >= 0) { - if (rdma_protocol_ib(device, i + 1)) + if (rdma_cap_ib_sa(device, i + 1)) ib_unregister_mad_agent(sa_dev->port[i].agent); } free: @@ -1237,7 +1237,7 @@ static void ib_sa_remove_one(struct ib_device *device) flush_workqueue(ib_wq); for (i = 0; i <= sa_dev->end_port - sa_dev->start_port; ++i) { - if 
(rdma_protocol_ib(device, i + 1)) { + if (rdma_cap_ib_sa(device, i + 1)) { ib_unregister_mad_agent(sa_dev->port[i].agent); if (sa_dev->port[i].sm_ah) kref_put(&sa_dev->port[i].sm_ah->ref, free_sm_ah); diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index dae762059bc3..d42b816c781f 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -723,7 +723,7 @@ static ssize_t ucma_query_route(struct ucma_file *file, resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid; resp.port_num = ctx->cm_id->port_num; - if (rdma_protocol_ib(ctx->cm_id->device, ctx->cm_id->port_num)) + if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_ib_route(&resp, &ctx->cm_id->route); else if (rdma_protocol_iboe(ctx->cm_id->device, ctx->cm_id->port_num)) ucma_copy_iboe_route(&resp, &ctx->cm_id->route); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index cc92a6489136..c3a561e891b1 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1834,6 +1834,21 @@ static inline bool rdma_cap_iw_cm(struct ib_device *device, u8 port_num) return rdma_protocol_iwarp(device, port_num); } +/** + * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband + * Subnet Administration. + * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support Infiniband + * Subnet Administration. + */ +static inline bool rdma_cap_ib_sa(struct ib_device *device, u8 port_num) +{ + return rdma_protocol_ib(device, port_num); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); -- cgit v1.2.3 From a31ad3b0e35f7e340c1ab6668080cff91d48c90a Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:37 +0200 Subject: IB/Verbs: Use management helper rdma_cap_ib_mcast() Introduce helper rdma_cap_ib_mcast() to help us check if the port of an IB device support Infiniband Multicast. 
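The typical consumer pattern, visible in the multicast hunks below, is a per-port loop that skips ports without the capability. A condensed, hypothetical version is sketched here (example_setup_mcast and its elided body are illustrative; for brevity it ignores the switch port-0 special case that a later patch in this series folds into rdma_start_port()/rdma_end_port()):

static void example_setup_mcast(struct ib_device *device)
{
	u8 p;

	for (p = 1; p <= device->phys_port_cnt; ++p) {
		if (!rdma_cap_ib_mcast(device, p))
			continue;	/* e.g. iWARP port: no SA-based join needed */

		/* allocate per-port multicast join/leave tracking state here */
	}
}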
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 6 +++--- drivers/infiniband/core/multicast.c | 6 +++--- include/rdma/ib_verbs.h | 15 +++++++++++++++ 3 files changed, 21 insertions(+), 6 deletions(-) (limited to 'include/rdma') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 6d6546063df2..78becc79f13c 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1014,7 +1014,7 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv) mc = container_of(id_priv->mc_list.next, struct cma_multicast, list); list_del(&mc->list); - if (rdma_protocol_ib(id_priv->cma_dev->device, + if (rdma_cap_ib_mcast(id_priv->cma_dev->device, id_priv->id.port_num)) { ib_sa_free_multicast(mc->multicast.ib); kfree(mc); @@ -3328,7 +3328,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, if (rdma_protocol_iboe(id->device, id->port_num)) { kref_init(&mc->mcref); ret = cma_iboe_join_multicast(id_priv, mc); - } else if (rdma_protocol_ib(id->device, id->port_num)) + } else if (rdma_cap_ib_mcast(id->device, id->port_num)) ret = cma_join_ib_multicast(id_priv, mc); else ret = -ENOSYS; @@ -3362,7 +3362,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr) BUG_ON(id_priv->cma_dev->device != id->device); - if (rdma_protocol_ib(id->device, id->port_num)) { + if (rdma_cap_ib_mcast(id->device, id->port_num)) { ib_sa_free_multicast(mc->multicast.ib); kfree(mc); } else if (rdma_protocol_iboe(id->device, id->port_num)) diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index b57ed03a487e..605f20a9af85 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -780,7 +780,7 @@ static void mcast_event_handler(struct ib_event_handler *handler, int index; dev = container_of(handler, struct mcast_device, event_handler); - if (WARN_ON(!rdma_protocol_ib(dev->device, event->element.port_num))) + if (WARN_ON(!rdma_cap_ib_mcast(dev->device, event->element.port_num))) return; index = event->element.port_num - dev->start_port; @@ -820,7 +820,7 @@ static void mcast_add_one(struct ib_device *device) } for (i = 0; i <= dev->end_port - dev->start_port; i++) { - if (!rdma_protocol_ib(device, dev->start_port + i)) + if (!rdma_cap_ib_mcast(device, dev->start_port + i)) continue; port = &dev->port[i]; port->dev = dev; @@ -858,7 +858,7 @@ static void mcast_remove_one(struct ib_device *device) flush_workqueue(mcast_wq); for (i = 0; i <= dev->end_port - dev->start_port; i++) { - if (rdma_protocol_ib(device, dev->start_port + i)) { + if (rdma_cap_ib_mcast(device, dev->start_port + i)) { port = &dev->port[i]; deref_port(port); wait_for_completion(&port->comp); diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index c3a561e891b1..6bbbc86d39d9 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1849,6 +1849,21 @@ static inline bool rdma_cap_ib_sa(struct ib_device *device, u8 port_num) return rdma_protocol_ib(device, port_num); } +/** + * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband + * Multicast. + * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support Infiniband + * Multicast. 
+ */ +static inline bool rdma_cap_ib_mcast(struct ib_device *device, u8 port_num) +{ + return rdma_cap_ib_sa(device, port_num); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); -- cgit v1.2.3 From bc0f1d71536063f8b2df966625e0136bca03b3e6 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:38 +0200 Subject: IB/Verbs: Use management helper rdma_cap_read_multi_sge() Introduce helper rdma_cap_read_multi_sge() to help us check if the port of an IB device support RDMA Read Multiple Scatter-Gather Entries. Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- include/rdma/ib_verbs.h | 16 ++++++++++++++++ net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 4 ++-- 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'include/rdma') diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 6bbbc86d39d9..2cf23b130f9f 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1864,6 +1864,22 @@ static inline bool rdma_cap_ib_mcast(struct ib_device *device, u8 port_num) return rdma_cap_ib_sa(device, port_num); } +/** + * rdma_cap_read_multi_sge - Check if the port of device has the capability + * RDMA Read Multiple Scatter-Gather Entries. + * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support + * RDMA Read Multiple Scatter-Gather Entries. + */ +static inline bool rdma_cap_read_multi_sge(struct ib_device *device, + u8 port_num) +{ + return !rdma_protocol_iwarp(device, port_num); +} + int ib_query_gid(struct ib_device *device, u8 port_num, int index, union ib_gid *gid); diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index 2cc625db16aa..86b44164172b 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c @@ -117,8 +117,8 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp, static int rdma_read_max_sge(struct svcxprt_rdma *xprt, int sge_count) { - if (rdma_protocol_iwarp(xprt->sc_cm_id->device, - xprt->sc_cm_id->port_num)) + if (!rdma_cap_read_multi_sge(xprt->sc_cm_id->device, + xprt->sc_cm_id->port_num)) return 1; else return min_t(int, sge_count, xprt->sc_max_sge); -- cgit v1.2.3 From 30a74ef41d5293cb2f85fcce120fe869a672ade4 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:39 +0200 Subject: IB/Verbs: Use management helper rdma_cap_af_ib() Introduce helper rdma_cap_af_ib() to help us check if the port of an IB device support Native Infiniband Address. 
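Only GID-addressable ports can back an AF_IB binding, which is what cma_resolve_ib_dev() checks in the hunk below. A hypothetical, condensed sketch of that kind of search (example_find_af_ib_port is illustrative; ib_find_cached_pkey() is the existing P_Key cache helper already used in the same function):

static int example_find_af_ib_port(struct ib_device *device, u16 pkey)
{
	u16 index;
	u8 p;

	for (p = 1; p <= device->phys_port_cnt; ++p) {
		if (!rdma_cap_af_ib(device, p))
			continue;	/* no native IB addressing on this port */
		if (!ib_find_cached_pkey(device, p, pkey, &index))
			return p;	/* first port carrying the requested P_Key */
	}
	return -ENOENT;
}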
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 2 +- include/rdma/ib_verbs.h | 15 +++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) (limited to 'include/rdma') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 78becc79f13c..c9a281718c1a 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -448,7 +448,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) list_for_each_entry(cur_dev, &dev_list, list) { for (p = 1; p <= cur_dev->device->phys_port_cnt; ++p) { - if (!rdma_ib_or_iboe(cur_dev->device, p)) + if (!rdma_cap_af_ib(cur_dev->device, p)) continue; if (ib_find_cached_pkey(cur_dev->device, p, pkey, &index)) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 2cf23b130f9f..349d1216564c 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1864,6 +1864,21 @@ static inline bool rdma_cap_ib_mcast(struct ib_device *device, u8 port_num) return rdma_cap_ib_sa(device, port_num); } +/** + * rdma_cap_af_ib - Check if the port of device has the capability + * Native Infiniband Address. + * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support + * Native Infiniband Address. + */ +static inline bool rdma_cap_af_ib(struct ib_device *device, u8 port_num) +{ + return rdma_ib_or_iboe(device, port_num); +} + /** * rdma_cap_read_multi_sge - Check if the port of device has the capability * RDMA Read Multiple Scatter-Gather Entries. -- cgit v1.2.3 From 227128fc68401d8e36b660ffeef4320c5fb492d7 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Tue, 5 May 2015 14:50:40 +0200 Subject: IB/Verbs: Use management helper rdma_cap_eth_ah() Introduce helper rdma_cap_eth_ah() to help us check if the port of an IB device support Ethernet Address Handler. 
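The practical consequence, visible in the ib_init_ah_from_path() hunk below, is that address handles on RoCE ports must always carry a GRH, since the GID holds the routing information that plain Ethernet headers cannot. A hypothetical sketch of that rule (example_force_grh is illustrative only):

static void example_force_grh(struct ib_device *device, u8 port_num,
			      struct ib_ah_attr *ah_attr, u8 hop_limit)
{
	/* RoCE: GRH is mandatory; native IB: only needed off the local subnet */
	if (rdma_cap_eth_ah(device, port_num) || hop_limit > 1) {
		ah_attr->ah_flags = IB_AH_GRH;
		ah_attr->grh.hop_limit = hop_limit;
		/* dgid, sgid_index, traffic class, etc. are filled by the caller */
	}
}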
Signed-off-by: Michael Wang Reviewed-by: Ira Weiny Tested-by: Ira Weiny Reviewed-by: Sean Hefty Reviewed-by: Jason Gunthorpe Tested-by: Doug Ledford Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 2 +- drivers/infiniband/core/sa_query.c | 2 +- drivers/infiniband/core/verbs.c | 4 ++-- include/rdma/ib_verbs.h | 15 +++++++++++++++ 4 files changed, 19 insertions(+), 4 deletions(-) (limited to 'include/rdma') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index c9a281718c1a..1977f601a1ec 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -711,7 +711,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, int ret; u16 pkey; - if (rdma_protocol_iboe(id_priv->id.device, id_priv->id.port_num)) + if (rdma_cap_eth_ah(id_priv->id.device, id_priv->id.port_num)) pkey = 0xffff; else pkey = ib_addr_get_pkey(dev_addr); diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 30aa5e5e08f2..7f7c8c9fa92c 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -540,7 +540,7 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num, ah_attr->port_num = port_num; ah_attr->static_rate = rec->rate; - force_grh = rdma_protocol_iboe(device, port_num); + force_grh = rdma_cap_eth_ah(device, port_num); if (rec->hop_limit > 1 || force_grh) { ah_attr->ah_flags = IB_AH_GRH; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 7dd2f5182020..d110a5eb77a8 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -200,7 +200,7 @@ int ib_init_ah_from_wc(struct ib_device *device, u8 port_num, struct ib_wc *wc, int ret; memset(ah_attr, 0, sizeof *ah_attr); - if (rdma_protocol_iboe(device, port_num)) { + if (rdma_cap_eth_ah(device, port_num)) { if (!(wc->wc_flags & IB_WC_GRH)) return -EPROTOTYPE; @@ -869,7 +869,7 @@ int ib_resolve_eth_l2_attrs(struct ib_qp *qp, union ib_gid sgid; if ((*qp_attr_mask & IB_QP_AV) && - (rdma_protocol_iboe(qp->device, qp_attr->ah_attr.port_num))) { + (rdma_cap_eth_ah(qp->device, qp_attr->ah_attr.port_num))) { ret = ib_query_gid(qp->device, qp_attr->ah_attr.port_num, qp_attr->ah_attr.grh.sgid_index, &sgid); if (ret) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 349d1216564c..8d59479eea4d 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1879,6 +1879,21 @@ static inline bool rdma_cap_af_ib(struct ib_device *device, u8 port_num) return rdma_ib_or_iboe(device, port_num); } +/** + * rdma_cap_eth_ah - Check if the port of device has the capability + * Ethernet Address Handler. + * + * @device: Device to be checked + * @port_num: Port number of the device + * + * Return false when port of the device don't support + * Ethernet Address Handler. + */ +static inline bool rdma_cap_eth_ah(struct ib_device *device, u8 port_num) +{ + return rdma_protocol_iboe(device, port_num); +} + /** * rdma_cap_read_multi_sge - Check if the port of device has the capability * RDMA Read Multiple Scatter-Gather Entries. -- cgit v1.2.3 From 296ec00995fb28c4e34b41f80b5a876f3a25c134 Mon Sep 17 00:00:00 2001 From: Michael Wang Date: Mon, 18 May 2015 10:41:45 +0200 Subject: IB/Verbs: Improve docs for rdma-helpers Increase the level of documentation for the rdma_cap_* helpers introduced by Michael Wang . This patch is loosely based on a patch Michael wrote to enhance the documentation of these functions, but has been significantly modified in terms of verbiage. 
In addition, the comments were moved from a kernel Documentation/infiniband/ file to being inline in the header file itself for the functions in question. Finally, the documentation was formated in proper kdoc format. Signed-off-by: Michael Wang Signed-off-by: Doug Ledford --- include/rdma/ib_verbs.h | 132 +++++++++++++++++++++++++++++++++--------------- 1 file changed, 92 insertions(+), 40 deletions(-) (limited to 'include/rdma') diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 8d59479eea4d..81740c14fdb1 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1775,14 +1775,16 @@ static inline bool rdma_ib_or_iboe(struct ib_device *device, u8 port_num) } /** - * rdma_cap_ib_mad - Check if the port of device has the capability Infiniband + * rdma_cap_ib_mad - Check if the port of a device supports Infiniband * Management Datagrams. + * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * Management Datagrams (MAD) are a required part of the InfiniBand + * specification and are supported on all InfiniBand devices. A slightly + * extended version are also supported on OPA interfaces. * - * Return false when port of the device don't support Infiniband - * Management Datagrams. + * Return: true if the port supports sending/receiving of MAD packets. */ static inline bool rdma_cap_ib_mad(struct ib_device *device, u8 port_num) { @@ -1790,14 +1792,24 @@ static inline bool rdma_cap_ib_mad(struct ib_device *device, u8 port_num) } /** - * rdma_cap_ib_smi - Check if the port of device has the capability Infiniband - * Subnet Management Interface. + * rdma_cap_ib_smi - Check if the port of a device provides an Infiniband + * Subnet Management Agent (SMA) on the Subnet Management Interface (SMI). + * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * Each InfiniBand node is required to provide a Subnet Management Agent + * that the subnet manager can access. Prior to the fabric being fully + * configured by the subnet manager, the SMA is accessed via a well known + * interface called the Subnet Management Interface (SMI). This interface + * uses directed route packets to communicate with the SM to get around the + * chicken and egg problem of the SM needing to know what's on the fabric + * in order to configure the fabric, and needing to configure the fabric in + * order to send packets to the devices on the fabric. These directed + * route packets do not need the fabric fully configured in order to reach + * their destination. The SMI is the only method allowed to send + * directed route packets on an InfiniBand fabric. * - * Return false when port of the device don't support Infiniband - * Subnet Management Interface. + * Return: true if the port provides an SMI. */ static inline bool rdma_cap_ib_smi(struct ib_device *device, u8 port_num) { @@ -1807,12 +1819,17 @@ static inline bool rdma_cap_ib_smi(struct ib_device *device, u8 port_num) /** * rdma_cap_ib_cm - Check if the port of device has the capability Infiniband * Communication Manager. + * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * The InfiniBand Communication Manager is one of many pre-defined General + * Service Agents (GSA) that are accessed via the General Service + * Interface (GSI). 
It's role is to facilitate establishment of connections + * between nodes as well as other management related tasks for established + * connections. * - * Return false when port of the device don't support Infiniband - * Communication Manager. + * Return: true if the port supports an IB CM (this does not guarantee that + * a CM is actually running however). */ static inline bool rdma_cap_ib_cm(struct ib_device *device, u8 port_num) { @@ -1822,12 +1839,14 @@ static inline bool rdma_cap_ib_cm(struct ib_device *device, u8 port_num) /** * rdma_cap_iw_cm - Check if the port of device has the capability IWARP * Communication Manager. + * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * Similar to above, but specific to iWARP connections which have a different + * managment protocol than InfiniBand. * - * Return false when port of the device don't support IWARP - * Communication Manager. + * Return: true if the port supports an iWARP CM (this does not guarantee that + * a CM is actually running however). */ static inline bool rdma_cap_iw_cm(struct ib_device *device, u8 port_num) { @@ -1837,12 +1856,17 @@ static inline bool rdma_cap_iw_cm(struct ib_device *device, u8 port_num) /** * rdma_cap_ib_sa - Check if the port of device has the capability Infiniband * Subnet Administration. + * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * An InfiniBand Subnet Administration (SA) service is a pre-defined General + * Service Agent (GSA) provided by the Subnet Manager (SM). On InfiniBand + * fabrics, devices should resolve routes to other hosts by contacting the + * SA to query the proper route. * - * Return false when port of the device don't support Infiniband - * Subnet Administration. + * Return: true if the port should act as a client to the fabric Subnet + * Administration interface. This does not imply that the SA service is + * running locally. */ static inline bool rdma_cap_ib_sa(struct ib_device *device, u8 port_num) { @@ -1852,12 +1876,19 @@ static inline bool rdma_cap_ib_sa(struct ib_device *device, u8 port_num) /** * rdma_cap_ib_mcast - Check if the port of device has the capability Infiniband * Multicast. + * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * InfiniBand multicast registration is more complex than normal IPv4 or + * IPv6 multicast registration. Each Host Channel Adapter must register + * with the Subnet Manager when it wishes to join a multicast group. It + * should do so only once regardless of how many queue pairs it subscribes + * to this group. And it should leave the group only after all queue pairs + * attached to the group have been detached. * - * Return false when port of the device don't support Infiniband - * Multicast. + * Return: true if the port must undertake the additional adminstrative + * overhead of registering/unregistering with the SM and tracking of the + * total number of queue pairs attached to the multicast group. */ static inline bool rdma_cap_ib_mcast(struct ib_device *device, u8 port_num) { @@ -1867,12 +1898,15 @@ static inline bool rdma_cap_ib_mcast(struct ib_device *device, u8 port_num) /** * rdma_cap_af_ib - Check if the port of device has the capability * Native Infiniband Address. 
+ * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * InfiniBand addressing uses a port's GUID + Subnet Prefix to make a default + * GID. RoCE uses a different mechanism, but still generates a GID via + * a prescribed mechanism and port specific data. * - * Return false when port of the device don't support - * Native Infiniband Address. + * Return: true if the port uses a GID address to identify devices on the + * network. */ static inline bool rdma_cap_af_ib(struct ib_device *device, u8 port_num) { @@ -1881,13 +1915,19 @@ static inline bool rdma_cap_af_ib(struct ib_device *device, u8 port_num) /** * rdma_cap_eth_ah - Check if the port of device has the capability - * Ethernet Address Handler. + * Ethernet Address Handle. + * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * RoCE is InfiniBand over Ethernet, and it uses a well defined technique + * to fabricate GIDs over Ethernet/IP specific addresses native to the + * port. Normally, packet headers are generated by the sending host + * adapter, but when sending connectionless datagrams, we must manually + * inject the proper headers for the fabric we are communicating over. * - * Return false when port of the device don't support - * Ethernet Address Handler. + * Return: true if we are running as a RoCE port and must force the + * addition of a Global Route Header built from our Ethernet Address + * Handle into our header list for connectionless packets. */ static inline bool rdma_cap_eth_ah(struct ib_device *device, u8 port_num) { @@ -1897,12 +1937,24 @@ static inline bool rdma_cap_eth_ah(struct ib_device *device, u8 port_num) /** * rdma_cap_read_multi_sge - Check if the port of device has the capability * RDMA Read Multiple Scatter-Gather Entries. + * @device: Device to check + * @port_num: Port number to check * - * @device: Device to be checked - * @port_num: Port number of the device + * iWARP has a restriction that RDMA READ requests may only have a single + * Scatter/Gather Entry (SGE) in the work request. * - * Return false when port of the device don't support - * RDMA Read Multiple Scatter-Gather Entries. + * NOTE: although the linux kernel currently assumes all devices are either + * single SGE RDMA READ devices or identical SGE maximums for RDMA READs and + * WRITEs, according to Tom Talpey, this is not accurate. There are some + * devices out there that support more than a single SGE on RDMA READ + * requests, but do not support the same number of SGEs as they do on + * RDMA WRITE requests. The linux kernel would need rearchitecting to + * support these imbalanced READ/WRITE SGEs allowed devices. So, for now, + * suffice with either the device supports the same READ/WRITE SGEs, or + * it only gets one READ sge. + * + * Return: true for any device that allows more than one SGE in RDMA READ + * requests. */ static inline bool rdma_cap_read_multi_sge(struct ib_device *device, u8 port_num) -- cgit v1.2.3 From 0cf18d7723055709faf51b50f5a33253b480637f Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Wed, 13 May 2015 20:02:55 -0400 Subject: IB/core: Create common start/end port functions Previously start_port and end_port were defined in 2 places, cache.c and device.c and this prevented their use in other modules. Make these common functions, change the name to reflect the rdma name space, and update existing users. 
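Every converted caller below ends up with the same loop shape; rdma_start_port()/rdma_end_port() hide the fact that switches expose only port 0 while HCAs expose ports 1..phys_port_cnt. A hypothetical illustration of that canonical iteration (example_for_each_port is not a real kernel function):

static void example_for_each_port(struct ib_device *device)
{
	u8 port;

	for (port = rdma_start_port(device);
	     port <= rdma_end_port(device); ++port) {
		/* per-port work, e.g. the capability checks added earlier */
		if (!rdma_cap_ib_mad(device, port))
			continue;
		/* set up MAD handling for this port ... */
	}
}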
Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/cache.c | 61 ++++++++++++++++------------------------ drivers/infiniband/core/device.c | 26 +++++------------ include/rdma/ib_verbs.h | 27 ++++++++++++++++++ 3 files changed, 59 insertions(+), 55 deletions(-) (limited to 'include/rdma') diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 80f6cf2449fb..08921b34182c 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -58,17 +58,6 @@ struct ib_update_work { u8 port_num; }; -static inline int start_port(struct ib_device *device) -{ - return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1; -} - -static inline int end_port(struct ib_device *device) -{ - return (device->node_type == RDMA_NODE_IB_SWITCH) ? - 0 : device->phys_port_cnt; -} - int ib_get_cached_gid(struct ib_device *device, u8 port_num, int index, @@ -78,12 +67,12 @@ int ib_get_cached_gid(struct ib_device *device, unsigned long flags; int ret = 0; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - cache = device->cache.gid_cache[port_num - start_port(device)]; + cache = device->cache.gid_cache[port_num - rdma_start_port(device)]; if (index < 0 || index >= cache->table_len) ret = -EINVAL; @@ -112,11 +101,11 @@ int ib_find_cached_gid(struct ib_device *device, read_lock_irqsave(&device->cache.lock, flags); - for (p = 0; p <= end_port(device) - start_port(device); ++p) { + for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) { cache = device->cache.gid_cache[p]; for (i = 0; i < cache->table_len; ++i) { if (!memcmp(gid, &cache->table[i], sizeof *gid)) { - *port_num = p + start_port(device); + *port_num = p + rdma_start_port(device); if (index) *index = i; ret = 0; @@ -140,12 +129,12 @@ int ib_get_cached_pkey(struct ib_device *device, unsigned long flags; int ret = 0; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - cache = device->cache.pkey_cache[port_num - start_port(device)]; + cache = device->cache.pkey_cache[port_num - rdma_start_port(device)]; if (index < 0 || index >= cache->table_len) ret = -EINVAL; @@ -169,12 +158,12 @@ int ib_find_cached_pkey(struct ib_device *device, int ret = -ENOENT; int partial_ix = -1; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - cache = device->cache.pkey_cache[port_num - start_port(device)]; + cache = device->cache.pkey_cache[port_num - rdma_start_port(device)]; *index = -1; @@ -209,12 +198,12 @@ int ib_find_exact_cached_pkey(struct ib_device *device, int i; int ret = -ENOENT; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - cache = device->cache.pkey_cache[port_num - start_port(device)]; + cache = device->cache.pkey_cache[port_num - rdma_start_port(device)]; *index = -1; @@ -238,11 +227,11 @@ int ib_get_cached_lmc(struct ib_device *device, unsigned long flags; int ret = 0; - if (port_num < start_port(device) || port_num > end_port(device)) + if 
(port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; read_lock_irqsave(&device->cache.lock, flags); - *lmc = device->cache.lmc_cache[port_num - start_port(device)]; + *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)]; read_unlock_irqrestore(&device->cache.lock, flags); return ret; @@ -303,13 +292,13 @@ static void ib_cache_update(struct ib_device *device, write_lock_irq(&device->cache.lock); - old_pkey_cache = device->cache.pkey_cache[port - start_port(device)]; - old_gid_cache = device->cache.gid_cache [port - start_port(device)]; + old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)]; + old_gid_cache = device->cache.gid_cache [port - rdma_start_port(device)]; - device->cache.pkey_cache[port - start_port(device)] = pkey_cache; - device->cache.gid_cache [port - start_port(device)] = gid_cache; + device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache; + device->cache.gid_cache [port - rdma_start_port(device)] = gid_cache; - device->cache.lmc_cache[port - start_port(device)] = tprops->lmc; + device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc; write_unlock_irq(&device->cache.lock); @@ -363,14 +352,14 @@ static void ib_cache_setup_one(struct ib_device *device) device->cache.pkey_cache = kmalloc(sizeof *device->cache.pkey_cache * - (end_port(device) - start_port(device) + 1), GFP_KERNEL); + (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL); device->cache.gid_cache = kmalloc(sizeof *device->cache.gid_cache * - (end_port(device) - start_port(device) + 1), GFP_KERNEL); + (rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL); device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache * - (end_port(device) - - start_port(device) + 1), + (rdma_end_port(device) - + rdma_start_port(device) + 1), GFP_KERNEL); if (!device->cache.pkey_cache || !device->cache.gid_cache || @@ -380,10 +369,10 @@ static void ib_cache_setup_one(struct ib_device *device) goto err; } - for (p = 0; p <= end_port(device) - start_port(device); ++p) { + for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) { device->cache.pkey_cache[p] = NULL; device->cache.gid_cache [p] = NULL; - ib_cache_update(device, p + start_port(device)); + ib_cache_update(device, p + rdma_start_port(device)); } INIT_IB_EVENT_HANDLER(&device->cache.event_handler, @@ -394,7 +383,7 @@ static void ib_cache_setup_one(struct ib_device *device) return; err_cache: - for (p = 0; p <= end_port(device) - start_port(device); ++p) { + for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) { kfree(device->cache.pkey_cache[p]); kfree(device->cache.gid_cache[p]); } @@ -412,7 +401,7 @@ static void ib_cache_cleanup_one(struct ib_device *device) ib_unregister_event_handler(&device->cache.event_handler); flush_workqueue(ib_wq); - for (p = 0; p <= end_port(device) - start_port(device); ++p) { + for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) { kfree(device->cache.pkey_cache[p]); kfree(device->cache.gid_cache[p]); } diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index b360350a0b20..0f16dd4d9bb4 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -152,18 +152,6 @@ static int alloc_name(char *name) return 0; } -static int start_port(struct ib_device *device) -{ - return (device->node_type == RDMA_NODE_IB_SWITCH) ? 
0 : 1; -} - - -static int end_port(struct ib_device *device) -{ - return (device->node_type == RDMA_NODE_IB_SWITCH) ? - 0 : device->phys_port_cnt; -} - /** * ib_alloc_device - allocate an IB device struct * @size:size of structure to allocate @@ -233,7 +221,7 @@ static int read_port_table_lengths(struct ib_device *device) if (!tprops) goto out; - num_ports = end_port(device) - start_port(device) + 1; + num_ports = rdma_end_port(device) - rdma_start_port(device) + 1; device->pkey_tbl_len = kmalloc(sizeof *device->pkey_tbl_len * num_ports, GFP_KERNEL); @@ -243,7 +231,7 @@ static int read_port_table_lengths(struct ib_device *device) goto err; for (port_index = 0; port_index < num_ports; ++port_index) { - ret = ib_query_port(device, port_index + start_port(device), + ret = ib_query_port(device, port_index + rdma_start_port(device), tprops); if (ret) goto err; @@ -576,7 +564,7 @@ int ib_query_port(struct ib_device *device, u8 port_num, struct ib_port_attr *port_attr) { - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; return device->query_port(device, port_num, port_attr); @@ -654,7 +642,7 @@ int ib_modify_port(struct ib_device *device, if (!device->modify_port) return -ENOSYS; - if (port_num < start_port(device) || port_num > end_port(device)) + if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device)) return -EINVAL; return device->modify_port(device, port_num, port_modify_mask, @@ -677,8 +665,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid, union ib_gid tmp_gid; int ret, port, i; - for (port = start_port(device); port <= end_port(device); ++port) { - for (i = 0; i < device->gid_tbl_len[port - start_port(device)]; ++i) { + for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) { + for (i = 0; i < device->gid_tbl_len[port - rdma_start_port(device)]; ++i) { ret = ib_query_gid(device, port, i, &tmp_gid); if (ret) return ret; @@ -710,7 +698,7 @@ int ib_find_pkey(struct ib_device *device, u16 tmp_pkey; int partial_ix = -1; - for (i = 0; i < device->pkey_tbl_len[port_num - start_port(device)]; ++i) { + for (i = 0; i < device->pkey_tbl_len[port_num - rdma_start_port(device)]; ++i) { ret = ib_query_pkey(device, port_num, i, &tmp_pkey); if (ret) return ret; diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 81740c14fdb1..be4465b5df7b 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1752,6 +1752,33 @@ int ib_query_port(struct ib_device *device, enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num); +/** + * rdma_start_port - Return the first valid port number for the device + * specified + * + * @device: Device to be checked + * + * Return start port number + */ +static inline u8 rdma_start_port(const struct ib_device *device) +{ + return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1; +} + +/** + * rdma_end_port - Return the last valid port number for the device + * specified + * + * @device: Device to be checked + * + * Return last port number + */ +static inline u8 rdma_end_port(const struct ib_device *device) +{ + return (device->node_type == RDMA_NODE_IB_SWITCH) ? 
+ 0 : device->phys_port_cnt; +} + static inline bool rdma_protocol_ib(struct ib_device *device, u8 port_num) { return device->query_protocol(device, port_num) == RDMA_PROTOCOL_IB; -- cgit v1.2.3 From 2b1b5b601230ae4356be4724ea7a058ed7203c63 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Mon, 18 May 2015 13:40:28 +0300 Subject: IB/core, cma: Nice log-friendly string helpers Some of us keep revisiting the code to decode enumerations that appear in our logs. Let's borrow the nice logging helpers that exist in xprtrdma and rds for CMA events, IB events and WC statuses. Reviewed-by: Sean Hefty Signed-off-by: Sagi Grimberg Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 28 ++++++++++++++++++ drivers/infiniband/core/verbs.c | 65 +++++++++++++++++++++++++++++++++++++++++ include/rdma/ib_verbs.h | 4 +++ include/rdma/rdma_cm.h | 2 ++ 4 files changed, 99 insertions(+) (limited to 'include/rdma') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 06441a43c3aa..b2114efcb89e 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -65,6 +65,34 @@ MODULE_LICENSE("Dual BSD/GPL"); #define CMA_CM_MRA_SETTING (IB_CM_MRA_FLAG_DELAY | 24) #define CMA_IBOE_PACKET_LIFETIME 18 +static const char * const cma_events[] = { + [RDMA_CM_EVENT_ADDR_RESOLVED] = "address resolved", + [RDMA_CM_EVENT_ADDR_ERROR] = "address error", + [RDMA_CM_EVENT_ROUTE_RESOLVED] = "route resolved ", + [RDMA_CM_EVENT_ROUTE_ERROR] = "route error", + [RDMA_CM_EVENT_CONNECT_REQUEST] = "connect request", + [RDMA_CM_EVENT_CONNECT_RESPONSE] = "connect response", + [RDMA_CM_EVENT_CONNECT_ERROR] = "connect error", + [RDMA_CM_EVENT_UNREACHABLE] = "unreachable", + [RDMA_CM_EVENT_REJECTED] = "rejected", + [RDMA_CM_EVENT_ESTABLISHED] = "established", + [RDMA_CM_EVENT_DISCONNECTED] = "disconnected", + [RDMA_CM_EVENT_DEVICE_REMOVAL] = "device removal", + [RDMA_CM_EVENT_MULTICAST_JOIN] = "multicast join", + [RDMA_CM_EVENT_MULTICAST_ERROR] = "multicast error", + [RDMA_CM_EVENT_ADDR_CHANGE] = "address change", + [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", +}; + +const char *rdma_event_msg(enum rdma_cm_event_type event) +{ + size_t index = event; + + return (index < ARRAY_SIZE(cma_events) && cma_events[index]) ?
+ cma_events[index] : "unrecognized event"; +} +EXPORT_SYMBOL(rdma_event_msg); + static void cma_add_one(struct ib_device *device); static void cma_remove_one(struct ib_device *device); diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index f93eb8da7b5a..4c01a34512da 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -48,6 +48,71 @@ #include "core_priv.h" +static const char * const ib_events[] = { + [IB_EVENT_CQ_ERR] = "CQ error", + [IB_EVENT_QP_FATAL] = "QP fatal error", + [IB_EVENT_QP_REQ_ERR] = "QP request error", + [IB_EVENT_QP_ACCESS_ERR] = "QP access error", + [IB_EVENT_COMM_EST] = "communication established", + [IB_EVENT_SQ_DRAINED] = "send queue drained", + [IB_EVENT_PATH_MIG] = "path migration successful", + [IB_EVENT_PATH_MIG_ERR] = "path migration error", + [IB_EVENT_DEVICE_FATAL] = "device fatal error", + [IB_EVENT_PORT_ACTIVE] = "port active", + [IB_EVENT_PORT_ERR] = "port error", + [IB_EVENT_LID_CHANGE] = "LID change", + [IB_EVENT_PKEY_CHANGE] = "P_key change", + [IB_EVENT_SM_CHANGE] = "SM change", + [IB_EVENT_SRQ_ERR] = "SRQ error", + [IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached", + [IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached", + [IB_EVENT_CLIENT_REREGISTER] = "client reregister", + [IB_EVENT_GID_CHANGE] = "GID changed", +}; + +const char *ib_event_msg(enum ib_event_type event) +{ + size_t index = event; + + return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ? + ib_events[index] : "unrecognized event"; +} +EXPORT_SYMBOL(ib_event_msg); + +static const char * const wc_statuses[] = { + [IB_WC_SUCCESS] = "success", + [IB_WC_LOC_LEN_ERR] = "local length error", + [IB_WC_LOC_QP_OP_ERR] = "local QP operation error", + [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error", + [IB_WC_LOC_PROT_ERR] = "local protection error", + [IB_WC_WR_FLUSH_ERR] = "WR flushed", + [IB_WC_MW_BIND_ERR] = "memory management operation error", + [IB_WC_BAD_RESP_ERR] = "bad response error", + [IB_WC_LOC_ACCESS_ERR] = "local access error", + [IB_WC_REM_INV_REQ_ERR] = "invalid request error", + [IB_WC_REM_ACCESS_ERR] = "remote access error", + [IB_WC_REM_OP_ERR] = "remote operation error", + [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded", + [IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded", + [IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error", + [IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request", + [IB_WC_REM_ABORT_ERR] = "operation aborted", + [IB_WC_INV_EECN_ERR] = "invalid EE context number", + [IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state", + [IB_WC_FATAL_ERR] = "fatal error", + [IB_WC_RESP_TIMEOUT_ERR] = "response timeout error", + [IB_WC_GENERAL_ERR] = "general error", +}; + +const char *ib_wc_status_msg(enum ib_wc_status status) +{ + size_t index = status; + + return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ? 
+ wc_statuses[index] : "unrecognized status"; +} +EXPORT_SYMBOL(ib_wc_status_msg); + __attribute_const__ int ib_rate_to_mult(enum ib_rate rate) { switch (rate) { diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 65994a19e840..672fc8f20409 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -412,6 +412,8 @@ enum ib_event_type { IB_EVENT_GID_CHANGE, }; +__attribute_const__ const char *ib_event_msg(enum ib_event_type event); + struct ib_event { struct ib_device *device; union { @@ -663,6 +665,8 @@ enum ib_wc_status { IB_WC_GENERAL_ERR }; +__attribute_const__ const char *ib_wc_status_msg(enum ib_wc_status status); + enum ib_wc_opcode { IB_WC_SEND, IB_WC_RDMA_WRITE, diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 1ed2088dc9f5..c92522c192d2 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h @@ -62,6 +62,8 @@ enum rdma_cm_event_type { RDMA_CM_EVENT_TIMEWAIT_EXIT }; +__attribute_const__ const char *rdma_event_msg(enum rdma_cm_event_type event); + enum rdma_port_space { RDMA_PS_SDP = 0x0001, RDMA_PS_IPOIB = 0x0002, -- cgit v1.2.3 From 96909308542006215980874ce496101ea51aa031 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Fri, 8 May 2015 14:27:22 -0400 Subject: IB/mad: Change ib_response_mad signature arguments ib_response_mad only needs read access to the MAD header, not write access to the entire mad struct, so replace struct ib_mad with const struct ib_mad_hdr Reviewed-By: Jason Gunthorpe Reviewed-by: Sean Hefty Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/mad.c | 20 ++++++++++---------- drivers/infiniband/core/user_mad.c | 6 +++--- include/rdma/ib_mad.h | 2 +- 3 files changed, 14 insertions(+), 14 deletions(-) (limited to 'include/rdma') diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 23d40c28d484..b902d8da9d16 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -179,12 +179,12 @@ static int is_vendor_method_in_use( return 0; } -int ib_response_mad(struct ib_mad *mad) +int ib_response_mad(const struct ib_mad_hdr *hdr) { - return ((mad->mad_hdr.method & IB_MGMT_METHOD_RESP) || - (mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) || - ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_BM) && - (mad->mad_hdr.attr_mod & IB_BM_ATTR_MOD_RESP))); + return ((hdr->method & IB_MGMT_METHOD_RESP) || + (hdr->method == IB_MGMT_METHOD_TRAP_REPRESS) || + ((hdr->mgmt_class == IB_MGMT_CLASS_BM) && + (hdr->attr_mod & IB_BM_ATTR_MOD_RESP))); } EXPORT_SYMBOL(ib_response_mad); @@ -791,7 +791,7 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv, switch (ret) { case IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY: - if (ib_response_mad(&mad_priv->mad.mad) && + if (ib_response_mad(&mad_priv->mad.mad.mad_hdr) && mad_agent_priv->agent.recv_handler) { local->mad_priv = mad_priv; local->recv_mad_agent = mad_agent_priv; @@ -1628,7 +1628,7 @@ find_mad_agent(struct ib_mad_port_private *port_priv, unsigned long flags; spin_lock_irqsave(&port_priv->reg_lock, flags); - if (ib_response_mad(mad)) { + if (ib_response_mad(&mad->mad_hdr)) { u32 hi_tid; struct ib_mad_agent_private *entry; @@ -1765,8 +1765,8 @@ static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv, u8 port_num = mad_agent_priv->agent.port_num; u8 lmc; - send_resp = ib_response_mad((struct ib_mad *)wr->send_buf.mad); - rcv_resp = ib_response_mad(rwc->recv_buf.mad); + send_resp = ib_response_mad((struct ib_mad_hdr *)wr->send_buf.mad); + rcv_resp = 
ib_response_mad(&rwc->recv_buf.mad->mad_hdr); if (send_resp == rcv_resp) /* both requests, or both responses. GIDs different */ @@ -1879,7 +1879,7 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, } /* Complete corresponding request */ - if (ib_response_mad(mad_recv_wc->recv_buf.mad)) { + if (ib_response_mad(&mad_recv_wc->recv_buf.mad->mad_hdr)) { spin_lock_irqsave(&mad_agent_priv->lock, flags); mad_send_wr = ib_find_send_mad(mad_agent_priv, mad_recv_wc); if (!mad_send_wr) { diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index 928cdd20e2d1..66b5217841be 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c @@ -426,11 +426,11 @@ static int is_duplicate(struct ib_umad_file *file, * the same TID, reject the second as a duplicate. This is more * restrictive than required by the spec. */ - if (!ib_response_mad((struct ib_mad *) hdr)) { - if (!ib_response_mad((struct ib_mad *) sent_hdr)) + if (!ib_response_mad(hdr)) { + if (!ib_response_mad(sent_hdr)) return 1; continue; - } else if (!ib_response_mad((struct ib_mad *) sent_hdr)) + } else if (!ib_response_mad(sent_hdr)) continue; if (same_destination(&packet->mad.hdr, &sent_packet->mad.hdr)) diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 9bb99e983f58..69e3c45e7d46 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -263,7 +263,7 @@ struct ib_mad_send_buf { * ib_response_mad - Returns if the specified MAD has been generated in * response to a sent request or trap. */ -int ib_response_mad(struct ib_mad *mad); +int ib_response_mad(const struct ib_mad_hdr *hdr); /** * ib_get_rmpp_resptime - Returns the RMPP response time. -- cgit v1.2.3 From f766c58fa3ea38a30912df8b5af3ca40637fe5e9 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Fri, 8 May 2015 14:27:24 -0400 Subject: IB/mad: Add const qualifiers to query only functions The following functions only need read access to the data passed to them. 
ib_mad_kernel_rmpp_agent is_rmpp_data_mad rcv_has_same_gid ib_find_send_mad Clarify with const specifiers Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/mad.c | 16 ++++++++-------- drivers/infiniband/core/mad_priv.h | 4 ++-- include/rdma/ib_mad.h | 2 +- 3 files changed, 11 insertions(+), 11 deletions(-) (limited to 'include/rdma') diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 0ae48d45395b..87e222ec7ee1 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -910,7 +910,7 @@ static int alloc_send_rmpp_list(struct ib_mad_send_wr_private *send_wr, return 0; } -int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent) +int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent) { return agent->rmpp_version && !(agent->flags & IB_MAD_USER_RMPP); } @@ -1734,8 +1734,8 @@ out: return valid; } -static int is_rmpp_data_mad(struct ib_mad_agent_private *mad_agent_priv, - struct ib_mad_hdr *mad_hdr) +static int is_rmpp_data_mad(const struct ib_mad_agent_private *mad_agent_priv, + const struct ib_mad_hdr *mad_hdr) { struct ib_rmpp_mad *rmpp_mad; @@ -1754,9 +1754,9 @@ static inline int rcv_has_same_class(const struct ib_mad_send_wr_private *wr, rwc->recv_buf.mad->mad_hdr.mgmt_class; } -static inline int rcv_has_same_gid(struct ib_mad_agent_private *mad_agent_priv, - struct ib_mad_send_wr_private *wr, - struct ib_mad_recv_wc *rwc ) +static inline int rcv_has_same_gid(const struct ib_mad_agent_private *mad_agent_priv, + const struct ib_mad_send_wr_private *wr, + const struct ib_mad_recv_wc *rwc ) { struct ib_ah_attr attr; u8 send_resp, rcv_resp; @@ -1811,8 +1811,8 @@ static inline int is_direct(u8 class) } struct ib_mad_send_wr_private* -ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, - struct ib_mad_recv_wc *wc) +ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, + const struct ib_mad_recv_wc *wc) { struct ib_mad_send_wr_private *wr; struct ib_mad *mad; diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index d1a0b0ee9444..7b19cba2adf0 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h @@ -213,8 +213,8 @@ struct ib_mad_port_private { int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr); struct ib_mad_send_wr_private * -ib_find_send_mad(struct ib_mad_agent_private *mad_agent_priv, - struct ib_mad_recv_wc *mad_recv_wc); +ib_find_send_mad(const struct ib_mad_agent_private *mad_agent_priv, + const struct ib_mad_recv_wc *mad_recv_wc); void ib_mad_complete_send_wr(struct ib_mad_send_wr_private *mad_send_wr, struct ib_mad_send_wc *mad_send_wc); diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index 69e3c45e7d46..c0ea51f90a03 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -675,6 +675,6 @@ void ib_free_send_mad(struct ib_mad_send_buf *send_buf); * @agent: the agent in question * @return: true if agent is performing rmpp, false otherwise. */ -int ib_mad_kernel_rmpp_agent(struct ib_mad_agent *agent); +int ib_mad_kernel_rmpp_agent(const struct ib_mad_agent *agent); #endif /* IB_MAD_H */ -- cgit v1.2.3 From 7738613e7cb419179545910744b1777d87edac5c Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Wed, 13 May 2015 20:02:58 -0400 Subject: IB/core: Add per port immutable struct to ib_device As of commit 5eb620c81ce3 "IB/core: Add helpers for uncached GID and P_Key searches"; pkey_tbl_len and gid_tbl_len are immutable data which are stored in the ib_device. 
The per port core capability flags to be added later are also immutable data to be stored in the ib_device object. In preparation for this create a structure for per port immutable data and place the pkey and gid table lengths within this structure. "get_port_immutable" is added as a mandatory device function to allow the drivers to fill in this data. Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/device.c | 63 +++++++++++++--------------- drivers/infiniband/core/sysfs.c | 1 + drivers/infiniband/hw/amso1100/c2_provider.c | 17 ++++++++ drivers/infiniband/hw/cxgb3/iwch_provider.c | 17 ++++++++ drivers/infiniband/hw/cxgb4/provider.c | 17 ++++++++ drivers/infiniband/hw/ehca/ehca_main.c | 17 ++++++++ drivers/infiniband/hw/ipath/ipath_verbs.c | 17 ++++++++ drivers/infiniband/hw/mlx4/main.c | 17 ++++++++ drivers/infiniband/hw/mlx5/main.c | 17 ++++++++ drivers/infiniband/hw/mthca/mthca_provider.c | 17 ++++++++ drivers/infiniband/hw/nes/nes_verbs.c | 16 +++++++ drivers/infiniband/hw/ocrdma/ocrdma_main.c | 17 ++++++++ drivers/infiniband/hw/qib/qib_verbs.c | 17 ++++++++ drivers/infiniband/hw/usnic/usnic_ib_main.c | 17 ++++++++ include/rdma/ib_verbs.h | 19 ++++++++- 15 files changed, 249 insertions(+), 37 deletions(-) (limited to 'include/rdma') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 0f16dd4d9bb4..4aa4f5420bc9 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -93,7 +93,8 @@ static int ib_device_check_mandatory(struct ib_device *device) IB_MANDATORY_FUNC(poll_cq), IB_MANDATORY_FUNC(req_notify_cq), IB_MANDATORY_FUNC(get_dma_mr), - IB_MANDATORY_FUNC(dereg_mr) + IB_MANDATORY_FUNC(dereg_mr), + IB_MANDATORY_FUNC(get_port_immutable) }; int i; @@ -211,42 +212,38 @@ static int add_client_context(struct ib_device *device, struct ib_client *client return 0; } -static int read_port_table_lengths(struct ib_device *device) +static int read_port_immutable(struct ib_device *device) { - struct ib_port_attr *tprops = NULL; - int num_ports, ret = -ENOMEM; - u8 port_index; - - tprops = kmalloc(sizeof *tprops, GFP_KERNEL); - if (!tprops) - goto out; - - num_ports = rdma_end_port(device) - rdma_start_port(device) + 1; - - device->pkey_tbl_len = kmalloc(sizeof *device->pkey_tbl_len * num_ports, - GFP_KERNEL); - device->gid_tbl_len = kmalloc(sizeof *device->gid_tbl_len * num_ports, - GFP_KERNEL); - if (!device->pkey_tbl_len || !device->gid_tbl_len) + int ret = -ENOMEM; + u8 start_port = rdma_start_port(device); + u8 end_port = rdma_end_port(device); + u8 port; + + /** + * device->port_immutable is indexed directly by the port number to make + * access to this data as efficient as possible. + * + * Therefore port_immutable is declared as a 1 based array with + * potential empty slots at the beginning. 
+ */ + device->port_immutable = kzalloc(sizeof(*device->port_immutable) + * (end_port + 1), + GFP_KERNEL); + if (!device->port_immutable) goto err; - for (port_index = 0; port_index < num_ports; ++port_index) { - ret = ib_query_port(device, port_index + rdma_start_port(device), - tprops); + for (port = start_port; port <= end_port; ++port) { + ret = device->get_port_immutable(device, port, + &device->port_immutable[port]); if (ret) goto err; - device->pkey_tbl_len[port_index] = tprops->pkey_tbl_len; - device->gid_tbl_len[port_index] = tprops->gid_tbl_len; } ret = 0; goto out; - err: - kfree(device->gid_tbl_len); - kfree(device->pkey_tbl_len); + kfree(device->port_immutable); out: - kfree(tprops); return ret; } @@ -283,9 +280,9 @@ int ib_register_device(struct ib_device *device, spin_lock_init(&device->event_handler_lock); spin_lock_init(&device->client_data_lock); - ret = read_port_table_lengths(device); + ret = read_port_immutable(device); if (ret) { - printk(KERN_WARNING "Couldn't create table lengths cache for device %s\n", + printk(KERN_WARNING "Couldn't create per port immutable data %s\n", device->name); goto out; } @@ -294,8 +291,7 @@ int ib_register_device(struct ib_device *device, if (ret) { printk(KERN_WARNING "Couldn't register device %s with driver model\n", device->name); - kfree(device->gid_tbl_len); - kfree(device->pkey_tbl_len); + kfree(device->port_immutable); goto out; } @@ -337,9 +333,6 @@ void ib_unregister_device(struct ib_device *device) list_del(&device->core_list); - kfree(device->gid_tbl_len); - kfree(device->pkey_tbl_len); - mutex_unlock(&device_mutex); ib_device_unregister_sysfs(device); @@ -666,7 +659,7 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid, int ret, port, i; for (port = rdma_start_port(device); port <= rdma_end_port(device); ++port) { - for (i = 0; i < device->gid_tbl_len[port - rdma_start_port(device)]; ++i) { + for (i = 0; i < device->port_immutable[port].gid_tbl_len; ++i) { ret = ib_query_gid(device, port, i, &tmp_gid); if (ret) return ret; @@ -698,7 +691,7 @@ int ib_find_pkey(struct ib_device *device, u16 tmp_pkey; int partial_ix = -1; - for (i = 0; i < device->pkey_tbl_len[port_num - rdma_start_port(device)]; ++i) { + for (i = 0; i < device->port_immutable[port_num].pkey_tbl_len; ++i) { ret = ib_query_pkey(device, port_num, i, &tmp_pkey); if (ret) return ret; diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index cbd0383f622e..d0334c101ecb 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -456,6 +456,7 @@ static void ib_device_release(struct device *device) { struct ib_device *dev = container_of(device, struct ib_device, dev); + kfree(dev->port_immutable); kfree(dev); } diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c index 6fe329a5d595..fa638963908f 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/drivers/infiniband/hw/amso1100/c2_provider.c @@ -763,6 +763,22 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev) return netdev; } +static int c2_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = c2_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + int c2_register_device(struct c2_dev *dev) { int ret = -ENOMEM; @@ -827,6 +843,7 @@ int c2_register_device(struct c2_dev 
*dev) dev->ibdev.reg_phys_mr = c2_reg_phys_mr; dev->ibdev.reg_user_mr = c2_reg_user_mr; dev->ibdev.dereg_mr = c2_dereg_mr; + dev->ibdev.get_port_immutable = c2_port_immutable; dev->ibdev.alloc_fmr = NULL; dev->ibdev.unmap_fmr = NULL; diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 298d1caab3a5..590ba0f422a1 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -1349,6 +1349,22 @@ static struct device_attribute *iwch_class_attributes[] = { &dev_attr_board_id, }; +static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = iwch_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + int iwch_register_device(struct iwch_dev *dev) { int ret; @@ -1427,6 +1443,7 @@ int iwch_register_device(struct iwch_dev *dev) dev->ibdev.post_recv = iwch_post_receive; dev->ibdev.get_protocol_stats = iwch_get_mib; dev->ibdev.uverbs_abi_ver = IWCH_UVERBS_ABI_VERSION; + dev->ibdev.get_port_immutable = iwch_port_immutable; dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); if (!dev->ibdev.iwcm) diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index f52ee6343d41..5eded1b3bbad 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -471,6 +471,22 @@ static struct device_attribute *c4iw_class_attributes[] = { &dev_attr_board_id, }; +static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = c4iw_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + int c4iw_register_device(struct c4iw_dev *dev) { int ret; @@ -549,6 +565,7 @@ int c4iw_register_device(struct c4iw_dev *dev) dev->ibdev.post_recv = c4iw_post_receive; dev->ibdev.get_protocol_stats = c4iw_get_mib; dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION; + dev->ibdev.get_port_immutable = c4iw_port_immutable; dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL); if (!dev->ibdev.iwcm) diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index 321545b708ad..8454186e16ab 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -431,6 +431,22 @@ init_node_guid1: return ret; } +static int ehca_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = ehca_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + static int ehca_init_device(struct ehca_shca *shca) { int ret; @@ -511,6 +527,7 @@ static int ehca_init_device(struct ehca_shca *shca) shca->ib_device.process_mad = ehca_process_mad; shca->ib_device.mmap = ehca_mmap; shca->ib_device.dma_ops = &ehca_dma_mapping_ops; + shca->ib_device.get_port_immutable = ehca_port_immutable; if (EHCA_BMASK_GET(HCA_CAP_SRQ, shca->hca_cap)) { shca->ib_device.uverbs_cmd_mask |= diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 
34b94c3ae674..49b774b6f7e8 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -1986,6 +1986,22 @@ static int disable_timer(struct ipath_devdata *dd) return 0; } +static int ipath_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = ipath_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + /** * ipath_register_ib_device - register our device with the infiniband core * @dd: the device data structure @@ -2186,6 +2202,7 @@ int ipath_register_ib_device(struct ipath_devdata *dd) dev->process_mad = ipath_process_mad; dev->mmap = ipath_mmap; dev->dma_ops = &ipath_dma_mapping_ops; + dev->get_port_immutable = ipath_port_immutable; snprintf(dev->node_desc, sizeof(dev->node_desc), IPATH_IDSTR " %s", init_utsname()->nodename); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 64f591437925..f46b1be08fc5 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -2123,6 +2123,22 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) kfree(ibdev->eq_table); } +static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = mlx4_ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + static void *mlx4_ib_add(struct mlx4_dev *dev) { struct mlx4_ib_dev *ibdev; @@ -2251,6 +2267,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach; ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach; ibdev->ib_dev.process_mad = mlx4_ib_process_mad; + ibdev->ib_dev.get_port_immutable = mlx4_port_immutable; if (!mlx4_is_slave(ibdev->dev)) { ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 8dec38055c49..8db0edca6dd8 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1188,6 +1188,22 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr) mlx5_ib_dealloc_pd(devr->p0); } +static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = mlx5_ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + static void *mlx5_ib_add(struct mlx5_core_dev *mdev) { struct mlx5_ib_dev *dev; @@ -1292,6 +1308,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.alloc_fast_reg_page_list = mlx5_ib_alloc_fast_reg_page_list; dev->ib_dev.free_fast_reg_page_list = mlx5_ib_free_fast_reg_page_list; dev->ib_dev.check_mr_status = mlx5_ib_check_mr_status; + dev->ib_dev.get_port_immutable = mlx5_port_immutable; mlx5_ib_internal_query_odp_caps(dev); diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index ad1cca3a3a5c..4662fd20b8e6 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -1250,6 +1250,22 @@ out: return err; } +static int mthca_port_immutable(struct ib_device 
*ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = mthca_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + int mthca_register_device(struct mthca_dev *dev) { int ret; @@ -1330,6 +1346,7 @@ int mthca_register_device(struct mthca_dev *dev) dev->ib_dev.reg_phys_mr = mthca_reg_phys_mr; dev->ib_dev.reg_user_mr = mthca_reg_user_mr; dev->ib_dev.dereg_mr = mthca_dereg_mr; + dev->ib_dev.get_port_immutable = mthca_port_immutable; if (dev->mthca_flags & MTHCA_FLAG_FMR) { dev->ib_dev.alloc_fmr = mthca_alloc_fmr; diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 027f6d1cd059..4ed2d967d324 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -3833,6 +3833,21 @@ static int nes_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags notify_ return 0; } +static int nes_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = nes_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} /** * nes_init_ofa_device @@ -3934,6 +3949,7 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev) nesibdev->ibdev.iwcm->reject = nes_reject; nesibdev->ibdev.iwcm->create_listen = nes_create_listen; nesibdev->ibdev.iwcm->destroy_listen = nes_destroy_listen; + nesibdev->ibdev.get_port_immutable = nes_port_immutable; return nesibdev; } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 85d99e9306a0..21744be6cac5 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -202,6 +202,22 @@ static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, return IB_LINK_LAYER_ETHERNET; } +static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = ocrdma_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + static int ocrdma_register_device(struct ocrdma_dev *dev) { strlcpy(dev->ibdev.name, "ocrdma%d", IB_DEVICE_NAME_MAX); @@ -287,6 +303,7 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) dev->ibdev.dma_device = &dev->nic_info.pdev->dev; dev->ibdev.process_mad = ocrdma_process_mad; + dev->ibdev.get_port_immutable = ocrdma_port_immutable; if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) { dev->ibdev.uverbs_cmd_mask |= diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 9fd4b285e5e5..48f4784c53a6 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -2046,6 +2046,22 @@ static void init_ibport(struct qib_pportdata *ppd) RCU_INIT_POINTER(ibp->qp1, NULL); } +static int qib_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = qib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + /** * 
qib_register_ib_device - register our device with the infiniband core * @dd: the device data structure @@ -2234,6 +2250,7 @@ int qib_register_ib_device(struct qib_devdata *dd) ibdev->process_mad = qib_process_mad; ibdev->mmap = qib_mmap; ibdev->dma_ops = &qib_dma_mapping_ops; + ibdev->get_port_immutable = qib_port_immutable; snprintf(ibdev->node_desc, sizeof(ibdev->node_desc), "Intel Infiniband HCA %s", init_utsname()->nodename); diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index bd9f364e909c..ce3e19bcfe00 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -300,6 +300,22 @@ static struct notifier_block usnic_ib_inetaddr_notifier = { }; /* End of inet section*/ +static int usnic_port_immutable(struct ib_device *ibdev, u8 port_num, + struct ib_port_immutable *immutable) +{ + struct ib_port_attr attr; + int err; + + err = usnic_ib_query_port(ibdev, port_num, &attr); + if (err) + return err; + + immutable->pkey_tbl_len = attr.pkey_tbl_len; + immutable->gid_tbl_len = attr.gid_tbl_len; + + return 0; +} + /* Start of PF discovery section */ static void *usnic_ib_device_add(struct pci_dev *dev) { @@ -384,6 +400,7 @@ static void *usnic_ib_device_add(struct pci_dev *dev) us_ibdev->ib_dev.poll_cq = usnic_ib_poll_cq; us_ibdev->ib_dev.req_notify_cq = usnic_ib_req_notify_cq; us_ibdev->ib_dev.get_dma_mr = usnic_ib_get_dma_mr; + us_ibdev->ib_dev.get_port_immutable = usnic_port_immutable; if (ib_register_device(&us_ibdev->ib_dev, NULL)) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index be4465b5df7b..2d3515edc3fa 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1481,6 +1481,11 @@ struct ib_dma_mapping_ops { struct iw_cm_verbs; +struct ib_port_immutable { + int pkey_tbl_len; + int gid_tbl_len; +}; + struct ib_device { struct device *dma_device; @@ -1494,8 +1499,10 @@ struct ib_device { struct list_head client_data_list; struct ib_cache cache; - int *pkey_tbl_len; - int *gid_tbl_len; + /** + * port_immutable is indexed by port number + */ + struct ib_port_immutable *port_immutable; int num_comp_vectors; @@ -1684,6 +1691,14 @@ struct ib_device { u32 local_dma_lkey; u8 node_type; u8 phys_port_cnt; + + /** + * The following mandatory functions are used only at device + * registration. Keep functions such as these at the end of this + * structure to avoid cache line misses when accessing struct ib_device + * in fast paths. 
+ */ + int (*get_port_immutable)(struct ib_device *, u8, struct ib_port_immutable *); }; struct ib_client { -- cgit v1.2.3 From f9b22e355d38c8dbfa19a2d9d5ef9bf07e7c17e6 Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Wed, 13 May 2015 20:02:59 -0400 Subject: IB/core: Convert core to use bitfield for caps Remove query_protocol callback Use the new Core Capability bits for: rdma_protocol_* rdma_cap_ib_mad rdma_cap_ib_smi rdma_cap_ib_cm rdma_cap_iw_cm rdma_cap_ib_sa rdma_cap_ib_mcast rdma_cap_af_ib rdma_cap_eth_ah Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/device.c | 1 - drivers/infiniband/hw/amso1100/c2_provider.c | 8 +--- drivers/infiniband/hw/cxgb3/iwch_provider.c | 8 +--- drivers/infiniband/hw/cxgb4/provider.c | 8 +--- drivers/infiniband/hw/ehca/ehca_hca.c | 6 --- drivers/infiniband/hw/ehca/ehca_main.c | 2 +- drivers/infiniband/hw/ipath/ipath_verbs.c | 8 +--- drivers/infiniband/hw/mlx4/main.c | 15 +++---- drivers/infiniband/hw/mlx5/main.c | 8 +--- drivers/infiniband/hw/mthca/mthca_provider.c | 8 +--- drivers/infiniband/hw/nes/nes_verbs.c | 8 +--- drivers/infiniband/hw/ocrdma/ocrdma_main.c | 2 +- drivers/infiniband/hw/ocrdma/ocrdma_verbs.c | 6 --- drivers/infiniband/hw/qib/qib_verbs.c | 8 +--- drivers/infiniband/hw/usnic/usnic_ib_main.c | 1 - drivers/infiniband/hw/usnic/usnic_ib_verbs.c | 6 --- include/rdma/ib_verbs.h | 64 +++++++++++++++++++++------- 17 files changed, 63 insertions(+), 104 deletions(-) (limited to 'include/rdma') diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 4aa4f5420bc9..8d07c12ab718 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -76,7 +76,6 @@ static int ib_device_check_mandatory(struct ib_device *device) } mandatory_table[] = { IB_MANDATORY_FUNC(query_device), IB_MANDATORY_FUNC(query_port), - IB_MANDATORY_FUNC(query_protocol), IB_MANDATORY_FUNC(query_pkey), IB_MANDATORY_FUNC(query_gid), IB_MANDATORY_FUNC(alloc_pd), diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c index fa638963908f..d396c39918de 100644 --- a/drivers/infiniband/hw/amso1100/c2_provider.c +++ b/drivers/infiniband/hw/amso1100/c2_provider.c @@ -99,12 +99,6 @@ static int c2_query_port(struct ib_device *ibdev, return 0; } -static enum rdma_protocol_type -c2_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IWARP; -} - static int c2_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 * pkey) { @@ -775,6 +769,7 @@ static int c2_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; return 0; } @@ -823,7 +818,6 @@ int c2_register_device(struct c2_dev *dev) dev->ibdev.dma_device = &dev->pcidev->dev; dev->ibdev.query_device = c2_query_device; dev->ibdev.query_port = c2_query_port; - dev->ibdev.query_protocol = c2_query_protocol; dev->ibdev.query_pkey = c2_query_pkey; dev->ibdev.query_gid = c2_query_gid; dev->ibdev.alloc_ucontext = c2_alloc_ucontext; diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c index 590ba0f422a1..061ef08c92e2 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.c +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c @@ -1232,12 +1232,6 @@ static int iwch_query_port(struct ib_device *ibdev, return 0; } -static enum rdma_protocol_type -iwch_query_protocol(struct ib_device *device, u8 port_num) -{ - 
return RDMA_PROTOCOL_IWARP; -} - static ssize_t show_rev(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1361,6 +1355,7 @@ static int iwch_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; return 0; } @@ -1407,7 +1402,6 @@ int iwch_register_device(struct iwch_dev *dev) dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev); dev->ibdev.query_device = iwch_query_device; dev->ibdev.query_port = iwch_query_port; - dev->ibdev.query_protocol = iwch_query_protocol; dev->ibdev.query_pkey = iwch_query_pkey; dev->ibdev.query_gid = iwch_query_gid; dev->ibdev.alloc_ucontext = iwch_alloc_ucontext; diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index 5eded1b3bbad..ef08a9f29451 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -390,12 +390,6 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port, return 0; } -static enum rdma_protocol_type -c4iw_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IWARP; -} - static ssize_t show_rev(struct device *dev, struct device_attribute *attr, char *buf) { @@ -483,6 +477,7 @@ static int c4iw_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; return 0; } @@ -528,7 +523,6 @@ int c4iw_register_device(struct c4iw_dev *dev) dev->ibdev.dma_device = &(dev->rdev.lldi.pdev->dev); dev->ibdev.query_device = c4iw_query_device; dev->ibdev.query_port = c4iw_query_port; - dev->ibdev.query_protocol = c4iw_query_protocol; dev->ibdev.query_pkey = c4iw_query_pkey; dev->ibdev.query_gid = c4iw_query_gid; dev->ibdev.alloc_ucontext = c4iw_alloc_ucontext; diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c index 1f4dc9c87bf9..9ed4d2588304 100644 --- a/drivers/infiniband/hw/ehca/ehca_hca.c +++ b/drivers/infiniband/hw/ehca/ehca_hca.c @@ -242,12 +242,6 @@ query_port1: return ret; } -enum rdma_protocol_type -ehca_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IB; -} - int ehca_query_sma_attr(struct ehca_shca *shca, u8 port, struct ehca_sma_attr *attr) { diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index 8454186e16ab..5e30b72d3677 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -443,6 +443,7 @@ static int ehca_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; return 0; } @@ -483,7 +484,6 @@ static int ehca_init_device(struct ehca_shca *shca) shca->ib_device.dma_device = &shca->ofdev->dev; shca->ib_device.query_device = ehca_query_device; shca->ib_device.query_port = ehca_query_port; - shca->ib_device.query_protocol = ehca_query_protocol; shca->ib_device.query_gid = ehca_query_gid; shca->ib_device.query_pkey = ehca_query_pkey; /* shca->in_device.modify_device = ehca_modify_device */ diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c index 49b774b6f7e8..764081d305b6 100644 --- a/drivers/infiniband/hw/ipath/ipath_verbs.c +++ b/drivers/infiniband/hw/ipath/ipath_verbs.c @@ -1638,12 +1638,6 @@ static int 
ipath_query_port(struct ib_device *ibdev, return 0; } -static enum rdma_protocol_type -ipath_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IB; -} - static int ipath_modify_device(struct ib_device *device, int device_modify_mask, struct ib_device_modify *device_modify) @@ -1998,6 +1992,7 @@ static int ipath_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; return 0; } @@ -2162,7 +2157,6 @@ int ipath_register_ib_device(struct ipath_devdata *dd) dev->query_device = ipath_query_device; dev->modify_device = ipath_modify_device; dev->query_port = ipath_query_port; - dev->query_protocol = ipath_query_protocol; dev->modify_port = ipath_modify_port; dev->query_pkey = ipath_query_pkey; dev->query_gid = ipath_query_gid; diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index f46b1be08fc5..c49dd0bb251a 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -420,15 +420,6 @@ static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port, return __mlx4_ib_query_port(ibdev, port, props, 0); } -static enum rdma_protocol_type -mlx4_ib_query_protocol(struct ib_device *device, u8 port_num) -{ - struct mlx4_dev *dev = to_mdev(device)->dev; - - return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ? - RDMA_PROTOCOL_IB : RDMA_PROTOCOL_IBOE; -} - int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid, int netw_view) { @@ -2136,6 +2127,11 @@ static int mlx4_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; + else + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; + return 0; } @@ -2226,7 +2222,6 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) ibdev->ib_dev.query_device = mlx4_ib_query_device; ibdev->ib_dev.query_port = mlx4_ib_query_port; - ibdev->ib_dev.query_protocol = mlx4_ib_query_protocol; ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer; ibdev->ib_dev.query_gid = mlx4_ib_query_gid; ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey; diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 8db0edca6dd8..b2fdb9cfa645 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -262,12 +262,6 @@ out: return err; } -static enum rdma_protocol_type -mlx5_ib_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IB; -} - static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index, union ib_gid *gid) { @@ -1200,6 +1194,7 @@ static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; return 0; } @@ -1266,7 +1261,6 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev) dev->ib_dev.query_device = mlx5_ib_query_device; dev->ib_dev.query_port = mlx5_ib_query_port; - dev->ib_dev.query_protocol = mlx5_ib_query_protocol; dev->ib_dev.query_gid = mlx5_ib_query_gid; dev->ib_dev.query_pkey = mlx5_ib_query_pkey; dev->ib_dev.modify_device = mlx5_ib_modify_device; diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index 
4662fd20b8e6..509d59e7a15a 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -179,12 +179,6 @@ static int mthca_query_port(struct ib_device *ibdev, return err; } -static enum rdma_protocol_type -mthca_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IB; -} - static int mthca_modify_device(struct ib_device *ibdev, int mask, struct ib_device_modify *props) @@ -1262,6 +1256,7 @@ static int mthca_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; return 0; } @@ -1303,7 +1298,6 @@ int mthca_register_device(struct mthca_dev *dev) dev->ib_dev.dma_device = &dev->pdev->dev; dev->ib_dev.query_device = mthca_query_device; dev->ib_dev.query_port = mthca_query_port; - dev->ib_dev.query_protocol = mthca_query_protocol; dev->ib_dev.modify_device = mthca_modify_device; dev->ib_dev.modify_port = mthca_modify_port; dev->ib_dev.query_pkey = mthca_query_pkey; diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 4ed2d967d324..05530e3f6ff0 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -606,12 +606,6 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr return 0; } -static enum rdma_protocol_type -nes_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IWARP; -} - /** * nes_query_pkey */ @@ -3845,6 +3839,7 @@ static int nes_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IWARP; return 0; } @@ -3899,7 +3894,6 @@ struct nes_ib_device *nes_init_ofa_device(struct net_device *netdev) nesibdev->ibdev.dev.parent = &nesdev->pcidev->dev; nesibdev->ibdev.query_device = nes_query_device; nesibdev->ibdev.query_port = nes_query_port; - nesibdev->ibdev.query_protocol = nes_query_protocol; nesibdev->ibdev.query_pkey = nes_query_pkey; nesibdev->ibdev.query_gid = nes_query_gid; nesibdev->ibdev.alloc_ucontext = nes_alloc_ucontext; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 21744be6cac5..f55289869357 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -214,6 +214,7 @@ static int ocrdma_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; return 0; } @@ -260,7 +261,6 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) /* mandatory verbs. 
*/ dev->ibdev.query_device = ocrdma_query_device; dev->ibdev.query_port = ocrdma_query_port; - dev->ibdev.query_protocol = ocrdma_query_protocol; dev->ibdev.modify_port = ocrdma_modify_port; dev->ibdev.query_gid = ocrdma_query_gid; dev->ibdev.get_link_layer = ocrdma_link_layer; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 3e98360e908d..877175563634 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -187,12 +187,6 @@ int ocrdma_query_port(struct ib_device *ibdev, return 0; } -enum rdma_protocol_type -ocrdma_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IBOE; -} - int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask, struct ib_port_modify *props) { diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index 48f4784c53a6..dba1c92f1a54 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -1650,12 +1650,6 @@ static int qib_query_port(struct ib_device *ibdev, u8 port, return 0; } -static enum rdma_protocol_type -qib_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_IB; -} - static int qib_modify_device(struct ib_device *device, int device_modify_mask, struct ib_device_modify *device_modify) @@ -2058,6 +2052,7 @@ static int qib_port_immutable(struct ib_device *ibdev, u8 port_num, immutable->pkey_tbl_len = attr.pkey_tbl_len; immutable->gid_tbl_len = attr.gid_tbl_len; + immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; return 0; } @@ -2206,7 +2201,6 @@ int qib_register_ib_device(struct qib_devdata *dd) ibdev->query_device = qib_query_device; ibdev->modify_device = qib_modify_device; ibdev->query_port = qib_query_port; - ibdev->query_protocol = qib_query_protocol; ibdev->modify_port = qib_modify_port; ibdev->query_pkey = qib_query_pkey; ibdev->query_gid = qib_query_gid; diff --git a/drivers/infiniband/hw/usnic/usnic_ib_main.c b/drivers/infiniband/hw/usnic/usnic_ib_main.c index ce3e19bcfe00..34c49b8105fe 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_main.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_main.c @@ -376,7 +376,6 @@ static void *usnic_ib_device_add(struct pci_dev *dev) us_ibdev->ib_dev.query_device = usnic_ib_query_device; us_ibdev->ib_dev.query_port = usnic_ib_query_port; - us_ibdev->ib_dev.query_protocol = usnic_ib_query_protocol; us_ibdev->ib_dev.query_pkey = usnic_ib_query_pkey; us_ibdev->ib_dev.query_gid = usnic_ib_query_gid; us_ibdev->ib_dev.get_link_layer = usnic_ib_port_link_layer; diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 732b5c5eeb32..53bd6a2d9cdb 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -348,12 +348,6 @@ int usnic_ib_query_port(struct ib_device *ibdev, u8 port, return 0; } -enum rdma_protocol_type -usnic_ib_query_protocol(struct ib_device *device, u8 port_num) -{ - return RDMA_PROTOCOL_USNIC_UDP; -} - int usnic_ib_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr, int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr) diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index 2d3515edc3fa..73d1b1000785 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -353,6 +353,40 @@ union rdma_protocol_stats { struct iw_protocol_stats iw; }; +/* Define bits for the various functionality this port needs to be supported by + * the core. 
+ */ +/* Management 0x00000FFF */ +#define RDMA_CORE_CAP_IB_MAD 0x00000001 +#define RDMA_CORE_CAP_IB_SMI 0x00000002 +#define RDMA_CORE_CAP_IB_CM 0x00000004 +#define RDMA_CORE_CAP_IW_CM 0x00000008 +#define RDMA_CORE_CAP_IB_SA 0x00000010 + +/* Address format 0x000FF000 */ +#define RDMA_CORE_CAP_AF_IB 0x00001000 +#define RDMA_CORE_CAP_ETH_AH 0x00002000 + +/* Protocol 0xFFF00000 */ +#define RDMA_CORE_CAP_PROT_IB 0x00100000 +#define RDMA_CORE_CAP_PROT_ROCE 0x00200000 +#define RDMA_CORE_CAP_PROT_IWARP 0x00400000 + +#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \ + | RDMA_CORE_CAP_IB_MAD \ + | RDMA_CORE_CAP_IB_SMI \ + | RDMA_CORE_CAP_IB_CM \ + | RDMA_CORE_CAP_IB_SA \ + | RDMA_CORE_CAP_AF_IB) +#define RDMA_CORE_PORT_IBA_ROCE (RDMA_CORE_CAP_PROT_ROCE \ + | RDMA_CORE_CAP_IB_MAD \ + | RDMA_CORE_CAP_IB_CM \ + | RDMA_CORE_CAP_IB_SA \ + | RDMA_CORE_CAP_AF_IB \ + | RDMA_CORE_CAP_ETH_AH) +#define RDMA_CORE_PORT_IWARP (RDMA_CORE_CAP_PROT_IWARP \ + | RDMA_CORE_CAP_IW_CM) + struct ib_port_attr { enum ib_port_state state; enum ib_mtu max_mtu; @@ -1484,6 +1518,7 @@ struct iw_cm_verbs; struct ib_port_immutable { int pkey_tbl_len; int gid_tbl_len; + u32 core_cap_flags; }; struct ib_device { @@ -1515,8 +1550,6 @@ struct ib_device { int (*query_port)(struct ib_device *device, u8 port_num, struct ib_port_attr *port_attr); - enum rdma_protocol_type (*query_protocol)(struct ib_device *device, - u8 port_num); enum rdma_link_layer (*get_link_layer)(struct ib_device *device, u8 port_num); int (*query_gid)(struct ib_device *device, @@ -1796,24 +1829,23 @@ static inline u8 rdma_end_port(const struct ib_device *device) static inline bool rdma_protocol_ib(struct ib_device *device, u8 port_num) { - return device->query_protocol(device, port_num) == RDMA_PROTOCOL_IB; + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB; } static inline bool rdma_protocol_iboe(struct ib_device *device, u8 port_num) { - return device->query_protocol(device, port_num) == RDMA_PROTOCOL_IBOE; + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE; } static inline bool rdma_protocol_iwarp(struct ib_device *device, u8 port_num) { - return device->query_protocol(device, port_num) == RDMA_PROTOCOL_IWARP; + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP; } static inline bool rdma_ib_or_iboe(struct ib_device *device, u8 port_num) { - enum rdma_protocol_type pt = device->query_protocol(device, port_num); - - return (pt == RDMA_PROTOCOL_IB || pt == RDMA_PROTOCOL_IBOE); + return device->port_immutable[port_num].core_cap_flags & + (RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE); } /** @@ -1830,7 +1862,7 @@ static inline bool rdma_ib_or_iboe(struct ib_device *device, u8 port_num) */ static inline bool rdma_cap_ib_mad(struct ib_device *device, u8 port_num) { - return rdma_ib_or_iboe(device, port_num); + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_MAD; } /** @@ -1855,7 +1887,7 @@ static inline bool rdma_cap_ib_mad(struct ib_device *device, u8 port_num) */ static inline bool rdma_cap_ib_smi(struct ib_device *device, u8 port_num) { - return rdma_protocol_ib(device, port_num); + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SMI; } /** @@ -1875,7 +1907,7 @@ static inline bool rdma_cap_ib_smi(struct ib_device *device, u8 port_num) */ static inline bool rdma_cap_ib_cm(struct ib_device *device, u8 port_num) { - return rdma_ib_or_iboe(device, port_num); + return device->port_immutable[port_num].core_cap_flags & 
RDMA_CORE_CAP_IB_CM; } /** @@ -1892,7 +1924,7 @@ static inline bool rdma_cap_ib_cm(struct ib_device *device, u8 port_num) */ static inline bool rdma_cap_iw_cm(struct ib_device *device, u8 port_num) { - return rdma_protocol_iwarp(device, port_num); + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IW_CM; } /** @@ -1912,7 +1944,7 @@ static inline bool rdma_cap_iw_cm(struct ib_device *device, u8 port_num) */ static inline bool rdma_cap_ib_sa(struct ib_device *device, u8 port_num) { - return rdma_protocol_ib(device, port_num); + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_IB_SA; } /** @@ -1952,7 +1984,7 @@ static inline bool rdma_cap_ib_mcast(struct ib_device *device, u8 port_num) */ static inline bool rdma_cap_af_ib(struct ib_device *device, u8 port_num) { - return rdma_ib_or_iboe(device, port_num); + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_AF_IB; } /** @@ -1973,7 +2005,7 @@ static inline bool rdma_cap_af_ib(struct ib_device *device, u8 port_num) */ static inline bool rdma_cap_eth_ah(struct ib_device *device, u8 port_num) { - return rdma_protocol_iboe(device, port_num); + return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_ETH_AH; } /** @@ -2001,7 +2033,7 @@ static inline bool rdma_cap_eth_ah(struct ib_device *device, u8 port_num) static inline bool rdma_cap_read_multi_sge(struct ib_device *device, u8 port_num) { - return !rdma_protocol_iwarp(device, port_num); + return !(device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP); } int ib_query_gid(struct ib_device *device, -- cgit v1.2.3 From 5d9fb0440698a8b9e8595353d60cfac7ab30efae Mon Sep 17 00:00:00 2001 From: Ira Weiny Date: Thu, 14 May 2015 15:01:46 -0400 Subject: IB/core: Change rdma_protocol_iboe to roce After discussion upstream, it was agreed to transition the usage of iboe in the kernel to roce. This keeps our terminology consistent with what was finalized in the IBTA Annex 16 and IBTA Annex 17 publications. Signed-off-by: Ira Weiny Signed-off-by: Doug Ledford --- drivers/infiniband/core/cma.c | 12 ++++++------ drivers/infiniband/core/ucma.c | 2 +- include/rdma/ib_verbs.h | 4 ++-- net/sunrpc/xprtrdma/svc_rdma_transport.c | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) (limited to 'include/rdma') diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 1977f601a1ec..ea92a0daa61c 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -391,7 +391,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv, if (listen_id_priv) { cma_dev = listen_id_priv->cma_dev; port = listen_id_priv->id.port_num; - gidp = rdma_protocol_iboe(cma_dev->device, port) ? + gidp = rdma_protocol_roce(cma_dev->device, port) ? &iboe_gid : &gid; ret = cma_validate_port(cma_dev->device, port, gidp, @@ -409,7 +409,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv, listen_id_priv->id.port_num == port) continue; - gidp = rdma_protocol_iboe(cma_dev->device, port) ? + gidp = rdma_protocol_roce(cma_dev->device, port) ? 
From 5d9fb0440698a8b9e8595353d60cfac7ab30efae Mon Sep 17 00:00:00 2001
From: Ira Weiny
Date: Thu, 14 May 2015 15:01:46 -0400
Subject: IB/core: Change rdma_protocol_iboe to roce

After discussion upstream, it was agreed to transition the usage of iboe
in the kernel to roce. This keeps our terminology consistent with what
was finalized in the IBTA Annex 16 and IBTA Annex 17 publications.

Signed-off-by: Ira Weiny
Signed-off-by: Doug Ledford
---
 drivers/infiniband/core/cma.c            | 12 ++++++------
 drivers/infiniband/core/ucma.c           |  2 +-
 include/rdma/ib_verbs.h                  |  4 ++--
 net/sunrpc/xprtrdma/svc_rdma_transport.c |  2 +-
 4 files changed, 10 insertions(+), 10 deletions(-)

(limited to 'include/rdma')

diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 1977f601a1ec..ea92a0daa61c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -391,7 +391,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
 	if (listen_id_priv) {
 		cma_dev = listen_id_priv->cma_dev;
 		port = listen_id_priv->id.port_num;
-		gidp = rdma_protocol_iboe(cma_dev->device, port) ?
+		gidp = rdma_protocol_roce(cma_dev->device, port) ?
 		       &iboe_gid : &gid;
 
 		ret = cma_validate_port(cma_dev->device, port, gidp,
@@ -409,7 +409,7 @@ static int cma_acquire_dev(struct rdma_id_private *id_priv,
 			    listen_id_priv->id.port_num == port)
 				continue;
 
-			gidp = rdma_protocol_iboe(cma_dev->device, port) ?
+			gidp = rdma_protocol_roce(cma_dev->device, port) ?
 			       &iboe_gid : &gid;
 
 			ret = cma_validate_port(cma_dev->device, port, gidp,
@@ -647,7 +647,7 @@ static int cma_modify_qp_rtr(struct rdma_id_private *id_priv,
 	BUG_ON(id_priv->cma_dev->device != id_priv->id.device);
 
-	if (rdma_protocol_iboe(id_priv->id.device, id_priv->id.port_num)) {
+	if (rdma_protocol_roce(id_priv->id.device, id_priv->id.port_num)) {
 		ret = rdma_addr_find_smac_by_sgid(&sgid, qp_attr.smac, NULL);
 		if (ret)
@@ -1966,7 +1966,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 	atomic_inc(&id_priv->refcount);
 	if (rdma_cap_ib_sa(id->device, id->port_num))
 		ret = cma_resolve_ib_route(id_priv, timeout_ms);
-	else if (rdma_protocol_iboe(id->device, id->port_num))
+	else if (rdma_protocol_roce(id->device, id->port_num))
 		ret = cma_resolve_iboe_route(id_priv);
 	else if (rdma_protocol_iwarp(id->device, id->port_num))
 		ret = cma_resolve_iw_route(id_priv, timeout_ms);
@@ -3325,7 +3325,7 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 	list_add(&mc->list, &id_priv->mc_list);
 	spin_unlock(&id_priv->lock);
 
-	if (rdma_protocol_iboe(id->device, id->port_num)) {
+	if (rdma_protocol_roce(id->device, id->port_num)) {
 		kref_init(&mc->mcref);
 		ret = cma_iboe_join_multicast(id_priv, mc);
 	} else if (rdma_cap_ib_mcast(id->device, id->port_num))
@@ -3365,7 +3365,7 @@ void rdma_leave_multicast(struct rdma_cm_id *id, struct sockaddr *addr)
 		if (rdma_cap_ib_mcast(id->device, id->port_num)) {
 			ib_sa_free_multicast(mc->multicast.ib);
 			kfree(mc);
-		} else if (rdma_protocol_iboe(id->device, id->port_num))
+		} else if (rdma_protocol_roce(id->device, id->port_num))
 			kref_put(&mc->mcref, release_mc);
 
 		return;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index d42b816c781f..ad45469f7582 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -725,7 +725,7 @@ static ssize_t ucma_query_route(struct ucma_file *file,
 
 	if (rdma_cap_ib_sa(ctx->cm_id->device, ctx->cm_id->port_num))
 		ucma_copy_ib_route(&resp, &ctx->cm_id->route);
-	else if (rdma_protocol_iboe(ctx->cm_id->device, ctx->cm_id->port_num))
+	else if (rdma_protocol_roce(ctx->cm_id->device, ctx->cm_id->port_num))
 		ucma_copy_iboe_route(&resp, &ctx->cm_id->route);
 	else if (rdma_protocol_iwarp(ctx->cm_id->device, ctx->cm_id->port_num))
 		ucma_copy_iw_route(&resp, &ctx->cm_id->route);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 73d1b1000785..3ebf0c019a66 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1832,7 +1832,7 @@ static inline bool rdma_protocol_ib(struct ib_device *device, u8 port_num)
 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
 }
 
-static inline bool rdma_protocol_iboe(struct ib_device *device, u8 port_num)
+static inline bool rdma_protocol_roce(struct ib_device *device, u8 port_num)
 {
 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_ROCE;
 }
@@ -1842,7 +1842,7 @@ static inline bool rdma_protocol_iwarp(struct ib_device *device, u8 port_num)
 	return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IWARP;
 }
 
-static inline bool rdma_ib_or_iboe(struct ib_device *device, u8 port_num)
+static inline bool rdma_ib_or_roce(struct ib_device *device, u8 port_num)
 {
 	return device->port_immutable[port_num].core_cap_flags &
 		(RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_PROT_ROCE);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 3df8320c6efe..3f5750cf187e 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -987,7 +987,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
 	 */
 	if (!rdma_protocol_iwarp(newxprt->sc_cm_id->device,
 				 newxprt->sc_cm_id->port_num) &&
-	    !rdma_ib_or_iboe(newxprt->sc_cm_id->device,
+	    !rdma_ib_or_roce(newxprt->sc_cm_id->device,
 			     newxprt->sc_cm_id->port_num))
 		goto errout;
--
cgit v1.2.3
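After the rename, callers classify a port through rdma_protocol_roce() and
rdma_ib_or_roce() rather than the old iboe names. The helper below is a
minimal, illustrative sketch (not from this series; the function name is an
assumption) of the dispatch pattern used in the cma.c and ucma.c hunks above.

#include <rdma/ib_verbs.h>

/*
 * Illustrative only: example_port_proto_name() is an assumed helper that
 * mirrors the per-port protocol dispatch shown in the hunks above.
 */
static const char *example_port_proto_name(struct ib_device *device,
					   u8 port_num)
{
	if (rdma_protocol_ib(device, port_num))
		return "InfiniBand";
	if (rdma_protocol_roce(device, port_num))
		return "RoCE";
	if (rdma_protocol_iwarp(device, port_num))
		return "iWARP";
	return "unknown";
}

A transport that is happy with either IB or RoCE, such as svc_rdma in the hunk
above, still needs only the single rdma_ib_or_roce() test.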