path: root/include/rdma
author    Linus Torvalds <torvalds@linux-foundation.org>  2020-06-06 00:05:57 +0300
committer Linus Torvalds <torvalds@linux-foundation.org>  2020-06-06 00:05:57 +0300
commit    242b23319809e05170b3cc0d44d3b4bd202bb073
tree      195e39fd02942ee0ef60ead7239859f2fe0c12a1 /include/rdma
parent    3f7e82379fc91102d82ed89822bd4242c83e40d5
parent    fba97dc7fc76b2c9a909fa0b3786d30a9899f5cf
Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma
Pull rdma updates from Jason Gunthorpe:
 "A more active cycle than most of the recent past, with a few large,
  long discussed works this time.

  The RNBD block driver has been posted for nearly two years now, and
  flowing through RDMA due to it also introducing a new ULP. The
  removal of FMR has been a recurring discussion theme for a long time.
  And the usual smattering of features and bug fixes.

  Summary:

   - Various small driver bug fixes in rxe, mlx5, hfi1, and efa

   - Continuing driver cleanups in bnxt_re, hns

   - Big cleanup of mlx5 QP creation flows

   - More consistent use of src port and flow label when LAG is used,
     and an mlx5 implementation

   - Additional set of cleanups for IB CM

   - 'RNBD' network block driver and target. This is a network block
     RDMA device specific to ionos's cloud environment. It brings
     strong multipath and resiliency capabilities.

   - Accelerated IPoIB for HFI1

   - QP/WQ/SRQ ioctl migration for uverbs, and support for multiple
     async fds

   - Support for exchanging the new IBTA defined ECE data during RDMA
     CM exchanges

   - Removal of the very old and insecure FMR interface from all ULPs
     and drivers. FRWR should be preferred for at least a decade now"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma: (247 commits)
  RDMA/cm: Spurious WARNING triggered in cm_destroy_id()
  RDMA/mlx5: Return ECE DC support
  RDMA/mlx5: Don't rely on FW to set zeros in ECE response
  RDMA/mlx5: Return an error if copy_to_user fails
  IB/hfi1: Use free_netdev() in hfi1_netdev_free()
  RDMA/hns: Uninitialized variable in modify_qp_init_to_rtr()
  RDMA/core: Move and rename trace_cm_id_create()
  IB/hfi1: Fix hfi1_netdev_rx_init() error handling
  RDMA: Remove 'max_map_per_fmr'
  RDMA: Remove 'max_fmr'
  RDMA/core: Remove FMR device ops
  RDMA/rdmavt: Remove FMR memory registration
  RDMA/mthca: Remove FMR support for memory registration
  RDMA/mlx4: Remove FMR support for memory registration
  RDMA/i40iw: Remove FMR leftovers
  RDMA/bnxt_re: Remove FMR leftovers
  RDMA/mlx5: Remove FMR leftovers
  RDMA/core: Remove FMR pool API
  RDMA/rds: Remove FMR support for memory registration
  RDMA/srp: Remove support for FMR memory registration
  ...
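[Editor's note] The FMR removal is the piece most likely to affect out-of-tree ULPs. As rough orientation only, here is a minimal sketch of the FRWR-based registration flow that replaces it, built on the long-standing ib_alloc_mr()/ib_map_mr_sg()/IB_WR_REG_MR verbs; the helper name and error handling are illustrative, not taken from this merge:

#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

/* Hypothetical helper; names, access flags and error handling are
 * illustrative. */
static int example_frwr_register(struct ib_qp *qp, struct ib_pd *pd,
				 struct scatterlist *sg, int sg_nents)
{
	struct ib_reg_wr reg_wr = {};
	struct ib_mr *mr;
	int n;

	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, sg_nents);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* Map the scatterlist into the MR's page list. */
	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < sg_nents) {
		ib_dereg_mr(mr);
		return n < 0 ? n : -EINVAL;
	}

	/* Unlike an FMR map, registration is posted on the send queue. */
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	/* Completion handling (wr_cqe etc.) omitted for brevity. */
	return ib_post_send(qp, &reg_wr.wr, NULL);
}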
Diffstat (limited to 'include/rdma')
-rw-r--r--  include/rdma/ib_cm.h              9
-rw-r--r--  include/rdma/ib_fmr_pool.h       93
-rw-r--r--  include/rdma/ib_mad.h            49
-rw-r--r--  include/rdma/ib_verbs.h         298
-rw-r--r--  include/rdma/ibta_vol1_c12.h      6
-rw-r--r--  include/rdma/lag.h               23
-rw-r--r--  include/rdma/opa_port_info.h     10
-rw-r--r--  include/rdma/opa_vnic.h           4
-rw-r--r--  include/rdma/rdma_cm.h           17
-rw-r--r--  include/rdma/rdmavt_qp.h         31
-rw-r--r--  include/rdma/uverbs_ioctl.h      18
-rw-r--r--  include/rdma/uverbs_std_types.h   2
-rw-r--r--  include/rdma/uverbs_types.h       3
13 files changed, 287 insertions(+), 276 deletions(-)
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 058cfbc2b37f..0f1ea5f2d01c 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -11,6 +11,7 @@
#include <rdma/ib_mad.h>
#include <rdma/ib_sa.h>
+#include <rdma/rdma_cm.h>
/* ib_cm and ib_user_cm modules share /sys/class/infiniband_cm */
extern struct class cm_class;
@@ -115,6 +116,7 @@ struct ib_cm_req_event_param {
unsigned int retry_count:3;
unsigned int rnr_retry_count:3;
unsigned int srq:1;
+ struct rdma_ucm_ece ece;
};
struct ib_cm_rep_event_param {
@@ -129,6 +131,7 @@ struct ib_cm_rep_event_param {
unsigned int flow_control:1;
unsigned int rnr_retry_count:3;
unsigned int srq:1;
+ struct rdma_ucm_ece ece;
};
enum ib_cm_rej_reason {
@@ -164,7 +167,8 @@ enum ib_cm_rej_reason {
IB_CM_REJ_DUPLICATE_LOCAL_COMM_ID = 30,
IB_CM_REJ_INVALID_CLASS_VERSION = 31,
IB_CM_REJ_INVALID_FLOW_LABEL = 32,
- IB_CM_REJ_INVALID_ALT_FLOW_LABEL = 33
+ IB_CM_REJ_INVALID_ALT_FLOW_LABEL = 33,
+ IB_CM_REJ_VENDOR_OPTION_NOT_SUPPORTED = 35,
};
struct ib_cm_rej_event_param {
@@ -369,6 +373,7 @@ struct ib_cm_req_param {
u8 rnr_retry_count;
u8 max_cm_retries;
u8 srq;
+ struct rdma_ucm_ece ece;
};
/**
@@ -392,6 +397,7 @@ struct ib_cm_rep_param {
u8 flow_control;
u8 rnr_retry_count;
u8 srq;
+ struct rdma_ucm_ece ece;
};
/**
@@ -546,6 +552,7 @@ struct ib_cm_sidr_rep_param {
u8 info_length;
const void *private_data;
u8 private_data_len;
+ struct rdma_ucm_ece ece;
};
/**
diff --git a/include/rdma/ib_fmr_pool.h b/include/rdma/ib_fmr_pool.h
deleted file mode 100644
index 2fd9bfb6d648..000000000000
--- a/include/rdma/ib_fmr_pool.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * Copyright (c) 2004 Topspin Corporation. All rights reserved.
- * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#if !defined(IB_FMR_POOL_H)
-#define IB_FMR_POOL_H
-
-#include <rdma/ib_verbs.h>
-
-struct ib_fmr_pool;
-
-/**
- * struct ib_fmr_pool_param - Parameters for creating FMR pool
- * @max_pages_per_fmr:Maximum number of pages per map request.
- * @page_shift: Log2 of sizeof "pages" mapped by this fmr
- * @access:Access flags for FMRs in pool.
- * @pool_size:Number of FMRs to allocate for pool.
- * @dirty_watermark:Flush is triggered when @dirty_watermark dirty
- * FMRs are present.
- * @flush_function:Callback called when unmapped FMRs are flushed and
- * more FMRs are possibly available for mapping
- * @flush_arg:Context passed to user's flush function.
- * @cache:If set, FMRs may be reused after unmapping for identical map
- * requests.
- */
-struct ib_fmr_pool_param {
- int max_pages_per_fmr;
- int page_shift;
- enum ib_access_flags access;
- int pool_size;
- int dirty_watermark;
- void (*flush_function)(struct ib_fmr_pool *pool,
- void *arg);
- void *flush_arg;
- unsigned cache:1;
-};
-
-struct ib_pool_fmr {
- struct ib_fmr *fmr;
- struct ib_fmr_pool *pool;
- struct list_head list;
- struct hlist_node cache_node;
- int ref_count;
- int remap_count;
- u64 io_virtual_address;
- int page_list_len;
- u64 page_list[];
-};
-
-struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
- struct ib_fmr_pool_param *params);
-
-void ib_destroy_fmr_pool(struct ib_fmr_pool *pool);
-
-int ib_flush_fmr_pool(struct ib_fmr_pool *pool);
-
-struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
- u64 *page_list,
- int list_len,
- u64 io_virtual_address);
-
-void ib_fmr_pool_unmap(struct ib_pool_fmr *fmr);
-
-#endif /* IB_FMR_POOL_H */
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 4e62650e2127..8c093fc1bb9f 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -559,20 +559,6 @@ typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent,
struct ib_mad_send_wc *mad_send_wc);
/**
- * ib_mad_snoop_handler - Callback handler for snooping sent MADs.
- * @mad_agent: MAD agent that snooped the MAD.
- * @send_buf: send MAD data buffer.
- * @mad_send_wc: Work completion information on the sent MAD. Valid
- * only for snooping that occurs on a send completion.
- *
- * Clients snooping MADs should not modify data referenced by the @send_buf
- * or @mad_send_wc.
- */
-typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
- struct ib_mad_send_buf *send_buf,
- struct ib_mad_send_wc *mad_send_wc);
-
-/**
* ib_mad_recv_handler - callback handler for a received MAD.
* @mad_agent: MAD agent requesting the received MAD.
* @send_buf: Send buffer if found, else NULL
@@ -581,8 +567,7 @@ typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
* MADs received in response to a send request operation will be handed to
* the user before the send operation completes. All data buffers given
* to registered agents through this routine are owned by the receiving
- * client, except for snooping agents. Clients snooping MADs should not
- * modify the data referenced by @mad_recv_wc.
+ * client.
*/
typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
struct ib_mad_send_buf *send_buf,
@@ -595,7 +580,6 @@ typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
* @mr: Memory region for system memory usable for DMA.
* @recv_handler: Callback handler for a received MAD.
* @send_handler: Callback handler for a sent MAD.
- * @snoop_handler: Callback handler for snooped sent MADs.
* @context: User-specified context associated with this registration.
* @hi_tid: Access layer assigned transaction ID for this client.
* Unsolicited MADs sent by this client will have the upper 32-bits
@@ -612,7 +596,6 @@ struct ib_mad_agent {
struct ib_qp *qp;
ib_mad_recv_handler recv_handler;
ib_mad_send_handler send_handler;
- ib_mad_snoop_handler snoop_handler;
void *context;
u32 hi_tid;
u32 flags;
@@ -720,36 +703,6 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device,
ib_mad_recv_handler recv_handler,
void *context,
u32 registration_flags);
-
-enum ib_mad_snoop_flags {
- /*IB_MAD_SNOOP_POSTED_SENDS = 1,*/
- /*IB_MAD_SNOOP_RMPP_SENDS = (1<<1),*/
- IB_MAD_SNOOP_SEND_COMPLETIONS = (1<<2),
- /*IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS = (1<<3),*/
- IB_MAD_SNOOP_RECVS = (1<<4)
- /*IB_MAD_SNOOP_RMPP_RECVS = (1<<5),*/
- /*IB_MAD_SNOOP_REDIRECTED_QPS = (1<<6)*/
-};
-
-/**
- * ib_register_mad_snoop - Register to snoop sent and received MADs.
- * @device: The device to register with.
- * @port_num: The port on the specified device to use.
- * @qp_type: Specifies which QP traffic to snoop. Must be either
- * IB_QPT_SMI or IB_QPT_GSI.
- * @mad_snoop_flags: Specifies information where snooping occurs.
- * @send_handler: The callback routine invoked for a snooped send.
- * @recv_handler: The callback routine invoked for a snooped receive.
- * @context: User specified context associated with the registration.
- */
-struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
- u8 port_num,
- enum ib_qp_type qp_type,
- int mad_snoop_flags,
- ib_mad_snoop_handler snoop_handler,
- ib_mad_recv_handler recv_handler,
- void *context);
-
/**
* ib_unregister_mad_agent - Unregisters a client from using MAD services.
* @mad_agent: Corresponding MAD registration request to deregister.
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index bbc5cfb57cd2..033e7044f29c 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -305,7 +305,7 @@ enum ib_device_cap_flags {
IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
- IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
+ IB_DEVICE_RDMA_NETDEV_OPA = (1ULL << 35),
/* The device supports padding incoming writes to cacheline. */
IB_DEVICE_PCI_WRITE_END_PADDING = (1ULL << 36),
IB_DEVICE_ALLOW_USER_UNREG = (1ULL << 37),
@@ -430,8 +430,6 @@ struct ib_device_attr {
int max_mcast_qp_attach;
int max_total_mcast_qp_attach;
int max_ah;
- int max_fmr;
- int max_map_per_fmr;
int max_srq;
int max_srq_wr;
int max_srq_sge;
@@ -462,6 +460,11 @@ enum ib_mtu {
IB_MTU_4096 = 5
};
+enum opa_mtu {
+ OPA_MTU_8192 = 6,
+ OPA_MTU_10240 = 7
+};
+
static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
{
switch (mtu) {
@@ -488,6 +491,28 @@ static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
return IB_MTU_256;
}
+static inline int opa_mtu_enum_to_int(enum opa_mtu mtu)
+{
+ switch (mtu) {
+ case OPA_MTU_8192:
+ return 8192;
+ case OPA_MTU_10240:
+ return 10240;
+ default:
+ return(ib_mtu_enum_to_int((enum ib_mtu)mtu));
+ }
+}
+
+static inline enum opa_mtu opa_mtu_int_to_enum(int mtu)
+{
+ if (mtu >= 10240)
+ return OPA_MTU_10240;
+ else if (mtu >= 8192)
+ return OPA_MTU_8192;
+ else
+ return ((enum opa_mtu)ib_mtu_int_to_enum(mtu));
+}
+
enum ib_port_state {
IB_PORT_NOP = 0,
IB_PORT_DOWN = 1,
@@ -651,6 +676,7 @@ struct ib_port_attr {
enum ib_port_state state;
enum ib_mtu max_mtu;
enum ib_mtu active_mtu;
+ u32 phys_mtu;
int gid_tbl_len;
unsigned int ip_gids:1;
/* This is the value from PortInfo CapabilityMask, defined by IBA */
@@ -880,6 +906,12 @@ struct ib_mr_status {
*/
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
+struct rdma_ah_init_attr {
+ struct rdma_ah_attr *ah_attr;
+ u32 flags;
+ struct net_device *xmit_slave;
+};
+
enum rdma_ah_attr_type {
RDMA_AH_ATTR_TYPE_UNDEFINED,
RDMA_AH_ATTR_TYPE_IB,
@@ -1006,9 +1038,9 @@ enum ib_cq_notify_flags {
};
enum ib_srq_type {
- IB_SRQT_BASIC,
- IB_SRQT_XRC,
- IB_SRQT_TM,
+ IB_SRQT_BASIC = IB_UVERBS_SRQT_BASIC,
+ IB_SRQT_XRC = IB_UVERBS_SRQT_XRC,
+ IB_SRQT_TM = IB_UVERBS_SRQT_TM,
};
static inline bool ib_srq_has_cq(enum ib_srq_type srq_type)
@@ -1077,16 +1109,16 @@ enum ib_qp_type {
IB_QPT_SMI,
IB_QPT_GSI,
- IB_QPT_RC,
- IB_QPT_UC,
- IB_QPT_UD,
+ IB_QPT_RC = IB_UVERBS_QPT_RC,
+ IB_QPT_UC = IB_UVERBS_QPT_UC,
+ IB_QPT_UD = IB_UVERBS_QPT_UD,
IB_QPT_RAW_IPV6,
IB_QPT_RAW_ETHERTYPE,
- IB_QPT_RAW_PACKET = 8,
- IB_QPT_XRC_INI = 9,
- IB_QPT_XRC_TGT,
+ IB_QPT_RAW_PACKET = IB_UVERBS_QPT_RAW_PACKET,
+ IB_QPT_XRC_INI = IB_UVERBS_QPT_XRC_INI,
+ IB_QPT_XRC_TGT = IB_UVERBS_QPT_XRC_TGT,
IB_QPT_MAX,
- IB_QPT_DRIVER = 0xFF,
+ IB_QPT_DRIVER = IB_UVERBS_QPT_DRIVER,
/* Reserve a range for qp types internal to the low level driver.
* These qp types will not be visible at the IB core layer, so the
* IB_QPT_MAX usages should not be affected in the core layer
@@ -1105,17 +1137,21 @@ enum ib_qp_type {
enum ib_qp_create_flags {
IB_QP_CREATE_IPOIB_UD_LSO = 1 << 0,
- IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK = 1 << 1,
+ IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK =
+ IB_UVERBS_QP_CREATE_BLOCK_MULTICAST_LOOPBACK,
IB_QP_CREATE_CROSS_CHANNEL = 1 << 2,
IB_QP_CREATE_MANAGED_SEND = 1 << 3,
IB_QP_CREATE_MANAGED_RECV = 1 << 4,
IB_QP_CREATE_NETIF_QP = 1 << 5,
IB_QP_CREATE_INTEGRITY_EN = 1 << 6,
- /* FREE = 1 << 7, */
- IB_QP_CREATE_SCATTER_FCS = 1 << 8,
- IB_QP_CREATE_CVLAN_STRIPPING = 1 << 9,
+ IB_QP_CREATE_NETDEV_USE = 1 << 7,
+ IB_QP_CREATE_SCATTER_FCS =
+ IB_UVERBS_QP_CREATE_SCATTER_FCS,
+ IB_QP_CREATE_CVLAN_STRIPPING =
+ IB_UVERBS_QP_CREATE_CVLAN_STRIPPING,
IB_QP_CREATE_SOURCE_QPN = 1 << 10,
- IB_QP_CREATE_PCI_WRITE_END_PADDING = 1 << 11,
+ IB_QP_CREATE_PCI_WRITE_END_PADDING =
+ IB_UVERBS_QP_CREATE_PCI_WRITE_END_PADDING,
/* reserve bits 26-31 for low level drivers' internal use */
IB_QP_CREATE_RESERVED_START = 1 << 26,
IB_QP_CREATE_RESERVED_END = 1 << 31,
@@ -1267,6 +1303,7 @@ struct ib_qp_attr {
u8 alt_port_num;
u8 alt_timeout;
u32 rate_limit;
+ struct net_device *xmit_slave;
};
enum ib_wr_opcode {
@@ -1436,12 +1473,6 @@ enum ib_mr_rereg_flags {
IB_MR_REREG_SUPPORTED = ((IB_MR_REREG_ACCESS << 1) - 1)
};
-struct ib_fmr_attr {
- int max_pages;
- int max_maps;
- u8 page_shift;
-};
-
struct ib_umem;
enum rdma_remove_reason {
@@ -1456,6 +1487,11 @@ enum rdma_remove_reason {
RDMA_REMOVE_DRIVER_REMOVE,
/* uobj is being cleaned-up before being committed */
RDMA_REMOVE_ABORT,
+ /*
+ * uobj has been fully created, with the uobj->object set, but is being
+ * cleaned up before being committed
+ */
+ RDMA_REMOVE_ABORT_HWOBJ,
};
struct ib_rdmacg_object {
@@ -1544,10 +1580,12 @@ struct ib_ah {
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
enum ib_poll_context {
- IB_POLL_DIRECT, /* caller context, no hw completions */
IB_POLL_SOFTIRQ, /* poll from softirq context */
IB_POLL_WORKQUEUE, /* poll from workqueue */
IB_POLL_UNBOUND_WORKQUEUE, /* poll from unbound workqueue */
+ IB_POLL_LAST_POOL_TYPE = IB_POLL_UNBOUND_WORKQUEUE,
+
+ IB_POLL_DIRECT, /* caller context, no hw completions */
};
struct ib_cq {
@@ -1557,9 +1595,11 @@ struct ib_cq {
void (*event_handler)(struct ib_event *, void *);
void *cq_context;
int cqe;
+ unsigned int cqe_used;
atomic_t usecnt; /* count number of work queues */
enum ib_poll_context poll_ctx;
struct ib_wc *wc;
+ struct list_head pool_entry;
union {
struct irq_poll iop;
struct work_struct work;
@@ -1569,7 +1609,9 @@ struct ib_cq {
/* updated only by trace points */
ktime_t timestamp;
- bool interrupt;
+ u8 interrupt:1;
+ u8 shared:1;
+ unsigned int comp_vector;
/*
* Implementation details of the RDMA core, don't use in drivers:
@@ -1614,7 +1656,7 @@ enum ib_raw_packet_caps {
};
enum ib_wq_type {
- IB_WQT_RQ
+ IB_WQT_RQ = IB_UVERBS_WQT_RQ,
};
enum ib_wq_state {
@@ -1637,10 +1679,11 @@ struct ib_wq {
};
enum ib_wq_flags {
- IB_WQ_FLAGS_CVLAN_STRIPPING = 1 << 0,
- IB_WQ_FLAGS_SCATTER_FCS = 1 << 1,
- IB_WQ_FLAGS_DELAY_DROP = 1 << 2,
- IB_WQ_FLAGS_PCI_WRITE_END_PADDING = 1 << 3,
+ IB_WQ_FLAGS_CVLAN_STRIPPING = IB_UVERBS_WQ_FLAGS_CVLAN_STRIPPING,
+ IB_WQ_FLAGS_SCATTER_FCS = IB_UVERBS_WQ_FLAGS_SCATTER_FCS,
+ IB_WQ_FLAGS_DELAY_DROP = IB_UVERBS_WQ_FLAGS_DELAY_DROP,
+ IB_WQ_FLAGS_PCI_WRITE_END_PADDING =
+ IB_UVERBS_WQ_FLAGS_PCI_WRITE_END_PADDING,
};
struct ib_wq_init_attr {
@@ -1804,14 +1847,6 @@ struct ib_mw {
enum ib_mw_type type;
};
-struct ib_fmr {
- struct ib_device *device;
- struct ib_pd *pd;
- struct list_head list;
- u32 lkey;
- u32 rkey;
-};
-
/* Supported steering options */
enum ib_flow_attr_type {
/* steering according to rule specifications */
@@ -2198,6 +2233,7 @@ struct rdma_netdev {
void *clnt_priv;
struct ib_device *hca;
u8 port_num;
+ int mtu;
/*
* cleanup function must be specified.
@@ -2403,8 +2439,8 @@ struct ib_device_ops {
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
- int (*create_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr,
- u32 flags, struct ib_udata *udata);
+ int (*create_ah)(struct ib_ah *ah, struct rdma_ah_init_attr *attr,
+ struct ib_udata *udata);
int (*modify_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
int (*query_ah)(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
void (*destroy_ah)(struct ib_ah *ah, u32 flags);
@@ -2453,12 +2489,6 @@ struct ib_device_ops {
struct ib_mw *(*alloc_mw)(struct ib_pd *pd, enum ib_mw_type type,
struct ib_udata *udata);
int (*dealloc_mw)(struct ib_mw *mw);
- struct ib_fmr *(*alloc_fmr)(struct ib_pd *pd, int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
- int (*map_phys_fmr)(struct ib_fmr *fmr, u64 *page_list, int list_len,
- u64 iova);
- int (*unmap_fmr)(struct list_head *fmr_list);
- int (*dealloc_fmr)(struct ib_fmr *fmr);
int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
@@ -2687,6 +2717,10 @@ struct ib_device {
#endif
u32 index;
+
+ spinlock_t cq_pools_lock;
+ struct list_head cq_pools[IB_POLL_LAST_POOL_TYPE + 1];
+
struct rdma_restrack_root *res;
const struct uapi_definition *driver_def;
@@ -2709,12 +2743,13 @@ struct ib_device {
/* Used by iWarp CM */
char iw_ifname[IFNAMSIZ];
u32 iw_driver_flags;
+ u32 lag_flags;
};
struct ib_client_nl_info;
struct ib_client {
const char *name;
- void (*add) (struct ib_device *);
+ int (*add)(struct ib_device *ibdev);
void (*remove)(struct ib_device *, void *client_data);
void (*rename)(struct ib_device *dev, void *client_data);
int (*get_nl_info)(struct ib_device *ibdev, void *client_data,
@@ -3355,6 +3390,55 @@ static inline unsigned int rdma_find_pg_bit(unsigned long addr,
return __fls(pgsz);
}
+/**
+ * rdma_core_cap_opa_port - Return whether the RDMA Port is OPA or not.
+ * @device: Device
+ * @port_num: 1 based Port number
+ *
+ * Return true if the port is an Intel OPA port, false if not.
+ */
+static inline bool rdma_core_cap_opa_port(struct ib_device *device,
+ u32 port_num)
+{
+ return (device->port_data[port_num].immutable.core_cap_flags &
+ RDMA_CORE_PORT_INTEL_OPA) == RDMA_CORE_PORT_INTEL_OPA;
+}
+
+/**
+ * rdma_mtu_enum_to_int - Return the mtu of the port as an integer value.
+ * @device: Device
+ * @port: Port number
+ * @mtu: enum value of MTU
+ *
+ * Return the MTU size supported by the port as an integer value. Returns
+ * -1 if the MTU enum value is not supported.
+ */
+static inline int rdma_mtu_enum_to_int(struct ib_device *device, u8 port,
+ int mtu)
+{
+ if (rdma_core_cap_opa_port(device, port))
+ return opa_mtu_enum_to_int((enum opa_mtu)mtu);
+ else
+ return ib_mtu_enum_to_int((enum ib_mtu)mtu);
+}
+
+/**
+ * rdma_mtu_from_attr - Return the mtu of the port from the port attribute.
+ * @device: Device
+ * @port: Port number
+ * @attr: port attribute
+ *
+ * Return the MTU size supported by the port as an integer value.
+ */
+static inline int rdma_mtu_from_attr(struct ib_device *device, u8 port,
+ struct ib_port_attr *attr)
+{
+ if (rdma_core_cap_opa_port(device, port))
+ return attr->phys_mtu;
+ else
+ return ib_mtu_enum_to_int(attr->max_mtu);
+}
+
int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
int state);
int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
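[Editor's note] Since rdma_mtu_from_attr() hides the OPA/IB distinction, a caller can stay MTU-enum agnostic. A small hypothetical sketch (the helper name is illustrative, not part of this patch):

/* Hypothetical helper: port MTU in bytes for both OPA and IB ports. */
static int example_port_mtu_bytes(struct ib_device *dev, u8 port)
{
	struct ib_port_attr attr;
	int ret;

	ret = ib_query_port(dev, port, &attr);
	if (ret)
		return ret;

	/* phys_mtu on OPA ports; the decoded max_mtu enum elsewhere. */
	return rdma_mtu_from_attr(dev, port, &attr);
}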
@@ -3551,21 +3635,18 @@ static inline int rdma_destroy_ah(struct ib_ah *ah, u32 flags)
return rdma_destroy_ah_user(ah, flags, NULL);
}
-/**
- * ib_create_srq - Creates a SRQ associated with the specified protection
- * domain.
- * @pd: The protection domain associated with the SRQ.
- * @srq_init_attr: A list of initial attributes required to create the
- * SRQ. If SRQ creation succeeds, then the attributes are updated to
- * the actual capabilities of the created SRQ.
- *
- * srq_attr->max_wr and srq_attr->max_sge are read the determine the
- * requested size of the SRQ, and set to the actual values allocated
- * on return. If ib_create_srq() succeeds, then max_wr and max_sge
- * will always be at least as large as the requested values.
- */
-struct ib_srq *ib_create_srq(struct ib_pd *pd,
- struct ib_srq_init_attr *srq_init_attr);
+struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
+ struct ib_srq_init_attr *srq_init_attr,
+ struct ib_usrq_object *uobject,
+ struct ib_udata *udata);
+static inline struct ib_srq *
+ib_create_srq(struct ib_pd *pd, struct ib_srq_init_attr *srq_init_attr)
+{
+ if (!pd->device->ops.create_srq)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return ib_create_srq_user(pd, srq_init_attr, NULL, NULL);
+}
/**
* ib_modify_srq - Modifies the attributes for the specified SRQ.
@@ -3816,6 +3897,8 @@ static inline struct ib_cq *ib_alloc_cq_any(struct ib_device *dev,
* ib_free_cq_user - Free kernel/user CQ
* @cq: The CQ to free
* @udata: Valid user data or NULL for kernel objects
+ *
+ * NOTE: This function shouldn't be called on shared CQs.
*/
void ib_free_cq_user(struct ib_cq *cq, struct ib_udata *udata);
@@ -3941,6 +4024,12 @@ static inline int ib_req_notify_cq(struct ib_cq *cq,
return cq->device->ops.req_notify_cq(cq, flags);
}
+struct ib_cq *ib_cq_pool_get(struct ib_device *dev, unsigned int nr_cqe,
+ int comp_vector_hint,
+ enum ib_poll_context poll_ctx);
+
+void ib_cq_pool_put(struct ib_cq *cq, unsigned int nr_cqe);
+
/**
* ib_req_ncomp_notif - Request completion notification when there are
* at least the specified number of unreaped completions on the CQ.
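[Editor's note] The two declarations above are the entry points of the new shared-CQ pool. A hedged sketch of the intended borrow/return pattern follows; the function names and the vector hint of 0 are illustrative assumptions:

/* Hypothetical ULP usage of the shared-CQ pool. */
static struct ib_cq *example_get_shared_cq(struct ib_device *dev,
					   unsigned int nr_cqe)
{
	/* Hint 0 expresses a preference for the first completion vector. */
	return ib_cq_pool_get(dev, nr_cqe, 0, IB_POLL_WORKQUEUE);
}

static void example_put_shared_cq(struct ib_cq *cq, unsigned int nr_cqe)
{
	/* Returns the CQE budget to the pool instead of destroying the CQ. */
	ib_cq_pool_put(cq, nr_cqe);
}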
@@ -4209,45 +4298,6 @@ static inline u32 ib_inc_rkey(u32 rkey)
}
/**
- * ib_alloc_fmr - Allocates a unmapped fast memory region.
- * @pd: The protection domain associated with the unmapped region.
- * @mr_access_flags: Specifies the memory access rights.
- * @fmr_attr: Attributes of the unmapped region.
- *
- * A fast memory region must be mapped before it can be used as part of
- * a work request.
- */
-struct ib_fmr *ib_alloc_fmr(struct ib_pd *pd,
- int mr_access_flags,
- struct ib_fmr_attr *fmr_attr);
-
-/**
- * ib_map_phys_fmr - Maps a list of physical pages to a fast memory region.
- * @fmr: The fast memory region to associate with the pages.
- * @page_list: An array of physical pages to map to the fast memory region.
- * @list_len: The number of pages in page_list.
- * @iova: The I/O virtual address to use with the mapped region.
- */
-static inline int ib_map_phys_fmr(struct ib_fmr *fmr,
- u64 *page_list, int list_len,
- u64 iova)
-{
- return fmr->device->ops.map_phys_fmr(fmr, page_list, list_len, iova);
-}
-
-/**
- * ib_unmap_fmr - Removes the mapping from a list of fast memory regions.
- * @fmr_list: A linked list of fast memory regions to unmap.
- */
-int ib_unmap_fmr(struct list_head *fmr_list);
-
-/**
- * ib_dealloc_fmr - Deallocates a fast memory region.
- * @fmr: The fast memory region to deallocate.
- */
-int ib_dealloc_fmr(struct ib_fmr *fmr);
-
-/**
* ib_attach_mcast - Attaches the specified QP to a multicast group.
* @qp: QP to attach to the multicast group. The QP must be type
* IB_QPT_UD.
@@ -4701,4 +4751,48 @@ static inline struct ib_device *rdma_device_to_ibdev(struct device *device)
bool rdma_dev_access_netns(const struct ib_device *device,
const struct net *net);
+
+#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
+#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
+
+/**
+ * rdma_flow_label_to_udp_sport - generate a RoCE v2 UDP src port value based
+ * on the flow_label
+ *
+ * This function converts the 20-bit flow_label input to a valid 14-bit
+ * RoCE v2 UDP src port value. All RoCE v2 drivers should use this same
+ * convention.
+ */
+static inline u16 rdma_flow_label_to_udp_sport(u32 fl)
+{
+ u32 fl_low = fl & 0x03fff, fl_high = fl & 0xFC000;
+
+ fl_low ^= fl_high >> 14;
+ return (u16)(fl_low | IB_ROCE_UDP_ENCAP_VALID_PORT_MIN);
+}
+
+/**
+ * rdma_calc_flow_label - generate an RDMA symmetric flow label value based
+ * on local and remote qpn values
+ *
+ * This function folds the product of the two 24-bit QPN fields and
+ * converts it to a 20-bit result.
+ *
+ * It creates a symmetric flow_label value based on the local and remote
+ * qpn values, which allows both the requester and responder to calculate
+ * the same flow_label for a given connection.
+ *
+ * This helper should be used by drivers when the upper layer provides a
+ * zero flow_label value, to improve the entropy of RDMA traffic in the
+ * network.
+ */
+static inline u32 rdma_calc_flow_label(u32 lqpn, u32 rqpn)
+{
+ u64 v = (u64)lqpn * rqpn;
+
+ v ^= v >> 20;
+ v ^= v >> 40;
+
+ return (u32)(v & IB_GRH_FLOWLABEL_MASK);
+}
#endif /* IB_VERBS_H */
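[Editor's note] Taken together, the two helpers give drivers a deterministic path from QPNs to a UDP source port whenever the ULP supplies no flow label. A hypothetical usage sketch (names are illustrative):

static u16 example_udp_sport(u32 fl, u32 lqpn, u32 rqpn)
{
	/* Symmetric: swapping lqpn and rqpn yields the same label. */
	if (!fl)
		fl = rdma_calc_flow_label(lqpn, rqpn);
	return rdma_flow_label_to_udp_sport(fl);
}

As a sanity check with the function bodies above: lqpn = 0x12 and rqpn = 0x34 give v = 0x3a8, so the flow label is 0x003a8 and the resulting sport is 0xc3a8 (0x3a8 | 0xC000).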
diff --git a/include/rdma/ibta_vol1_c12.h b/include/rdma/ibta_vol1_c12.h
index 269904425d3f..960c86bec76c 100644
--- a/include/rdma/ibta_vol1_c12.h
+++ b/include/rdma/ibta_vol1_c12.h
@@ -38,6 +38,7 @@
/* Table 106 REQ Message Contents */
#define CM_REQ_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_req_msg, 0, 32)
+#define CM_REQ_VENDOR_ID CM_FIELD32_LOC(struct cm_req_msg, 5, 24)
#define CM_REQ_SERVICE_ID CM_FIELD64_LOC(struct cm_req_msg, 8)
#define CM_REQ_LOCAL_CA_GUID CM_FIELD64_LOC(struct cm_req_msg, 16)
#define CM_REQ_LOCAL_Q_KEY CM_FIELD32_LOC(struct cm_req_msg, 28, 32)
@@ -119,8 +120,11 @@ CM_STRUCT(struct cm_rej_msg, 84 * 8 + 1184);
#define CM_REP_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_rep_msg, 4, 32)
#define CM_REP_LOCAL_Q_KEY CM_FIELD32_LOC(struct cm_rep_msg, 8, 32)
#define CM_REP_LOCAL_QPN CM_FIELD32_LOC(struct cm_rep_msg, 12, 24)
+#define CM_REP_VENDOR_ID_H CM_FIELD8_LOC(struct cm_rep_msg, 15, 8)
#define CM_REP_LOCAL_EE_CONTEXT_NUMBER CM_FIELD32_LOC(struct cm_rep_msg, 16, 24)
+#define CM_REP_VENDOR_ID_M CM_FIELD8_LOC(struct cm_rep_msg, 19, 8)
#define CM_REP_STARTING_PSN CM_FIELD32_LOC(struct cm_rep_msg, 20, 24)
+#define CM_REP_VENDOR_ID_L CM_FIELD8_LOC(struct cm_rep_msg, 23, 8)
#define CM_REP_RESPONDER_RESOURCES CM_FIELD8_LOC(struct cm_rep_msg, 24, 8)
#define CM_REP_INITIATOR_DEPTH CM_FIELD8_LOC(struct cm_rep_msg, 25, 8)
#define CM_REP_TARGET_ACK_DELAY CM_FIELD8_LOC(struct cm_rep_msg, 26, 5)
@@ -201,7 +205,9 @@ CM_STRUCT(struct cm_sidr_req_msg, 16 * 8 + 1728);
#define CM_SIDR_REP_STATUS CM_FIELD8_LOC(struct cm_sidr_rep_msg, 4, 8)
#define CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH \
CM_FIELD8_LOC(struct cm_sidr_rep_msg, 5, 8)
+#define CM_SIDR_REP_VENDOR_ID_H CM_FIELD16_LOC(struct cm_sidr_rep_msg, 6, 16)
#define CM_SIDR_REP_QPN CM_FIELD32_LOC(struct cm_sidr_rep_msg, 8, 24)
+#define CM_SIDR_REP_VENDOR_ID_L CM_FIELD8_LOC(struct cm_sidr_rep_msg, 11, 8)
#define CM_SIDR_REP_SERVICEID CM_FIELD64_LOC(struct cm_sidr_rep_msg, 12)
#define CM_SIDR_REP_Q_KEY CM_FIELD32_LOC(struct cm_sidr_rep_msg, 20, 32)
#define CM_SIDR_REP_ADDITIONAL_INFORMATION \
diff --git a/include/rdma/lag.h b/include/rdma/lag.h
new file mode 100644
index 000000000000..7c06ec9b2eef
--- /dev/null
+++ b/include/rdma/lag.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
+ */
+
+#ifndef _RDMA_LAG_H_
+#define _RDMA_LAG_H_
+
+#include <net/lag.h>
+
+struct ib_device;
+struct rdma_ah_attr;
+
+enum rdma_lag_flags {
+ RDMA_LAG_FLAGS_HASH_ALL_SLAVES = 1 << 0
+};
+
+void rdma_lag_put_ah_roce_slave(struct net_device *xmit_slave);
+struct net_device *rdma_lag_get_ah_roce_slave(struct ib_device *device,
+ struct rdma_ah_attr *ah_attr,
+ gfp_t flags);
+
+#endif /* _RDMA_LAG_H_ */
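[Editor's note] A sketch of the expected call pattern around these two functions, mirroring how an AH-creation path would resolve and release the bond slave; the ERR_PTR semantics and variable names are assumptions here, not guaranteed by the header alone:

struct rdma_ah_init_attr init_attr = { .ah_attr = ah_attr };
struct net_device *slave;

slave = rdma_lag_get_ah_roce_slave(device, ah_attr, GFP_KERNEL);
if (IS_ERR(slave))
	return ERR_CAST(slave);	/* e.g. no xmit slave resolvable */

init_attr.xmit_slave = slave;	/* consumed by the driver's create_ah */
/* ... create the AH with &init_attr ... */
rdma_lag_put_ah_roce_slave(slave);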
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
index bdbfe25d3854..0d9e6d74c385 100644
--- a/include/rdma/opa_port_info.h
+++ b/include/rdma/opa_port_info.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2017 Intel Corporation. All rights reserved.
+ * Copyright (c) 2014-2020 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -139,14 +139,6 @@
#define OPA_CAP_MASK3_IsVLMarkerSupported (1 << 1)
#define OPA_CAP_MASK3_IsVLrSupported (1 << 0)
-/**
- * new MTU values
- */
-enum {
- OPA_MTU_8192 = 6,
- OPA_MTU_10240 = 7,
-};
-
enum {
OPA_PORT_PHYS_CONF_DISCONNECTED = 0,
OPA_PORT_PHYS_CONF_STANDARD = 1,
diff --git a/include/rdma/opa_vnic.h b/include/rdma/opa_vnic.h
index e90b149fe92a..6f244e759b4f 100644
--- a/include/rdma/opa_vnic.h
+++ b/include/rdma/opa_vnic.h
@@ -1,7 +1,7 @@
#ifndef _OPA_VNIC_H
#define _OPA_VNIC_H
/*
- * Copyright(c) 2017 Intel Corporation.
+ * Copyright(c) 2017 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -132,7 +132,7 @@ struct opa_vnic_stats {
static inline bool rdma_cap_opa_vnic(struct ib_device *device)
{
return !!(device->attrs.device_cap_flags &
- IB_DEVICE_RDMA_NETDEV_OPA_VNIC);
+ IB_DEVICE_RDMA_NETDEV_OPA);
}
#endif /* _OPA_VNIC_H */
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index 71f48cfdc24c..939d7abe026f 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -111,6 +111,7 @@ struct rdma_cm_event {
struct rdma_conn_param conn;
struct rdma_ud_param ud;
} param;
+ struct rdma_ucm_ece ece;
};
struct rdma_cm_id;
@@ -264,6 +265,9 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
*/
int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param);
+int rdma_connect_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ struct rdma_ucm_ece *ece);
+
/**
* rdma_listen - This function is called by the passive side to
* listen for incoming connection requests.
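[Editor's note] A hypothetical active-side caller of the new ECE variant; same contract as rdma_connect() plus the IBTA ECE blob, and the rdma_ucm_ece field values are placeholders:

struct rdma_ucm_ece ece = {
	.vendor_id = 0x15b3,	/* placeholder; assigned by the HW vendor */
	.attr_mod = 0,		/* placeholder attribute modifier */
};
int ret;

ret = rdma_connect_ece(id, &conn_param, &ece);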
@@ -276,6 +280,9 @@ int rdma_listen(struct rdma_cm_id *id, int backlog);
int __rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
const char *caller);
+int __rdma_accept_ece(struct rdma_cm_id *id, struct rdma_conn_param *conn_param,
+ const char *caller, struct rdma_ucm_ece *ece);
+
/**
* rdma_accept - Called to accept a connection request or response.
* @id: Connection identifier associated with the request.
@@ -313,7 +320,7 @@ int rdma_notify(struct rdma_cm_id *id, enum ib_event_type event);
* rdma_reject - Called to reject a connection request or response.
*/
int rdma_reject(struct rdma_cm_id *id, const void *private_data,
- u8 private_data_len);
+ u8 private_data_len, u8 reason);
/**
* rdma_disconnect - This function disconnects the associated QP and
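[Editor's note] Callers of rdma_reject() must now name the IBTA reject reason explicitly as the fourth argument; a consumer-level reject would presumably read (identifiers illustrative):

rdma_reject(cm_id, private_data, private_data_len,
	    IB_CM_REJ_CONSUMER_DEFINED);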
@@ -390,14 +397,6 @@ __be64 rdma_get_service_id(struct rdma_cm_id *id, struct sockaddr *addr);
const char *__attribute_const__ rdma_reject_msg(struct rdma_cm_id *id,
int reason);
/**
- * rdma_is_consumer_reject - return true if the consumer rejected the connect
- * request.
- * @id: Communication identifier that received the REJECT event.
- * @reason: Value returned in the REJECT event status field.
- */
-bool rdma_is_consumer_reject(struct rdma_cm_id *id, int reason);
-
-/**
* rdma_consumer_reject_data - return the consumer reject private data and
* length, if any.
* @id: Communication identifier that received the REJECT event.
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 5fc10108703a..c4369a6c2951 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -2,7 +2,7 @@
#define DEF_RDMAVT_INCQP_H
/*
- * Copyright(c) 2016 - 2019 Intel Corporation.
+ * Copyright(c) 2016 - 2020 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -69,6 +69,33 @@
#define RVT_R_COMM_EST 0x10
/*
+ * If a packet's QP[23:16] bits match this value, then it is
+ * a PSM packet and the hardware will expect a KDETH header
+ * following the BTH.
+ */
+#define RVT_KDETH_QP_PREFIX 0x80
+#define RVT_KDETH_QP_SUFFIX 0xffff
+#define RVT_KDETH_QP_PREFIX_MASK 0x00ff0000
+#define RVT_KDETH_QP_PREFIX_SHIFT 16
+#define RVT_KDETH_QP_BASE (u32)(RVT_KDETH_QP_PREFIX << \
+ RVT_KDETH_QP_PREFIX_SHIFT)
+#define RVT_KDETH_QP_MAX (u32)(RVT_KDETH_QP_BASE + RVT_KDETH_QP_SUFFIX)
+
+/*
+ * If a packet's LNH == BTH and DEST QPN[23:16] in the BTH match this
+ * prefix value, then it is an AIP packet with a DETH containing the entropy
+ * value in byte 4 following the BTH.
+ */
+#define RVT_AIP_QP_PREFIX 0x81
+#define RVT_AIP_QP_SUFFIX 0xffff
+#define RVT_AIP_QP_PREFIX_MASK 0x00ff0000
+#define RVT_AIP_QP_PREFIX_SHIFT 16
+#define RVT_AIP_QP_BASE (u32)(RVT_AIP_QP_PREFIX << \
+ RVT_AIP_QP_PREFIX_SHIFT)
+#define RVT_AIP_QPN_MAX BIT(RVT_AIP_QP_PREFIX_SHIFT)
+#define RVT_AIP_QP_MAX (u32)(RVT_AIP_QP_BASE + RVT_AIP_QPN_MAX - 1)
+
+/*
* Bit definitions for s_flags.
*
* RVT_S_SIGNAL_REQ_WR - set if QP send WRs contain completion signaled
@@ -440,7 +467,7 @@ struct rvt_qp {
/*
* This sge list MUST be last. Do not add anything below here.
*/
- struct rvt_sge r_sg_list[0] /* verified SGEs */
+ struct rvt_sge r_sg_list[] /* verified SGEs */
____cacheline_aligned_in_smp;
};
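[Editor's note] A hedged sketch of how a receive path could classify a 24-bit destination QPN against the two new ranges; the helper names are illustrative, not part of this patch:

static inline bool example_qpn_is_kdeth(u32 qpn)
{
	/* i.e. 0x800000 <= qpn <= 0x80ffff */
	return qpn >= RVT_KDETH_QP_BASE && qpn <= RVT_KDETH_QP_MAX;
}

static inline bool example_qpn_is_aip(u32 qpn)
{
	/* i.e. 0x810000 <= qpn <= 0x81ffff */
	return qpn >= RVT_AIP_QP_BASE && qpn <= RVT_AIP_QP_MAX;
}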
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index 9f3b1e004046..86de10ea30af 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -420,9 +420,9 @@ struct uapi_definition {
.scope = UAPI_SCOPE_OBJECT, \
.needs_fn_offset = \
offsetof(struct ib_device_ops, ibdev_fn) + \
- BUILD_BUG_ON_ZERO( \
- sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \
- sizeof(void *)), \
+ BUILD_BUG_ON_ZERO(sizeof_field(struct ib_device_ops, \
+ ibdev_fn) != \
+ sizeof(void *)), \
}
/*
@@ -435,9 +435,9 @@ struct uapi_definition {
.scope = UAPI_SCOPE_METHOD, \
.needs_fn_offset = \
offsetof(struct ib_device_ops, ibdev_fn) + \
- BUILD_BUG_ON_ZERO( \
- sizeof(((struct ib_device_ops *)0)->ibdev_fn) != \
- sizeof(void *)), \
+ BUILD_BUG_ON_ZERO(sizeof_field(struct ib_device_ops, \
+ ibdev_fn) != \
+ sizeof(void *)), \
}
/* Call a function to determine if the entire object is supported or not */
@@ -491,8 +491,7 @@ struct uapi_definition {
*/
#define UVERBS_ATTR_STRUCT(_type, _last) \
.zero_trailing = 1, \
- UVERBS_ATTR_SIZE(((uintptr_t)(&((_type *)0)->_last + 1)), \
- sizeof(_type))
+ UVERBS_ATTR_SIZE(offsetofend(_type, _last), sizeof(_type))
/*
* Specifies at least min_len bytes must be passed in, but the amount can be
* larger, up to the protocol maximum size. No check for zeroing is done.
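[Editor's note] offsetofend(TYPE, MEMBER) evaluates to the first byte past MEMBER, matching the old open-coded expression without the null-pointer arithmetic. A small illustration with a hypothetical struct:

struct example_cmd {		/* hypothetical uapi struct, for illustration */
	__u32 handle;
	__u32 flags;		/* pass as _last to UVERBS_ATTR_STRUCT() */
	__u64 driver_data;
};

/*
 * UVERBS_ATTR_STRUCT(struct example_cmd, flags) computes its minimum
 * size as offsetofend(struct example_cmd, flags) == 8; with
 * .zero_trailing set, any bytes a user passes beyond that offset must
 * be zeroed.
 */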
@@ -737,6 +736,9 @@ uverbs_attr_get_len(const struct uverbs_attr_bundle *attrs_bundle, u16 idx)
return attr->ptr_attr.len;
}
+void uverbs_finalize_uobj_create(const struct uverbs_attr_bundle *attrs_bundle,
+ u16 idx);
+
/*
* uverbs_attr_ptr_get_array_size() - Get array size pointer by a ptr
* attribute.
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 325fdaa3bb66..bf0392ae15eb 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -107,7 +107,7 @@ static inline void uobj_put_write(struct ib_uobject *uobj)
static inline void uobj_alloc_abort(struct ib_uobject *uobj,
struct uverbs_attr_bundle *attrs)
{
- rdma_alloc_abort_uobject(uobj, attrs);
+ rdma_alloc_abort_uobject(uobj, attrs, false);
}
static inline struct ib_uobject *
diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h
index f1cbdae67250..c15b298aa62f 100644
--- a/include/rdma/uverbs_types.h
+++ b/include/rdma/uverbs_types.h
@@ -139,7 +139,8 @@ void rdma_lookup_put_uobject(struct ib_uobject *uobj,
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
struct uverbs_attr_bundle *attrs);
void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
- struct uverbs_attr_bundle *attrs);
+ struct uverbs_attr_bundle *attrs,
+ bool hw_obj_valid);
void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
struct uverbs_attr_bundle *attrs);