Diffstat (limited to 'include')
-rw-r--r--  include/linux/mlx4/cq.h                      5
-rw-r--r--  include/linux/mlx5/driver.h                  5
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h               24
-rw-r--r--  include/rdma/ib_cm.h                        34
-rw-r--r--  include/rdma/ib_verbs.h                     42
-rw-r--r--  include/rdma/iba.h                         146
-rw-r--r--  include/rdma/ibta_vol1_c12.h               213
-rw-r--r--  include/rdma/rdmavt_qp.h                    22
-rw-r--r--  include/rdma/uverbs_named_ioctl.h            6
-rw-r--r--  include/rdma/uverbs_std_types.h             13
-rw-r--r--  include/rdma/uverbs_types.h                 34
-rw-r--r--  include/trace/events/rdma_core.h           394
-rw-r--r--  include/uapi/rdma/ib_user_ioctl_cmds.h      15
-rw-r--r--  include/uapi/rdma/ib_user_ioctl_verbs.h     12
-rw-r--r--  include/uapi/rdma/mlx5_user_ioctl_cmds.h    17
-rw-r--r--  include/uapi/rdma/qedr-abi.h                18
16 files changed, 899 insertions, 101 deletions
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index 508e8cc5ee86..653d2a0aa44c 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -130,6 +130,11 @@ enum {
MLX4_CQE_STATUS_IPOK = 1 << 12,
};
+/* L4_CSUM is logically part of status, but has to be checked against badfcs_enc */
+enum {
+ MLX4_CQE_STATUS_L4_CSUM = 1 << 2,
+};
+
enum {
MLX4_CQE_LLC = 1,
MLX4_CQE_SNAP = 1 << 1,
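
The comment above indicates the new bit cannot simply be tested in the
big-endian status word; it has to be qualified via the badfcs_enc byte of
the CQE. A minimal sketch of how a CQ poll routine might consume it,
assuming the mlx4_cqe field layout from this header (the helper name is
illustrative, not the driver's actual code):

	static bool mlx4_cqe_l4_csum_ok(__be16 status, u8 badfcs_enc,
					__be16 checksum)
	{
		/* L4_CSUM is reported in the badfcs_enc byte, not in status */
		return (badfcs_enc & MLX4_CQE_STATUS_L4_CSUM) ||
		       ((status & cpu_to_be16(MLX4_CQE_STATUS_IPOK)) &&
			checksum == cpu_to_be16(0xffff));
	}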
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 22bd0d5024c8..277a51d3ec40 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -463,6 +463,11 @@ struct mlx5_vf_context {
int enabled;
u64 port_guid;
u64 node_guid;
+ /* Valid bits are used to validate administrative GUIDs only;
+ * they are set once ndo_set_vf_guid() has been called.
+ */
+ u8 port_guid_valid:1;
+ u8 node_guid_valid:1;
enum port_state_policy policy;
};
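
A brief sketch of the intended flow, with a hypothetical helper standing in
for the ndo_set_vf_guid path: the valid bit is raised only once an
administrative GUID has actually been supplied, so an unset GUID is never
used for validation.

	static void mlx5_vf_set_port_guid(struct mlx5_vf_context *vf, u64 guid)
	{
		vf->port_guid = guid;
		vf->port_guid_valid = 1;	/* value may be checked from now on */
	}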
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index ee0a34d66c7c..032cd6630720 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1188,7 +1188,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_cq[0x5];
u8 log_max_eq_sz[0x8];
- u8 reserved_at_e8[0x2];
+ u8 relaxed_ordering_write[0x1];
+ u8 relaxed_ordering_read[0x1];
u8 log_max_mkey[0x6];
u8 reserved_at_f0[0x8];
u8 dump_fill_mkey[0x1];
@@ -1211,7 +1212,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_130[0xa];
u8 log_max_ra_res_dc[0x6];
- u8 reserved_at_140[0xa];
+ u8 reserved_at_140[0x9];
+ u8 roce_accl[0x1];
u8 log_max_ra_req_qp[0x6];
u8 reserved_at_150[0xa];
u8 log_max_ra_res_qp[0x6];
@@ -3428,7 +3430,9 @@ struct mlx5_ifc_mkc_bits {
u8 translations_octword_size[0x20];
- u8 reserved_at_1c0[0x1b];
+ u8 reserved_at_1c0[0x19];
+ u8 relaxed_ordering_read[0x1];
+ u8 reserved_at_1da[0x1];
u8 log_page_size[0x5];
u8 reserved_at_1e0[0x20];
@@ -4889,7 +4893,19 @@ struct mlx5_ifc_query_q_counter_out_bits {
u8 req_cqe_flush_error[0x20];
- u8 reserved_at_620[0x1e0];
+ u8 reserved_at_620[0x20];
+
+ u8 roce_adp_retrans[0x20];
+
+ u8 roce_adp_retrans_to[0x20];
+
+ u8 roce_slow_restart[0x20];
+
+ u8 roce_slow_restart_cnps[0x20];
+
+ u8 roce_slow_restart_trans[0x20];
+
+ u8 reserved_at_6e0[0x120];
};
struct mlx5_ifc_query_q_counter_in_bits {
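
Both additions are consumed through the usual mlx5_ifc accessors: the
capability bits via MLX5_CAP_GEN() and the new Q counters via MLX5_GET()
on the query output mailbox. A short sketch, assuming @out has been filled
by a MLX5_CMD_OP_QUERY_Q_COUNTER command (variable names are illustrative):

	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)] = {};
	u32 adp_retrans;

	if (MLX5_CAP_GEN(mdev, relaxed_ordering_write))
		; /* device supports relaxed-ordering writes for MKeys */

	/* ... execute MLX5_CMD_OP_QUERY_Q_COUNTER, filling @out ... */
	adp_retrans = MLX5_GET(query_q_counter_out, out, roce_adp_retrans);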
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index b01a8a8d4de9..8ec482e391aa 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -500,21 +500,6 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
u8 private_data_len);
/**
- * ib_send_cm_lap - Sends a load alternate path request.
- * @cm_id: Connection identifier associated with the load alternate path
- * message.
- * @alternate_path: A path record that identifies the alternate path to
- * load.
- * @private_data: Optional user-defined private data sent with the
- * load alternate path message.
- * @private_data_len: Size of the private data buffer, in bytes.
- */
-int ib_send_cm_lap(struct ib_cm_id *cm_id,
- struct sa_path_rec *alternate_path,
- const void *private_data,
- u8 private_data_len);
-
-/**
* ib_cm_init_qp_attr - Initializes the QP attributes for use in transitioning
* to a specified QP state.
* @cm_id: Communication identifier associated with the QP attributes to
@@ -534,25 +519,6 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
struct ib_qp_attr *qp_attr,
int *qp_attr_mask);
-/**
- * ib_send_cm_apr - Sends an alternate path response message in response to
- * a load alternate path request.
- * @cm_id: Connection identifier associated with the alternate path response.
- * @status: Reply status sent with the alternate path response.
- * @info: Optional additional information sent with the alternate path
- * response.
- * @info_length: Size of the additional information, in bytes.
- * @private_data: Optional user-defined private data sent with the
- * alternate path response message.
- * @private_data_len: Size of the private data buffer, in bytes.
- */
-int ib_send_cm_apr(struct ib_cm_id *cm_id,
- enum ib_cm_apr_status status,
- void *info,
- u8 info_length,
- const void *private_data,
- u8 private_data_len);
-
struct ib_cm_sidr_req_param {
struct sa_path_rec *path;
const struct ib_gid_attr *sgid_attr;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e2cc62217cc2..1f779fad3a1e 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -72,11 +72,16 @@
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
struct ib_umem_odp;
+struct ib_uqp_object;
+struct ib_usrq_object;
+struct ib_uwq_object;
extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;
+struct ib_ucq_object;
+
__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
const char *format, ...);
@@ -1413,8 +1418,11 @@ enum ib_access_flags {
IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
+ IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
- IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
+ IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
+ IB_ACCESS_SUPPORTED =
+ ((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
};
/*
@@ -1544,7 +1552,7 @@ enum ib_poll_context {
struct ib_cq {
struct ib_device *device;
- struct ib_uobject *uobject;
+ struct ib_ucq_object *uobject;
ib_comp_handler comp_handler;
void (*event_handler)(struct ib_event *, void *);
void *cq_context;
@@ -1558,6 +1566,11 @@ struct ib_cq {
};
struct workqueue_struct *comp_wq;
struct dim *dim;
+
+ /* updated only by trace points */
+ ktime_t timestamp;
+ bool interrupt;
+
/*
* Implementation details of the RDMA core, don't use in drivers:
*/
@@ -1567,7 +1580,7 @@ struct ib_cq {
struct ib_srq {
struct ib_device *device;
struct ib_pd *pd;
- struct ib_uobject *uobject;
+ struct ib_usrq_object *uobject;
void (*event_handler)(struct ib_event *, void *);
void *srq_context;
enum ib_srq_type srq_type;
@@ -1612,7 +1625,7 @@ enum ib_wq_state {
struct ib_wq {
struct ib_device *device;
- struct ib_uobject *uobject;
+ struct ib_uwq_object *uobject;
void *wq_context;
void (*event_handler)(struct ib_event *, void *);
struct ib_pd *pd;
@@ -1728,7 +1741,7 @@ struct ib_qp {
atomic_t usecnt;
struct list_head open_list;
struct ib_qp *real_qp;
- struct ib_uobject *uobject;
+ struct ib_uqp_object *uobject;
void (*event_handler)(struct ib_event *, void *);
void *qp_context;
/* sgid_attrs associated with the AV's */
@@ -2147,11 +2160,6 @@ struct ib_port_cache {
enum ib_port_state port_state;
};
-struct ib_cache {
- rwlock_t lock;
- struct ib_event_handler event_handler;
-};
-
struct ib_port_immutable {
int pkey_tbl_len;
int gid_tbl_len;
@@ -2627,13 +2635,18 @@ struct ib_device {
struct rcu_head rcu_head;
struct list_head event_handler_list;
- spinlock_t event_handler_lock;
+ /* Protects event_handler_list */
+ struct rw_semaphore event_handler_rwsem;
+
+ /* Protects QP's event_handler calls and open_qp list */
+ spinlock_t qp_open_list_lock;
struct rw_semaphore client_data_rwsem;
struct xarray client_data;
struct mutex unregistration_lock;
- struct ib_cache cache;
+ /* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
+ rwlock_t cache_lock;
/**
* port_data is indexed by port number
*/
@@ -2942,7 +2955,7 @@ bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
-void ib_dispatch_event(struct ib_event *event);
+void ib_dispatch_event(const struct ib_event *event);
int ib_query_port(struct ib_device *device,
u8 port_num, struct ib_port_attr *port_attr);
@@ -4309,6 +4322,9 @@ static inline int ib_check_mr_access(int flags)
!(flags & IB_ACCESS_LOCAL_WRITE))
return -EINVAL;
+ if (flags & ~IB_ACCESS_SUPPORTED)
+ return -EINVAL;
+
return 0;
}
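
With ib_check_mr_access() now rejecting unknown bits, the IB_ACCESS_OPTIONAL
window gives drivers room to silently drop optional hints they cannot honor
instead of failing the registration. A hedged sketch of that pattern (the
surrounding context and the pdev gate are illustrative;
pcie_relaxed_ordering_enabled() is the generic PCI helper):

	int ret = ib_check_mr_access(access_flags);

	if (ret)
		return ret;	/* unknown or invalid bits -> -EINVAL */

	/* optional bits may be dropped, never rejected */
	if (!pcie_relaxed_ordering_enabled(pdev))
		access_flags &= ~IB_ACCESS_RELAXED_ORDERING;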
diff --git a/include/rdma/iba.h b/include/rdma/iba.h
new file mode 100644
index 000000000000..6a1115b02a0d
--- /dev/null
+++ b/include/rdma/iba.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+#ifndef _IBA_DEFS_H_
+#define _IBA_DEFS_H_
+
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <asm/unaligned.h>
+
+static inline u32 _iba_get8(const u8 *ptr)
+{
+ return *ptr;
+}
+
+static inline void _iba_set8(u8 *ptr, u32 mask, u32 prep_value)
+{
+ *ptr = (*ptr & ~mask) | prep_value;
+}
+
+static inline u16 _iba_get16(const __be16 *ptr)
+{
+ return be16_to_cpu(*ptr);
+}
+
+static inline void _iba_set16(__be16 *ptr, u16 mask, u16 prep_value)
+{
+ *ptr = cpu_to_be16((be16_to_cpu(*ptr) & ~mask) | prep_value);
+}
+
+static inline u32 _iba_get32(const __be32 *ptr)
+{
+ return be32_to_cpu(*ptr);
+}
+
+static inline void _iba_set32(__be32 *ptr, u32 mask, u32 prep_value)
+{
+ *ptr = cpu_to_be32((be32_to_cpu(*ptr) & ~mask) | prep_value);
+}
+
+static inline u64 _iba_get64(const __be64 *ptr)
+{
+ /*
+ * The MADs are constructed so that 32-bit and smaller fields are
+ * naturally aligned; everything larger has a max alignment of 4 bytes.
+ */
+ return be64_to_cpu(get_unaligned(ptr));
+}
+
+static inline void _iba_set64(__be64 *ptr, u64 mask, u64 prep_value)
+{
+ put_unaligned(cpu_to_be64((_iba_get64(ptr) & ~mask) | prep_value), ptr);
+}
+
+#define _IBA_SET(field_struct, field_offset, field_mask, num_bits, ptr, value) \
+ ({ \
+ field_struct *_ptr = ptr; \
+ _iba_set##num_bits((void *)_ptr + (field_offset), field_mask, \
+ FIELD_PREP(field_mask, value)); \
+ })
+#define IBA_SET(field, ptr, value) _IBA_SET(field, ptr, value)
+
+#define _IBA_GET_MEM_PTR(field_struct, field_offset, type, num_bits, ptr) \
+ ({ \
+ field_struct *_ptr = ptr; \
+ (type *)((void *)_ptr + (field_offset)); \
+ })
+#define IBA_GET_MEM_PTR(field, ptr) _IBA_GET_MEM_PTR(field, ptr)
+
+/* FIXME: A set should always set the entire field, meaning we should zero the trailing bytes */
+#define _IBA_SET_MEM(field_struct, field_offset, type, num_bits, ptr, in, \
+ bytes) \
+ ({ \
+ const type *_in_ptr = in; \
+ WARN_ON(bytes * 8 > num_bits); \
+ if (in && bytes) \
+ memcpy(_IBA_GET_MEM_PTR(field_struct, field_offset, \
+ type, num_bits, ptr), \
+ _in_ptr, bytes); \
+ })
+#define IBA_SET_MEM(field, ptr, in, bytes) _IBA_SET_MEM(field, ptr, in, bytes)
+
+#define _IBA_GET(field_struct, field_offset, field_mask, num_bits, ptr) \
+ ({ \
+ const field_struct *_ptr = ptr; \
+ (u##num_bits) FIELD_GET( \
+ field_mask, _iba_get##num_bits((const void *)_ptr + \
+ (field_offset))); \
+ })
+#define IBA_GET(field, ptr) _IBA_GET(field, ptr)
+
+#define _IBA_GET_MEM(field_struct, field_offset, type, num_bits, ptr, out, \
+ bytes) \
+ ({ \
+ type *_out_ptr = out; \
+ WARN_ON(bytes * 8 > num_bits); \
+ if (out && bytes) \
+ memcpy(_out_ptr, \
+ _IBA_GET_MEM_PTR(field_struct, field_offset, \
+ type, num_bits, ptr), \
+ bytes); \
+ })
+#define IBA_GET_MEM(field, ptr, out, bytes) _IBA_GET_MEM(field, ptr, out, bytes)
+
+/*
+ * The generated list becomes the parameters to the macros, the order is:
+ * - struct this applies to
+ * - starting offset of the max
+ * - GENMASK or GENMASK_ULL in CPU order
+ * - The width of data the mask operations should work on, in bits
+ */
+
+/*
+ * Extraction using a tabular description like table 106. bit_offset is from
+ * the Byte[Bit] notation.
+ */
+#define IBA_FIELD_BLOC(field_struct, byte_offset, bit_offset, num_bits) \
+ field_struct, byte_offset, \
+ GENMASK(7 - (bit_offset), 7 - (bit_offset) - (num_bits - 1)), \
+ 8
+#define IBA_FIELD8_LOC(field_struct, byte_offset, num_bits) \
+ IBA_FIELD_BLOC(field_struct, byte_offset, 0, num_bits)
+
+#define IBA_FIELD16_LOC(field_struct, byte_offset, num_bits) \
+ field_struct, (byte_offset)&0xFFFE, \
+ GENMASK(15 - (((byte_offset) % 2) * 8), \
+ 15 - (((byte_offset) % 2) * 8) - (num_bits - 1)), \
+ 16
+
+#define IBA_FIELD32_LOC(field_struct, byte_offset, num_bits) \
+ field_struct, (byte_offset)&0xFFFC, \
+ GENMASK(31 - (((byte_offset) % 4) * 8), \
+ 31 - (((byte_offset) % 4) * 8) - (num_bits - 1)), \
+ 32
+
+#define IBA_FIELD64_LOC(field_struct, byte_offset) \
+ field_struct, byte_offset, GENMASK_ULL(63, 0), 64
+/*
+ * In the IBTA spec, everything wider than 64 bits is a whole multiple
+ * of bytes, with no leftover bits.
+ */
+#define IBA_FIELD_MLOC(field_struct, byte_offset, num_bits, type) \
+ field_struct, byte_offset, type, num_bits
+
+#endif /* _IBA_DEFS_H_ */
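
To make the Byte[Bit] convention concrete, here is a hedged sketch with a
hypothetical 2-bit field at Byte[43], bits 5-6 (struct example_msg and the
EXAMPLE_* names are not part of this header):

	struct example_msg { u8 data[64]; };

	/* expands to: struct example_msg, 43, GENMASK(2, 1), 8 */
	#define EXAMPLE_MSG_FLAGS IBA_FIELD_BLOC(struct example_msg, 43, 5, 2)

	static u32 example_get_flags(struct example_msg *msg)
	{
		/* reads byte 43, masks bits 2:1, returns the shifted value */
		return IBA_GET(EXAMPLE_MSG_FLAGS, msg);
	}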
diff --git a/include/rdma/ibta_vol1_c12.h b/include/rdma/ibta_vol1_c12.h
new file mode 100644
index 000000000000..269904425d3f
--- /dev/null
+++ b/include/rdma/ibta_vol1_c12.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ *
+ * This file contains IBTA Volume 1, Chapter 12 declarations:
+ * CHAPTER 12: COMMUNICATION MANAGEMENT
+ */
+#ifndef _IBTA_VOL1_C12_H_
+#define _IBTA_VOL1_C12_H_
+
+#include <rdma/iba.h>
+
+#define CM_FIELD_BLOC(field_struct, byte_offset, bits_offset, width) \
+ IBA_FIELD_BLOC(field_struct, \
+ (byte_offset + sizeof(struct ib_mad_hdr)), bits_offset, \
+ width)
+#define CM_FIELD8_LOC(field_struct, byte_offset, width) \
+ IBA_FIELD8_LOC(field_struct, \
+ (byte_offset + sizeof(struct ib_mad_hdr)), width)
+#define CM_FIELD16_LOC(field_struct, byte_offset, width) \
+ IBA_FIELD16_LOC(field_struct, \
+ (byte_offset + sizeof(struct ib_mad_hdr)), width)
+#define CM_FIELD32_LOC(field_struct, byte_offset, width) \
+ IBA_FIELD32_LOC(field_struct, \
+ (byte_offset + sizeof(struct ib_mad_hdr)), width)
+#define CM_FIELD64_LOC(field_struct, byte_offset) \
+ IBA_FIELD64_LOC(field_struct, (byte_offset + sizeof(struct ib_mad_hdr)))
+#define CM_FIELD_MLOC(field_struct, byte_offset, width, type) \
+ IBA_FIELD_MLOC(field_struct, \
+ (byte_offset + sizeof(struct ib_mad_hdr)), width, type)
+#define CM_STRUCT(field_struct, total_len) \
+ field_struct \
+ { \
+ struct ib_mad_hdr hdr; \
+ u32 _data[(total_len) / 32 + \
+ BUILD_BUG_ON_ZERO((total_len) % 32 != 0)]; \
+ }
+
+/* Table 106 REQ Message Contents */
+#define CM_REQ_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_req_msg, 0, 32)
+#define CM_REQ_SERVICE_ID CM_FIELD64_LOC(struct cm_req_msg, 8)
+#define CM_REQ_LOCAL_CA_GUID CM_FIELD64_LOC(struct cm_req_msg, 16)
+#define CM_REQ_LOCAL_Q_KEY CM_FIELD32_LOC(struct cm_req_msg, 28, 32)
+#define CM_REQ_LOCAL_QPN CM_FIELD32_LOC(struct cm_req_msg, 32, 24)
+#define CM_REQ_RESPONDER_RESOURCES CM_FIELD8_LOC(struct cm_req_msg, 35, 8)
+#define CM_REQ_LOCAL_EECN CM_FIELD32_LOC(struct cm_req_msg, 36, 24)
+#define CM_REQ_INITIATOR_DEPTH CM_FIELD8_LOC(struct cm_req_msg, 39, 8)
+#define CM_REQ_REMOTE_EECN CM_FIELD32_LOC(struct cm_req_msg, 40, 24)
+#define CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT \
+ CM_FIELD8_LOC(struct cm_req_msg, 43, 5)
+#define CM_REQ_TRANSPORT_SERVICE_TYPE CM_FIELD_BLOC(struct cm_req_msg, 43, 5, 2)
+#define CM_REQ_END_TO_END_FLOW_CONTROL \
+ CM_FIELD_BLOC(struct cm_req_msg, 43, 7, 1)
+#define CM_REQ_STARTING_PSN CM_FIELD32_LOC(struct cm_req_msg, 44, 24)
+#define CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT CM_FIELD8_LOC(struct cm_req_msg, 47, 5)
+#define CM_REQ_RETRY_COUNT CM_FIELD_BLOC(struct cm_req_msg, 47, 5, 3)
+#define CM_REQ_PARTITION_KEY CM_FIELD16_LOC(struct cm_req_msg, 48, 16)
+#define CM_REQ_PATH_PACKET_PAYLOAD_MTU CM_FIELD8_LOC(struct cm_req_msg, 50, 4)
+#define CM_REQ_RDC_EXISTS CM_FIELD_BLOC(struct cm_req_msg, 50, 4, 1)
+#define CM_REQ_RNR_RETRY_COUNT CM_FIELD_BLOC(struct cm_req_msg, 50, 5, 3)
+#define CM_REQ_MAX_CM_RETRIES CM_FIELD8_LOC(struct cm_req_msg, 51, 4)
+#define CM_REQ_SRQ CM_FIELD_BLOC(struct cm_req_msg, 51, 4, 1)
+#define CM_REQ_EXTENDED_TRANSPORT_TYPE \
+ CM_FIELD_BLOC(struct cm_req_msg, 51, 5, 3)
+#define CM_REQ_PRIMARY_LOCAL_PORT_LID CM_FIELD16_LOC(struct cm_req_msg, 52, 16)
+#define CM_REQ_PRIMARY_REMOTE_PORT_LID CM_FIELD16_LOC(struct cm_req_msg, 54, 16)
+#define CM_REQ_PRIMARY_LOCAL_PORT_GID \
+ CM_FIELD_MLOC(struct cm_req_msg, 56, 128, union ib_gid)
+#define CM_REQ_PRIMARY_REMOTE_PORT_GID \
+ CM_FIELD_MLOC(struct cm_req_msg, 72, 128, union ib_gid)
+#define CM_REQ_PRIMARY_FLOW_LABEL CM_FIELD32_LOC(struct cm_req_msg, 88, 20)
+#define CM_REQ_PRIMARY_PACKET_RATE CM_FIELD_BLOC(struct cm_req_msg, 91, 2, 6)
+#define CM_REQ_PRIMARY_TRAFFIC_CLASS CM_FIELD8_LOC(struct cm_req_msg, 92, 8)
+#define CM_REQ_PRIMARY_HOP_LIMIT CM_FIELD8_LOC(struct cm_req_msg, 93, 8)
+#define CM_REQ_PRIMARY_SL CM_FIELD8_LOC(struct cm_req_msg, 94, 4)
+#define CM_REQ_PRIMARY_SUBNET_LOCAL CM_FIELD_BLOC(struct cm_req_msg, 94, 4, 1)
+#define CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT CM_FIELD8_LOC(struct cm_req_msg, 95, 5)
+#define CM_REQ_ALTERNATE_LOCAL_PORT_LID \
+ CM_FIELD16_LOC(struct cm_req_msg, 96, 16)
+#define CM_REQ_ALTERNATE_REMOTE_PORT_LID \
+ CM_FIELD16_LOC(struct cm_req_msg, 98, 16)
+#define CM_REQ_ALTERNATE_LOCAL_PORT_GID \
+ CM_FIELD_MLOC(struct cm_req_msg, 100, 128, union ib_gid)
+#define CM_REQ_ALTERNATE_REMOTE_PORT_GID \
+ CM_FIELD_MLOC(struct cm_req_msg, 116, 128, union ib_gid)
+#define CM_REQ_ALTERNATE_FLOW_LABEL CM_FIELD32_LOC(struct cm_req_msg, 132, 20)
+#define CM_REQ_ALTERNATE_PACKET_RATE CM_FIELD_BLOC(struct cm_req_msg, 135, 2, 6)
+#define CM_REQ_ALTERNATE_TRAFFIC_CLASS CM_FIELD8_LOC(struct cm_req_msg, 136, 8)
+#define CM_REQ_ALTERNATE_HOP_LIMIT CM_FIELD8_LOC(struct cm_req_msg, 137, 8)
+#define CM_REQ_ALTERNATE_SL CM_FIELD8_LOC(struct cm_req_msg, 138, 4)
+#define CM_REQ_ALTERNATE_SUBNET_LOCAL \
+ CM_FIELD_BLOC(struct cm_req_msg, 138, 4, 1)
+#define CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT \
+ CM_FIELD8_LOC(struct cm_req_msg, 139, 5)
+#define CM_REQ_SAP_SUPPORTED CM_FIELD_BLOC(struct cm_req_msg, 139, 5, 1)
+#define CM_REQ_PRIVATE_DATA CM_FIELD_MLOC(struct cm_req_msg, 140, 736, void)
+CM_STRUCT(struct cm_req_msg, 140 * 8 + 736);
+
+/* Table 107 MRA Message Contents */
+#define CM_MRA_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_mra_msg, 0, 32)
+#define CM_MRA_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_mra_msg, 4, 32)
+#define CM_MRA_MESSAGE_MRAED CM_FIELD8_LOC(struct cm_mra_msg, 8, 2)
+#define CM_MRA_SERVICE_TIMEOUT CM_FIELD8_LOC(struct cm_mra_msg, 9, 5)
+#define CM_MRA_PRIVATE_DATA CM_FIELD_MLOC(struct cm_mra_msg, 10, 1776, void)
+CM_STRUCT(struct cm_mra_msg, 10 * 8 + 1776);
+
+/* Table 108 REJ Message Contents */
+#define CM_REJ_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_rej_msg, 0, 32)
+#define CM_REJ_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_rej_msg, 4, 32)
+#define CM_REJ_MESSAGE_REJECTED CM_FIELD8_LOC(struct cm_rej_msg, 8, 2)
+#define CM_REJ_REJECTED_INFO_LENGTH CM_FIELD8_LOC(struct cm_rej_msg, 9, 7)
+#define CM_REJ_REASON CM_FIELD16_LOC(struct cm_rej_msg, 10, 16)
+#define CM_REJ_ARI CM_FIELD_MLOC(struct cm_rej_msg, 12, 576, void)
+#define CM_REJ_PRIVATE_DATA CM_FIELD_MLOC(struct cm_rej_msg, 84, 1184, void)
+CM_STRUCT(struct cm_rej_msg, 84 * 8 + 1184);
+
+/* Table 110 REP Message Contents */
+#define CM_REP_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_rep_msg, 0, 32)
+#define CM_REP_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_rep_msg, 4, 32)
+#define CM_REP_LOCAL_Q_KEY CM_FIELD32_LOC(struct cm_rep_msg, 8, 32)
+#define CM_REP_LOCAL_QPN CM_FIELD32_LOC(struct cm_rep_msg, 12, 24)
+#define CM_REP_LOCAL_EE_CONTEXT_NUMBER CM_FIELD32_LOC(struct cm_rep_msg, 16, 24)
+#define CM_REP_STARTING_PSN CM_FIELD32_LOC(struct cm_rep_msg, 20, 24)
+#define CM_REP_RESPONDER_RESOURCES CM_FIELD8_LOC(struct cm_rep_msg, 24, 8)
+#define CM_REP_INITIATOR_DEPTH CM_FIELD8_LOC(struct cm_rep_msg, 25, 8)
+#define CM_REP_TARGET_ACK_DELAY CM_FIELD8_LOC(struct cm_rep_msg, 26, 5)
+#define CM_REP_FAILOVER_ACCEPTED CM_FIELD_BLOC(struct cm_rep_msg, 26, 5, 2)
+#define CM_REP_END_TO_END_FLOW_CONTROL \
+ CM_FIELD_BLOC(struct cm_rep_msg, 26, 7, 1)
+#define CM_REP_RNR_RETRY_COUNT CM_FIELD8_LOC(struct cm_rep_msg, 27, 3)
+#define CM_REP_SRQ CM_FIELD_BLOC(struct cm_rep_msg, 27, 3, 1)
+#define CM_REP_LOCAL_CA_GUID CM_FIELD64_LOC(struct cm_rep_msg, 28)
+#define CM_REP_PRIVATE_DATA CM_FIELD_MLOC(struct cm_rep_msg, 36, 1568, void)
+CM_STRUCT(struct cm_rep_msg, 36 * 8 + 1568);
+
+/* Table 111 RTU Message Contents */
+#define CM_RTU_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_rtu_msg, 0, 32)
+#define CM_RTU_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_rtu_msg, 4, 32)
+#define CM_RTU_PRIVATE_DATA CM_FIELD_MLOC(struct cm_rtu_msg, 8, 1792, void)
+CM_STRUCT(struct cm_rtu_msg, 8 * 8 + 1792);
+
+/* Table 112 DREQ Message Contents */
+#define CM_DREQ_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_dreq_msg, 0, 32)
+#define CM_DREQ_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_dreq_msg, 4, 32)
+#define CM_DREQ_REMOTE_QPN_EECN CM_FIELD32_LOC(struct cm_dreq_msg, 8, 24)
+#define CM_DREQ_PRIVATE_DATA CM_FIELD_MLOC(struct cm_dreq_msg, 12, 1760, void)
+CM_STRUCT(struct cm_dreq_msg, 12 * 8 + 1760);
+
+/* Table 113 DREP Message Contents */
+#define CM_DREP_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_drep_msg, 0, 32)
+#define CM_DREP_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_drep_msg, 4, 32)
+#define CM_DREP_PRIVATE_DATA CM_FIELD_MLOC(struct cm_drep_msg, 8, 1792, void)
+CM_STRUCT(struct cm_drep_msg, 8 * 8 + 1792);
+
+/* Table 115 LAP Message Contents */
+#define CM_LAP_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_lap_msg, 0, 32)
+#define CM_LAP_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_lap_msg, 4, 32)
+#define CM_LAP_REMOTE_QPN_EECN CM_FIELD32_LOC(struct cm_lap_msg, 12, 24)
+#define CM_LAP_REMOTE_CM_RESPONSE_TIMEOUT \
+ CM_FIELD8_LOC(struct cm_lap_msg, 15, 5)
+#define CM_LAP_ALTERNATE_LOCAL_PORT_LID \
+ CM_FIELD16_LOC(struct cm_lap_msg, 20, 16)
+#define CM_LAP_ALTERNATE_REMOTE_PORT_LID \
+ CM_FIELD16_LOC(struct cm_lap_msg, 22, 16)
+#define CM_LAP_ALTERNATE_LOCAL_PORT_GID \
+ CM_FIELD_MLOC(struct cm_lap_msg, 24, 128, union ib_gid)
+#define CM_LAP_ALTERNATE_REMOTE_PORT_GID \
+ CM_FIELD_MLOC(struct cm_lap_msg, 40, 128, union ib_gid)
+#define CM_LAP_ALTERNATE_FLOW_LABEL CM_FIELD32_LOC(struct cm_lap_msg, 56, 20)
+#define CM_LAP_ALTERNATE_TRAFFIC_CLASS CM_FIELD8_LOC(struct cm_lap_msg, 59, 8)
+#define CM_LAP_ALTERNATE_HOP_LIMIT CM_FIELD8_LOC(struct cm_lap_msg, 60, 8)
+#define CM_LAP_ALTERNATE_PACKET_RATE CM_FIELD_BLOC(struct cm_lap_msg, 61, 2, 6)
+#define CM_LAP_ALTERNATE_SL CM_FIELD8_LOC(struct cm_lap_msg, 62, 4)
+#define CM_LAP_ALTERNATE_SUBNET_LOCAL CM_FIELD_BLOC(struct cm_lap_msg, 62, 4, 1)
+#define CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT \
+ CM_FIELD8_LOC(struct cm_lap_msg, 63, 5)
+#define CM_LAP_PRIVATE_DATA CM_FIELD_MLOC(struct cm_lap_msg, 64, 1344, void)
+CM_STRUCT(struct cm_lap_msg, 64 * 8 + 1344);
+
+/* Table 116 APR Message Contents */
+#define CM_APR_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_apr_msg, 0, 32)
+#define CM_APR_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_apr_msg, 4, 32)
+#define CM_APR_ADDITIONAL_INFORMATION_LENGTH \
+ CM_FIELD8_LOC(struct cm_apr_msg, 8, 8)
+#define CM_APR_AR_STATUS CM_FIELD8_LOC(struct cm_apr_msg, 9, 8)
+#define CM_APR_ADDITIONAL_INFORMATION \
+ CM_FIELD_MLOC(struct cm_apr_msg, 12, 576, void)
+#define CM_APR_PRIVATE_DATA CM_FIELD_MLOC(struct cm_apr_msg, 84, 1184, void)
+CM_STRUCT(struct cm_apr_msg, 84 * 8 + 1184);
+
+/* Table 119 SIDR_REQ Message Contents */
+#define CM_SIDR_REQ_REQUESTID CM_FIELD32_LOC(struct cm_sidr_req_msg, 0, 32)
+#define CM_SIDR_REQ_PARTITION_KEY CM_FIELD16_LOC(struct cm_sidr_req_msg, 4, 16)
+#define CM_SIDR_REQ_SERVICEID CM_FIELD64_LOC(struct cm_sidr_req_msg, 8)
+#define CM_SIDR_REQ_PRIVATE_DATA \
+ CM_FIELD_MLOC(struct cm_sidr_req_msg, 16, 1728, void)
+CM_STRUCT(struct cm_sidr_req_msg, 16 * 8 + 1728);
+
+/* Table 120 SIDR_REP Message Contents */
+#define CM_SIDR_REP_REQUESTID CM_FIELD32_LOC(struct cm_sidr_rep_msg, 0, 32)
+#define CM_SIDR_REP_STATUS CM_FIELD8_LOC(struct cm_sidr_rep_msg, 4, 8)
+#define CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH \
+ CM_FIELD8_LOC(struct cm_sidr_rep_msg, 5, 8)
+#define CM_SIDR_REP_QPN CM_FIELD32_LOC(struct cm_sidr_rep_msg, 8, 24)
+#define CM_SIDR_REP_SERVICEID CM_FIELD64_LOC(struct cm_sidr_rep_msg, 12)
+#define CM_SIDR_REP_Q_KEY CM_FIELD32_LOC(struct cm_sidr_rep_msg, 20, 32)
+#define CM_SIDR_REP_ADDITIONAL_INFORMATION \
+ CM_FIELD_MLOC(struct cm_sidr_rep_msg, 24, 576, void)
+#define CM_SIDR_REP_PRIVATE_DATA \
+ CM_FIELD_MLOC(struct cm_sidr_rep_msg, 96, 1088, void)
+CM_STRUCT(struct cm_sidr_rep_msg, 96 * 8 + 1088);
+
+#endif /* _IBTA_VOL1_C12_H_ */
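
Together with iba.h, these definitions let the CM code address wire fields
by name rather than through hand-built bitfield structs. A minimal usage
sketch (the function is illustrative; the field names are the real ones
defined above):

	static void example_fill_req(struct cm_req_msg *req, u32 local_id,
				     u32 qp_num)
	{
		IBA_SET(CM_REQ_LOCAL_COMM_ID, req, local_id);
		/* 24-bit field: FIELD_PREP() masks the value into place */
		IBA_SET(CM_REQ_LOCAL_QPN, req, qp_num);
	}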
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index b550ae89bf85..0d5c70e2d8ab 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -640,34 +640,14 @@ static inline int rvt_cmp_msn(u32 a, u32 b)
return (((int)a) - ((int)b)) << 8;
}
-/**
- * rvt_compute_aeth - compute the AETH (syndrome + MSN)
- * @qp: the queue pair to compute the AETH for
- *
- * Returns the AETH.
- */
__be32 rvt_compute_aeth(struct rvt_qp *qp);
-/**
- * rvt_get_credit - flush the send work queue of a QP
- * @qp: the qp who's send work queue to flush
- * @aeth: the Acknowledge Extended Transport Header
- *
- * The QP s_lock should be held.
- */
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);
-/**
- * rvt_restart_sge - rewind the sge state for a wqe
- * @ss: the sge state pointer
- * @wqe: the wqe to rewind
- * @len: the data length from the start of the wqe in bytes
- *
- * Returns the remaining data length.
- */
u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);
/**
+ * rvt_div_round_up_mtu - round up divide
* @qp - the qp pair
* @len - the length
*
diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h
index 3447bfe356d6..6ae6cf8e4c2e 100644
--- a/include/rdma/uverbs_named_ioctl.h
+++ b/include/rdma/uverbs_named_ioctl.h
@@ -76,7 +76,7 @@
#define DECLARE_UVERBS_NAMED_OBJECT(_object_id, _type_attrs, ...) \
static const struct uverbs_method_def *const UVERBS_OBJECT_METHODS( \
_object_id)[] = { __VA_ARGS__ }; \
- const struct uverbs_object_def UVERBS_OBJECT(_object_id) = { \
+ static const struct uverbs_object_def UVERBS_OBJECT(_object_id) = { \
.id = _object_id, \
.type_attrs = &_type_attrs, \
.num_methods = ARRAY_SIZE(UVERBS_OBJECT_METHODS(_object_id)), \
@@ -88,10 +88,10 @@
* identify all uapi methods with a (object,method) tuple. However, they have
* no type pointer.
*/
-#define DECLARE_UVERBS_GLOBAL_METHODS(_object_id, ...) \
+#define DECLARE_UVERBS_GLOBAL_METHODS(_object_id, ...) \
static const struct uverbs_method_def *const UVERBS_OBJECT_METHODS( \
_object_id)[] = { __VA_ARGS__ }; \
- const struct uverbs_object_def UVERBS_OBJECT(_object_id) = { \
+ static const struct uverbs_object_def UVERBS_OBJECT(_object_id) = { \
.id = _object_id, \
.num_methods = ARRAY_SIZE(UVERBS_OBJECT_METHODS(_object_id)), \
.methods = &UVERBS_OBJECT_METHODS(_object_id) \
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 05eabfd5d0d3..1b28ce1aba07 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -104,16 +104,6 @@ static inline void uobj_put_write(struct ib_uobject *uobj)
rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
}
-static inline int __must_check
-uobj_alloc_commit(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
-{
- int ret = rdma_alloc_commit_uobject(uobj, attrs);
-
- if (ret)
- return ret;
- return 0;
-}
-
static inline void uobj_alloc_abort(struct ib_uobject *uobj,
struct uverbs_attr_bundle *attrs)
{
@@ -124,8 +114,7 @@ static inline struct ib_uobject *
__uobj_alloc(const struct uverbs_api_object *obj,
struct uverbs_attr_bundle *attrs, struct ib_device **ib_dev)
{
- struct ib_uobject *uobj =
- rdma_alloc_begin_uobject(obj, attrs->ufile, attrs);
+ struct ib_uobject *uobj = rdma_alloc_begin_uobject(obj, attrs);
if (!IS_ERR(uobj))
*ib_dev = attrs->context->device;
diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h
index d57a5ba00c74..f1cbdae67250 100644
--- a/include/rdma/uverbs_types.h
+++ b/include/rdma/uverbs_types.h
@@ -83,9 +83,9 @@ enum rdma_lookup_mode {
*/
struct uverbs_obj_type_class {
struct ib_uobject *(*alloc_begin)(const struct uverbs_api_object *obj,
- struct ib_uverbs_file *ufile);
+ struct uverbs_attr_bundle *attrs);
/* This consumes the kref on uobj */
- int (*alloc_commit)(struct ib_uobject *uobj);
+ void (*alloc_commit)(struct ib_uobject *uobj);
/* This does not consume the kref on uobj */
void (*alloc_abort)(struct ib_uobject *uobj);
@@ -98,7 +98,6 @@ struct uverbs_obj_type_class {
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs);
void (*remove_handle)(struct ib_uobject *uobj);
- u8 needs_kfree_rcu;
};
struct uverbs_obj_type {
@@ -138,23 +137,34 @@ struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
void rdma_lookup_put_uobject(struct ib_uobject *uobj,
enum rdma_lookup_mode mode);
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
- struct ib_uverbs_file *ufile,
struct uverbs_attr_bundle *attrs);
void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
struct uverbs_attr_bundle *attrs);
-int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj,
- struct uverbs_attr_bundle *attrs);
+void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
+ struct uverbs_attr_bundle *attrs);
+
+/*
+ * uverbs_uobject_get is called in order to increase the reference count on
+ * an uobject. This is useful when a handler wants to keep the uobject's memory
+ * alive, regardless of whether this uobject is still alive in the
+ * context's objects repository. Objects are put via uverbs_uobject_put.
+ */
+static inline void uverbs_uobject_get(struct ib_uobject *uobject)
+{
+ kref_get(&uobject->ref);
+}
+void uverbs_uobject_put(struct ib_uobject *uobject);
struct uverbs_obj_fd_type {
/*
* In fd based objects, uverbs_obj_type_ops points to generic
* fd operations. In order to specialize the underlying types (e.g.
* completion_channel), we use fops, name and flags for fd creation.
- * context_closed is called when the context is closed either when
- * the driver is removed or the process terminated.
+ * destroy_object is called when the uobject is to be destroyed,
+ * because the driver is removed or the FD is closed.
*/
struct uverbs_obj_type type;
- int (*context_closed)(struct ib_uobject *uobj,
+ int (*destroy_object)(struct ib_uobject *uobj,
enum rdma_remove_reason why);
const struct file_operations *fops;
const char *name;
@@ -163,11 +173,11 @@ struct uverbs_obj_fd_type {
extern const struct uverbs_obj_type_class uverbs_idr_class;
extern const struct uverbs_obj_type_class uverbs_fd_class;
-void uverbs_close_fd(struct file *f);
+int uverbs_uobject_fd_release(struct inode *inode, struct file *filp);
#define UVERBS_BUILD_BUG_ON(cond) (sizeof(char[1 - 2 * !!(cond)]) - \
sizeof(char))
-#define UVERBS_TYPE_ALLOC_FD(_obj_size, _context_closed, _fops, _name, _flags)\
+#define UVERBS_TYPE_ALLOC_FD(_obj_size, _destroy_object, _fops, _name, _flags) \
((&((const struct uverbs_obj_fd_type) \
{.type = { \
.type_class = &uverbs_fd_class, \
@@ -175,7 +185,7 @@ void uverbs_close_fd(struct file *f);
UVERBS_BUILD_BUG_ON((_obj_size) < \
sizeof(struct ib_uobject)), \
}, \
- .context_closed = _context_closed, \
+ .destroy_object = _destroy_object, \
.fops = _fops, \
.name = _name, \
.flags = _flags}))->type)
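
Because alloc_commit can no longer fail, a typical object-creation handler
reduces to a begin/commit pair with a single abort path. A hedged sketch of
that flow (do_driver_create() is a placeholder for the driver-specific step):

	struct ib_uobject *uobj;
	int ret;

	uobj = rdma_alloc_begin_uobject(obj, attrs);
	if (IS_ERR(uobj))
		return PTR_ERR(uobj);

	ret = do_driver_create(uobj, attrs);	/* hypothetical */
	if (ret) {
		rdma_alloc_abort_uobject(uobj, attrs);
		return ret;
	}

	rdma_alloc_commit_uobject(uobj, attrs);	/* void: cannot fail */
	return 0;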
diff --git a/include/trace/events/rdma_core.h b/include/trace/events/rdma_core.h
new file mode 100644
index 000000000000..17642aa54437
--- /dev/null
+++ b/include/trace/events/rdma_core.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Trace point definitions for core RDMA functions.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rdma_core
+
+#if !defined(_TRACE_RDMA_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RDMA_CORE_H
+
+#include <linux/tracepoint.h>
+#include <rdma/ib_verbs.h>
+
+/*
+ * enum ib_poll_context, from include/rdma/ib_verbs.h
+ */
+#define IB_POLL_CTX_LIST \
+ ib_poll_ctx(DIRECT) \
+ ib_poll_ctx(SOFTIRQ) \
+ ib_poll_ctx(WORKQUEUE) \
+ ib_poll_ctx_end(UNBOUND_WORKQUEUE)
+
+#undef ib_poll_ctx
+#undef ib_poll_ctx_end
+
+#define ib_poll_ctx(x) TRACE_DEFINE_ENUM(IB_POLL_##x);
+#define ib_poll_ctx_end(x) TRACE_DEFINE_ENUM(IB_POLL_##x);
+
+IB_POLL_CTX_LIST
+
+#undef ib_poll_ctx
+#undef ib_poll_ctx_end
+
+#define ib_poll_ctx(x) { IB_POLL_##x, #x },
+#define ib_poll_ctx_end(x) { IB_POLL_##x, #x }
+
+#define rdma_show_ib_poll_ctx(x) \
+ __print_symbolic(x, IB_POLL_CTX_LIST)
+
+/**
+ ** Completion Queue events
+ **/
+
+TRACE_EVENT(cq_schedule,
+ TP_PROTO(
+ struct ib_cq *cq
+ ),
+
+ TP_ARGS(cq),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ ),
+
+ TP_fast_assign(
+ cq->timestamp = ktime_get();
+ cq->interrupt = true;
+
+ __entry->cq_id = cq->res.id;
+ ),
+
+ TP_printk("cq.id=%u", __entry->cq_id)
+);
+
+TRACE_EVENT(cq_reschedule,
+ TP_PROTO(
+ struct ib_cq *cq
+ ),
+
+ TP_ARGS(cq),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ ),
+
+ TP_fast_assign(
+ cq->timestamp = ktime_get();
+ cq->interrupt = false;
+
+ __entry->cq_id = cq->res.id;
+ ),
+
+ TP_printk("cq.id=%u", __entry->cq_id)
+);
+
+TRACE_EVENT(cq_process,
+ TP_PROTO(
+ const struct ib_cq *cq
+ ),
+
+ TP_ARGS(cq),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(bool, interrupt)
+ __field(s64, latency)
+ ),
+
+ TP_fast_assign(
+ ktime_t latency = ktime_sub(ktime_get(), cq->timestamp);
+
+ __entry->cq_id = cq->res.id;
+ __entry->latency = ktime_to_us(latency);
+ __entry->interrupt = cq->interrupt;
+ ),
+
+ TP_printk("cq.id=%u wake-up took %lld [us] from %s",
+ __entry->cq_id, __entry->latency,
+ __entry->interrupt ? "interrupt" : "reschedule"
+ )
+);
+
+TRACE_EVENT(cq_poll,
+ TP_PROTO(
+ const struct ib_cq *cq,
+ int requested,
+ int rc
+ ),
+
+ TP_ARGS(cq, requested, rc),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, requested)
+ __field(int, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cq->res.id;
+ __entry->requested = requested;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("cq.id=%u requested %d, returned %d",
+ __entry->cq_id, __entry->requested, __entry->rc
+ )
+);
+
+TRACE_EVENT(cq_drain_complete,
+ TP_PROTO(
+ const struct ib_cq *cq
+ ),
+
+ TP_ARGS(cq),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cq->res.id;
+ ),
+
+ TP_printk("cq.id=%u",
+ __entry->cq_id
+ )
+);
+
+TRACE_EVENT(cq_modify,
+ TP_PROTO(
+ const struct ib_cq *cq,
+ u16 comps,
+ u16 usec
+ ),
+
+ TP_ARGS(cq, comps, usec),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(unsigned int, comps)
+ __field(unsigned int, usec)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cq->res.id;
+ __entry->comps = comps;
+ __entry->usec = usec;
+ ),
+
+ TP_printk("cq.id=%u comps=%u usec=%u",
+ __entry->cq_id, __entry->comps, __entry->usec
+ )
+);
+
+TRACE_EVENT(cq_alloc,
+ TP_PROTO(
+ const struct ib_cq *cq,
+ int nr_cqe,
+ int comp_vector,
+ enum ib_poll_context poll_ctx
+ ),
+
+ TP_ARGS(cq, nr_cqe, comp_vector, poll_ctx),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, nr_cqe)
+ __field(int, comp_vector)
+ __field(unsigned long, poll_ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cq->res.id;
+ __entry->nr_cqe = nr_cqe;
+ __entry->comp_vector = comp_vector;
+ __entry->poll_ctx = poll_ctx;
+ ),
+
+ TP_printk("cq.id=%u nr_cqe=%d comp_vector=%d poll_ctx=%s",
+ __entry->cq_id, __entry->nr_cqe, __entry->comp_vector,
+ rdma_show_ib_poll_ctx(__entry->poll_ctx)
+ )
+);
+
+TRACE_EVENT(cq_alloc_error,
+ TP_PROTO(
+ int nr_cqe,
+ int comp_vector,
+ enum ib_poll_context poll_ctx,
+ int rc
+ ),
+
+ TP_ARGS(nr_cqe, comp_vector, poll_ctx, rc),
+
+ TP_STRUCT__entry(
+ __field(int, rc)
+ __field(int, nr_cqe)
+ __field(int, comp_vector)
+ __field(unsigned long, poll_ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->rc = rc;
+ __entry->nr_cqe = nr_cqe;
+ __entry->comp_vector = comp_vector;
+ __entry->poll_ctx = poll_ctx;
+ ),
+
+ TP_printk("nr_cqe=%d comp_vector=%d poll_ctx=%s rc=%d",
+ __entry->nr_cqe, __entry->comp_vector,
+ rdma_show_ib_poll_ctx(__entry->poll_ctx), __entry->rc
+ )
+);
+
+TRACE_EVENT(cq_free,
+ TP_PROTO(
+ const struct ib_cq *cq
+ ),
+
+ TP_ARGS(cq),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cq->res.id;
+ ),
+
+ TP_printk("cq.id=%u", __entry->cq_id)
+);
+
+/**
+ ** Memory Region events
+ **/
+
+/*
+ * enum ib_mr_type, from include/rdma/ib_verbs.h
+ */
+#define IB_MR_TYPE_LIST \
+ ib_mr_type_item(MEM_REG) \
+ ib_mr_type_item(SG_GAPS) \
+ ib_mr_type_item(DM) \
+ ib_mr_type_item(USER) \
+ ib_mr_type_item(DMA) \
+ ib_mr_type_end(INTEGRITY)
+
+#undef ib_mr_type_item
+#undef ib_mr_type_end
+
+#define ib_mr_type_item(x) TRACE_DEFINE_ENUM(IB_MR_TYPE_##x);
+#define ib_mr_type_end(x) TRACE_DEFINE_ENUM(IB_MR_TYPE_##x);
+
+IB_MR_TYPE_LIST
+
+#undef ib_mr_type_item
+#undef ib_mr_type_end
+
+#define ib_mr_type_item(x) { IB_MR_TYPE_##x, #x },
+#define ib_mr_type_end(x) { IB_MR_TYPE_##x, #x }
+
+#define rdma_show_ib_mr_type(x) \
+ __print_symbolic(x, IB_MR_TYPE_LIST)
+
+TRACE_EVENT(mr_alloc,
+ TP_PROTO(
+ const struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg,
+ const struct ib_mr *mr
+ ),
+
+ TP_ARGS(pd, mr_type, max_num_sg, mr),
+
+ TP_STRUCT__entry(
+ __field(u32, pd_id)
+ __field(u32, mr_id)
+ __field(u32, max_num_sg)
+ __field(int, rc)
+ __field(unsigned long, mr_type)
+ ),
+
+ TP_fast_assign(
+ __entry->pd_id = pd->res.id;
+ if (IS_ERR(mr)) {
+ __entry->mr_id = 0;
+ __entry->rc = PTR_ERR(mr);
+ } else {
+ __entry->mr_id = mr->res.id;
+ __entry->rc = 0;
+ }
+ __entry->max_num_sg = max_num_sg;
+ __entry->mr_type = mr_type;
+ ),
+
+ TP_printk("pd.id=%u mr.id=%u type=%s max_num_sg=%u rc=%d",
+ __entry->pd_id, __entry->mr_id,
+ rdma_show_ib_mr_type(__entry->mr_type),
+ __entry->max_num_sg, __entry->rc)
+);
+
+TRACE_EVENT(mr_integ_alloc,
+ TP_PROTO(
+ const struct ib_pd *pd,
+ u32 max_num_data_sg,
+ u32 max_num_meta_sg,
+ const struct ib_mr *mr
+ ),
+
+ TP_ARGS(pd, max_num_data_sg, max_num_meta_sg, mr),
+
+ TP_STRUCT__entry(
+ __field(u32, pd_id)
+ __field(u32, mr_id)
+ __field(u32, max_num_data_sg)
+ __field(u32, max_num_meta_sg)
+ __field(int, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->pd_id = pd->res.id;
+ if (IS_ERR(mr)) {
+ __entry->mr_id = 0;
+ __entry->rc = PTR_ERR(mr);
+ } else {
+ __entry->mr_id = mr->res.id;
+ __entry->rc = 0;
+ }
+ __entry->max_num_data_sg = max_num_data_sg;
+ __entry->max_num_meta_sg = max_num_meta_sg;
+ ),
+
+ TP_printk("pd.id=%u mr.id=%u max_num_data_sg=%u max_num_meta_sg=%u rc=%d",
+ __entry->pd_id, __entry->mr_id, __entry->max_num_data_sg,
+ __entry->max_num_meta_sg, __entry->rc)
+);
+
+TRACE_EVENT(mr_dereg,
+ TP_PROTO(
+ const struct ib_mr *mr
+ ),
+
+ TP_ARGS(mr),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ ),
+
+ TP_fast_assign(
+ __entry->id = mr->res.id;
+ ),
+
+ TP_printk("mr.id=%u", __entry->id)
+);
+
+#endif /* _TRACE_RDMA_CORE_H */
+
+#include <trace/define_trace.h>
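
These events are fired from the core CQ and MR paths via the generated
trace_*() calls; as usual, exactly one compilation unit must instantiate
the tracepoints. A brief sketch of the plumbing (the call sites shown are
illustrative of where each event fits):

	/* in one .c file only, e.g. the core CQ implementation */
	#define CREATE_TRACE_POINTS
	#include <trace/events/rdma_core.h>

	trace_cq_schedule(cq);			/* interrupt queues the CQ work */
	trace_cq_process(cq);			/* work runs; logs wake-up latency */
	trace_cq_poll(cq, budget, completed);	/* after each polling pass */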
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index 64f0e3aacd3f..d4ddbe4e696c 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -56,6 +56,7 @@ enum uverbs_default_objects {
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_OBJECT_DM,
UVERBS_OBJECT_COUNTERS,
+ UVERBS_OBJECT_ASYNC_EVENT,
};
enum {
@@ -67,6 +68,7 @@ enum uverbs_methods_device {
UVERBS_METHOD_INVOKE_WRITE,
UVERBS_METHOD_INFO_HANDLES,
UVERBS_METHOD_QUERY_PORT,
+ UVERBS_METHOD_GET_CONTEXT,
};
enum uverbs_attrs_invoke_write_cmd_attr_ids {
@@ -80,6 +82,11 @@ enum uverbs_attrs_query_port_cmd_attr_ids {
UVERBS_ATTR_QUERY_PORT_RESP,
};
+enum uverbs_attrs_get_context_attr_ids {
+ UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
+ UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
+};
+
enum uverbs_attrs_create_cq_cmd_attr_ids {
UVERBS_ATTR_CREATE_CQ_HANDLE,
UVERBS_ATTR_CREATE_CQ_CQE,
@@ -241,4 +248,12 @@ enum uverbs_attrs_flow_destroy_ids {
UVERBS_ATTR_DESTROY_FLOW_HANDLE,
};
+enum uverbs_method_async_event {
+ UVERBS_METHOD_ASYNC_EVENT_ALLOC,
+};
+
+enum uverbs_attrs_async_event_create {
+ UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE,
+};
+
#endif
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index 9019b2d906ea..a640bb814be0 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -41,6 +41,13 @@
#define RDMA_UAPI_PTR(_type, _name) __aligned_u64 _name
#endif
+#define IB_UVERBS_ACCESS_OPTIONAL_FIRST (1 << 20)
+#define IB_UVERBS_ACCESS_OPTIONAL_LAST (1 << 29)
+
+enum ib_uverbs_core_support {
+ IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS = 1 << 0,
+};
+
enum ib_uverbs_access_flags {
IB_UVERBS_ACCESS_LOCAL_WRITE = 1 << 0,
IB_UVERBS_ACCESS_REMOTE_WRITE = 1 << 1,
@@ -50,6 +57,11 @@ enum ib_uverbs_access_flags {
IB_UVERBS_ACCESS_ZERO_BASED = 1 << 5,
IB_UVERBS_ACCESS_ON_DEMAND = 1 << 6,
IB_UVERBS_ACCESS_HUGETLB = 1 << 7,
+
+ IB_UVERBS_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_OPTIONAL_FIRST,
+ IB_UVERBS_ACCESS_OPTIONAL_RANGE =
+ ((IB_UVERBS_ACCESS_OPTIONAL_LAST << 1) - 1) &
+ ~(IB_UVERBS_ACCESS_OPTIONAL_FIRST - 1)
};
enum ib_uverbs_query_port_cap_flags {
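
The mask arithmetic pins the optional access flags to bits 20 through 29:

	((IB_UVERBS_ACCESS_OPTIONAL_LAST << 1) - 1)	/* 0x3fffffff: bits 0..29   */
	& ~(IB_UVERBS_ACCESS_OPTIONAL_FIRST - 1)	/* 0xfff00000: drop 0..19   */
	/* IB_UVERBS_ACCESS_OPTIONAL_RANGE		 = 0x3ff00000: bits 20..29 */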
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index 20d88307f75f..afe7da6f2b8e 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -115,6 +115,22 @@ enum mlx5_ib_devx_obj_methods {
MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
};
+enum mlx5_ib_var_alloc_attrs {
+ MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
+ MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
+ MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
+};
+
+enum mlx5_ib_var_obj_destroy_attrs {
+ MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_var_obj_methods {
+ MLX5_IB_METHOD_VAR_OBJ_ALLOC = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_VAR_OBJ_DESTROY,
+};
+
enum mlx5_ib_devx_umem_reg_attrs {
MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
@@ -156,6 +172,7 @@ enum mlx5_ib_objects {
MLX5_IB_OBJECT_FLOW_MATCHER,
MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
+ MLX5_IB_OBJECT_VAR,
};
enum mlx5_ib_flow_matcher_create_attrs {
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
index c022ee26089b..a0b83c9d4498 100644
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -48,6 +48,18 @@ struct qedr_alloc_ucontext_req {
__u32 reserved;
};
+#define QEDR_LDPM_MAX_SIZE (8192)
+#define QEDR_EDPM_TRANS_SIZE (64)
+
+enum qedr_rdma_dpm_type {
+ QEDR_DPM_TYPE_NONE = 0,
+ QEDR_DPM_TYPE_ROCE_ENHANCED = 1 << 0,
+ QEDR_DPM_TYPE_ROCE_LEGACY = 1 << 1,
+ QEDR_DPM_TYPE_IWARP_LEGACY = 1 << 2,
+ QEDR_DPM_TYPE_RESERVED = 1 << 3,
+ QEDR_DPM_SIZES_SET = 1 << 4,
+};
+
struct qedr_alloc_ucontext_resp {
__aligned_u64 db_pa;
__u32 db_size;
@@ -59,10 +71,12 @@ struct qedr_alloc_ucontext_resp {
__u32 sges_per_recv_wr;
__u32 sges_per_srq_wr;
__u32 max_cqes;
- __u8 dpm_enabled;
+ __u8 dpm_flags;
__u8 wids_enabled;
__u16 wid_count;
- __u32 reserved;
+ __u16 ldpm_limit_size;
+ __u8 edpm_trans_size;
+ __u8 reserved;
};
struct qedr_alloc_pd_ureq {
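
A hedged sketch of how a user-space provider might consume the re-purposed
byte (helper names are invented; keeping the first flag at bit 0, where the
old dpm_enabled boolean lived, suggests legacy checks continue to work):

	struct qedr_alloc_ucontext_resp resp;	/* filled by the alloc-context verb */

	if (resp.dpm_flags & QEDR_DPM_TYPE_ROCE_ENHANCED)
		enable_edpm();			/* hypothetical */
	if (resp.dpm_flags & QEDR_DPM_SIZES_SET) {
		/* sizes are only meaningful when the kernel sets this bit */
		ldpm_limit = resp.ldpm_limit_size;
		edpm_trans = resp.edpm_trans_size;
	}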