| 1 | /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ |
| 2 | /* Copyright (c) 2015 - 2020 Intel Corporation */ |
| 3 | #ifndef IRDMA_USER_H |
| 4 | #define IRDMA_USER_H |
| 5 | |
| 6 | #define irdma_handle void * |
| 7 | #define irdma_adapter_handle irdma_handle |
| 8 | #define irdma_qp_handle irdma_handle |
| 9 | #define irdma_cq_handle irdma_handle |
| 10 | #define irdma_pd_id irdma_handle |
| 11 | #define irdma_stag_handle irdma_handle |
| 12 | #define irdma_stag_index u32 |
| 13 | #define irdma_stag u32 |
| 14 | #define irdma_stag_key u8 |
| 15 | #define irdma_tagged_offset u64 |
| 16 | #define irdma_access_privileges u32 |
| 17 | #define irdma_physical_fragment u64 |
| 18 | #define irdma_address_list u64 * |
| 19 | |
| 20 | #define IRDMA_MAX_MR_SIZE 0x200000000000ULL |
| 21 | |
| 22 | #define IRDMA_ACCESS_FLAGS_LOCALREAD 0x01 |
| 23 | #define IRDMA_ACCESS_FLAGS_LOCALWRITE 0x02 |
| 24 | #define IRDMA_ACCESS_FLAGS_REMOTEREAD_ONLY 0x04 |
| 25 | #define IRDMA_ACCESS_FLAGS_REMOTEREAD 0x05 |
| 26 | #define IRDMA_ACCESS_FLAGS_REMOTEWRITE_ONLY 0x08 |
| 27 | #define IRDMA_ACCESS_FLAGS_REMOTEWRITE 0x0a |
| 28 | #define IRDMA_ACCESS_FLAGS_BIND_WINDOW 0x10 |
| 29 | #define IRDMA_ACCESS_FLAGS_ZERO_BASED 0x20 |
| 30 | #define IRDMA_ACCESS_FLAGS_ALL 0x3f |
| 31 | |
| 32 | #define IRDMA_OP_TYPE_RDMA_WRITE 0x00 |
| 33 | #define IRDMA_OP_TYPE_RDMA_READ 0x01 |
| 34 | #define IRDMA_OP_TYPE_SEND 0x03 |
| 35 | #define IRDMA_OP_TYPE_SEND_INV 0x04 |
| 36 | #define IRDMA_OP_TYPE_SEND_SOL 0x05 |
| 37 | #define IRDMA_OP_TYPE_SEND_SOL_INV 0x06 |
| 38 | #define IRDMA_OP_TYPE_RDMA_WRITE_SOL 0x0d |
| 39 | #define IRDMA_OP_TYPE_BIND_MW 0x08 |
| 40 | #define IRDMA_OP_TYPE_FAST_REG_NSMR 0x09 |
| 41 | #define IRDMA_OP_TYPE_INV_STAG 0x0a |
| 42 | #define IRDMA_OP_TYPE_RDMA_READ_INV_STAG 0x0b |
| 43 | #define IRDMA_OP_TYPE_NOP 0x0c |
| 44 | #define IRDMA_OP_TYPE_ATOMIC_FETCH_AND_ADD 0x0f |
| 45 | #define IRDMA_OP_TYPE_ATOMIC_COMPARE_AND_SWAP 0x11 |
| 46 | #define IRDMA_OP_TYPE_REC 0x3e |
| 47 | #define IRDMA_OP_TYPE_REC_IMM 0x3f |
| 48 | |
| 49 | #define IRDMA_FLUSH_MAJOR_ERR 1 |
| 50 | #define IRDMA_SRQFLUSH_RSVD_MAJOR_ERR 0xfffe |
| 51 | |
/* Asynchronous event (AE) codes */
| 53 | #define IRDMA_AE_AMP_UNALLOCATED_STAG 0x0102 |
| 54 | #define IRDMA_AE_AMP_INVALID_STAG 0x0103 |
| 55 | #define IRDMA_AE_AMP_BAD_QP 0x0104 |
| 56 | #define IRDMA_AE_AMP_BAD_PD 0x0105 |
| 57 | #define IRDMA_AE_AMP_BAD_STAG_KEY 0x0106 |
| 58 | #define IRDMA_AE_AMP_BAD_STAG_INDEX 0x0107 |
| 59 | #define IRDMA_AE_AMP_BOUNDS_VIOLATION 0x0108 |
| 60 | #define IRDMA_AE_AMP_RIGHTS_VIOLATION 0x0109 |
| 61 | #define IRDMA_AE_AMP_TO_WRAP 0x010a |
| 62 | #define IRDMA_AE_AMP_FASTREG_VALID_STAG 0x010c |
| 63 | #define IRDMA_AE_AMP_FASTREG_MW_STAG 0x010d |
| 64 | #define IRDMA_AE_AMP_FASTREG_INVALID_RIGHTS 0x010e |
| 65 | #define IRDMA_AE_AMP_FASTREG_INVALID_LENGTH 0x0110 |
| 66 | #define IRDMA_AE_AMP_INVALIDATE_SHARED 0x0111 |
| 67 | #define IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS 0x0112 |
| 68 | #define IRDMA_AE_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS 0x0113 |
| 69 | #define IRDMA_AE_AMP_MWBIND_VALID_STAG 0x0114 |
| 70 | #define IRDMA_AE_AMP_MWBIND_OF_MR_STAG 0x0115 |
| 71 | #define IRDMA_AE_AMP_MWBIND_TO_ZERO_BASED_STAG 0x0116 |
| 72 | #define IRDMA_AE_AMP_MWBIND_TO_MW_STAG 0x0117 |
| 73 | #define IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS 0x0118 |
| 74 | #define IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS 0x0119 |
| 75 | #define IRDMA_AE_AMP_MWBIND_TO_INVALID_PARENT 0x011a |
| 76 | #define IRDMA_AE_AMP_MWBIND_BIND_DISABLED 0x011b |
| 77 | #define IRDMA_AE_PRIV_OPERATION_DENIED 0x011c |
| 78 | #define IRDMA_AE_AMP_INVALIDATE_TYPE1_MW 0x011d |
| 79 | #define IRDMA_AE_AMP_MWBIND_ZERO_BASED_TYPE1_MW 0x011e |
| 80 | #define IRDMA_AE_AMP_FASTREG_INVALID_PBL_HPS_CFG 0x011f |
| 81 | #define IRDMA_AE_AMP_MWBIND_WRONG_TYPE 0x0120 |
| 82 | #define IRDMA_AE_AMP_FASTREG_PBLE_MISMATCH 0x0121 |
| 83 | #define IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG 0x0132 |
| 84 | #define IRDMA_AE_UDA_XMIT_BAD_PD 0x0133 |
| 85 | #define IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT 0x0134 |
| 86 | #define IRDMA_AE_UDA_L4LEN_INVALID 0x0135 |
| 87 | #define IRDMA_AE_BAD_CLOSE 0x0201 |
| 88 | #define IRDMA_AE_RDMAP_ROE_BAD_LLP_CLOSE 0x0202 |
| 89 | #define IRDMA_AE_CQ_OPERATION_ERROR 0x0203 |
| 90 | #define IRDMA_AE_RDMA_READ_WHILE_ORD_ZERO 0x0205 |
| 91 | #define IRDMA_AE_STAG_ZERO_INVALID 0x0206 |
| 92 | #define IRDMA_AE_IB_RREQ_AND_Q1_FULL 0x0207 |
| 93 | #define IRDMA_AE_IB_INVALID_REQUEST 0x0208 |
| 94 | #define IRDMA_AE_SRQ_LIMIT 0x0209 |
| 95 | #define IRDMA_AE_WQE_UNEXPECTED_OPCODE 0x020a |
| 96 | #define IRDMA_AE_WQE_INVALID_PARAMETER 0x020b |
| 97 | #define IRDMA_AE_WQE_INVALID_FRAG_DATA 0x020c |
| 98 | #define IRDMA_AE_IB_REMOTE_ACCESS_ERROR 0x020d |
| 99 | #define IRDMA_AE_IB_REMOTE_OP_ERROR 0x020e |
| 100 | #define IRDMA_AE_SRQ_CATASTROPHIC_ERROR 0x020f |
| 101 | #define IRDMA_AE_WQE_LSMM_TOO_LONG 0x0220 |
| 102 | #define IRDMA_AE_ATOMIC_ALIGNMENT 0x0221 |
| 103 | #define IRDMA_AE_ATOMIC_MASK 0x0222 |
| 104 | #define IRDMA_AE_INVALID_REQUEST 0x0223 |
| 105 | #define IRDMA_AE_PCIE_ATOMIC_DISABLE 0x0224 |
| 106 | #define IRDMA_AE_DDP_INVALID_MSN_GAP_IN_MSN 0x0301 |
| 107 | #define IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER 0x0303 |
| 108 | #define IRDMA_AE_DDP_UBE_INVALID_DDP_VERSION 0x0304 |
| 109 | #define IRDMA_AE_DDP_UBE_INVALID_MO 0x0305 |
| 110 | #define IRDMA_AE_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE 0x0306 |
| 111 | #define IRDMA_AE_DDP_UBE_INVALID_QN 0x0307 |
| 112 | #define IRDMA_AE_DDP_NO_L_BIT 0x0308 |
| 113 | #define IRDMA_AE_RDMAP_ROE_INVALID_RDMAP_VERSION 0x0311 |
| 114 | #define IRDMA_AE_RDMAP_ROE_UNEXPECTED_OPCODE 0x0312 |
| 115 | #define IRDMA_AE_ROE_INVALID_RDMA_READ_REQUEST 0x0313 |
| 116 | #define IRDMA_AE_ROE_INVALID_RDMA_WRITE_OR_READ_RESP 0x0314 |
| 117 | #define IRDMA_AE_ROCE_RSP_LENGTH_ERROR 0x0316 |
| 118 | #define IRDMA_AE_ROCE_EMPTY_MCG 0x0380 |
| 119 | #define IRDMA_AE_ROCE_BAD_MC_IP_ADDR 0x0381 |
| 120 | #define IRDMA_AE_ROCE_BAD_MC_QPID 0x0382 |
| 121 | #define IRDMA_AE_MCG_QP_PROTOCOL_MISMATCH 0x0383 |
| 122 | #define IRDMA_AE_INVALID_ARP_ENTRY 0x0401 |
| 123 | #define IRDMA_AE_INVALID_TCP_OPTION_RCVD 0x0402 |
| 124 | #define IRDMA_AE_STALE_ARP_ENTRY 0x0403 |
| 125 | #define IRDMA_AE_INVALID_AH_ENTRY 0x0406 |
| 126 | #define IRDMA_AE_LLP_CLOSE_COMPLETE 0x0501 |
| 127 | #define IRDMA_AE_LLP_CONNECTION_RESET 0x0502 |
| 128 | #define IRDMA_AE_LLP_FIN_RECEIVED 0x0503 |
| 129 | #define IRDMA_AE_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH 0x0504 |
| 130 | #define IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR 0x0505 |
| 131 | #define IRDMA_AE_LLP_SEGMENT_TOO_SMALL 0x0507 |
| 132 | #define IRDMA_AE_LLP_SYN_RECEIVED 0x0508 |
| 133 | #define IRDMA_AE_LLP_TERMINATE_RECEIVED 0x0509 |
| 134 | #define IRDMA_AE_LLP_TOO_MANY_RETRIES 0x050a |
| 135 | #define IRDMA_AE_LLP_TOO_MANY_KEEPALIVE_RETRIES 0x050b |
| 136 | #define IRDMA_AE_LLP_DOUBT_REACHABILITY 0x050c |
| 137 | #define IRDMA_AE_LLP_CONNECTION_ESTABLISHED 0x050e |
| 138 | #define IRDMA_AE_LLP_TOO_MANY_RNRS 0x050f |
| 139 | #define IRDMA_AE_RESOURCE_EXHAUSTION 0x0520 |
| 140 | #define IRDMA_AE_RESET_SENT 0x0601 |
| 141 | #define IRDMA_AE_TERMINATE_SENT 0x0602 |
| 142 | #define IRDMA_AE_RESET_NOT_SENT 0x0603 |
| 143 | #define IRDMA_AE_LCE_QP_CATASTROPHIC 0x0700 |
| 144 | #define IRDMA_AE_LCE_FUNCTION_CATASTROPHIC 0x0701 |
| 145 | #define IRDMA_AE_LCE_CQ_CATASTROPHIC 0x0702 |
| 146 | #define IRDMA_AE_REMOTE_QP_CATASTROPHIC 0x0703 |
| 147 | #define IRDMA_AE_LOCAL_QP_CATASTROPHIC 0x0704 |
| 148 | #define IRDMA_AE_RCE_QP_CATASTROPHIC 0x0705 |
| 149 | #define IRDMA_AE_QP_SUSPEND_COMPLETE 0x0900 |
| 150 | #define IRDMA_AE_CQP_DEFERRED_COMPLETE 0x0901 |
| 151 | #define IRDMA_AE_ADAPTER_CATASTROPHIC 0x0B0B |
| 152 | |
/* Fixed device capability limits and hardware object sizes.
 * The *_SIZE values for WQEs/CQEs are counts of __le64 words (see
 * struct irdma_cqe / struct irdma_qp_quanta below); other values are
 * ID ranges, entry counts, or byte sizes — confirm units per constant
 * against the firmware/HW spec before relying on them.
 */
enum irdma_device_caps_const {
	IRDMA_WQE_SIZE = 4,
	IRDMA_CQP_WQE_SIZE = 8,
	IRDMA_CQE_SIZE = 4,
	IRDMA_EXTENDED_CQE_SIZE = 8,
	IRDMA_AEQE_SIZE = 2,
	IRDMA_CEQE_SIZE = 1,
	IRDMA_CQP_CTX_SIZE = 8,
	IRDMA_SHADOW_AREA_SIZE = 8,
	IRDMA_QUERY_FPM_BUF_SIZE = 192,
	IRDMA_COMMIT_FPM_BUF_SIZE = 192,
	IRDMA_GATHER_STATS_BUF_SIZE = 1024,
	IRDMA_MIN_IW_QP_ID = 0,
	IRDMA_MAX_IW_QP_ID = 262143,
	IRDMA_MIN_IW_SRQ_ID = 0,
	IRDMA_MIN_CEQID = 0,
	IRDMA_MAX_CEQID = 1023,
	IRDMA_CEQ_MAX_COUNT = IRDMA_MAX_CEQID + 1,
	IRDMA_MIN_CQID = 0,
	IRDMA_MAX_CQID = 524287,
	IRDMA_MIN_AEQ_ENTRIES = 1,
	IRDMA_MAX_AEQ_ENTRIES = 524287,
	IRDMA_MAX_AEQ_ENTRIES_GEN_3 = 262144,
	IRDMA_MIN_CEQ_ENTRIES = 1,
	IRDMA_MAX_CEQ_ENTRIES = 262143,
	IRDMA_MIN_CQ_SIZE = 1,
	IRDMA_MAX_CQ_SIZE = 1048575,
	IRDMA_DB_ID_ZERO = 0,
	IRDMA_MAX_WQ_FRAGMENT_COUNT = 13,
	IRDMA_MAX_SGE_RD = 13,
	IRDMA_MAX_OUTBOUND_MSG_SIZE = 2147483647,
	IRDMA_MAX_INBOUND_MSG_SIZE = 2147483647,
	IRDMA_MAX_PUSH_PAGE_COUNT = 1024,
	IRDMA_MAX_PE_ENA_VF_COUNT = 32,
	IRDMA_MAX_VF_FPM_ID = 47,
	IRDMA_MAX_SQ_PAYLOAD_SIZE = 2145386496,
	IRDMA_MAX_INLINE_DATA_SIZE = 101,
	IRDMA_MAX_WQ_ENTRIES = 32768,
	IRDMA_Q2_BUF_SIZE = 256,
	IRDMA_QP_CTX_SIZE = 256,
	IRDMA_MAX_PDS = 262144,
	IRDMA_MIN_WQ_SIZE_GEN2 = 8,
};
| 196 | |
/* STag addressing mode: zero-based offsets vs. full virtual addresses
 * (see also IRDMA_ACCESS_FLAGS_ZERO_BASED and struct irdma_bind_window).
 */
enum irdma_addressing_type {
	IRDMA_ADDR_TYPE_ZERO_BASED = 0,
	IRDMA_ADDR_TYPE_VA_BASED = 1,
};
| 201 | |
/* Flush error classification for a QP; derived from the AE code by
 * irdma_ae_to_qp_err_code(). Values are implicit and sequential — do not
 * reorder without auditing every consumer.
 */
enum irdma_flush_opcode {
	FLUSH_INVALID = 0,
	FLUSH_GENERAL_ERR,
	FLUSH_PROT_ERR,
	FLUSH_REM_ACCESS_ERR,
	FLUSH_LOC_QP_OP_ERR,
	FLUSH_REM_OP_ERR,
	FLUSH_LOC_LEN_ERR,
	FLUSH_FATAL_ERR,
	FLUSH_RETRY_EXC_ERR,
	FLUSH_MW_BIND_ERR,
	FLUSH_REM_INV_REQ_ERR,
	FLUSH_RNR_RETRY_EXC_ERR,
};
| 216 | |
/* QP event classification paired with a flush code in struct qp_err_code
 * (see irdma_ae_to_qp_err_code()).
 */
enum irdma_qp_event_type {
	IRDMA_QP_EVENT_CATASTROPHIC,
	IRDMA_QP_EVENT_ACCESS_ERR,
	IRDMA_QP_EVENT_REQ_ERR,
};
| 222 | |
/* Completion status reported in struct irdma_cq_poll_info.comp_status.
 * Values are implicit and sequential; only IRDMA_COMPL_STATUS_SUCCESS is
 * pinned explicitly — do not reorder.
 */
enum irdma_cmpl_status {
	IRDMA_COMPL_STATUS_SUCCESS = 0,
	IRDMA_COMPL_STATUS_FLUSHED,
	IRDMA_COMPL_STATUS_INVALID_WQE,
	IRDMA_COMPL_STATUS_QP_CATASTROPHIC,
	IRDMA_COMPL_STATUS_REMOTE_TERMINATION,
	IRDMA_COMPL_STATUS_INVALID_STAG,
	IRDMA_COMPL_STATUS_BASE_BOUND_VIOLATION,
	IRDMA_COMPL_STATUS_ACCESS_VIOLATION,
	IRDMA_COMPL_STATUS_INVALID_PD_ID,
	IRDMA_COMPL_STATUS_WRAP_ERROR,
	IRDMA_COMPL_STATUS_STAG_INVALID_PDID,
	IRDMA_COMPL_STATUS_RDMA_READ_ZERO_ORD,
	IRDMA_COMPL_STATUS_QP_NOT_PRIVLEDGED,
	IRDMA_COMPL_STATUS_STAG_NOT_INVALID,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_SIZE,
	IRDMA_COMPL_STATUS_INVALID_PHYS_BUF_ENTRY,
	IRDMA_COMPL_STATUS_INVALID_FBO,
	IRDMA_COMPL_STATUS_INVALID_LEN,
	IRDMA_COMPL_STATUS_INVALID_ACCESS,
	IRDMA_COMPL_STATUS_PHYS_BUF_LIST_TOO_LONG,
	IRDMA_COMPL_STATUS_INVALID_VIRT_ADDRESS,
	IRDMA_COMPL_STATUS_INVALID_REGION,
	IRDMA_COMPL_STATUS_INVALID_WINDOW,
	IRDMA_COMPL_STATUS_INVALID_TOTAL_LEN,
	IRDMA_COMPL_STATUS_UNKNOWN,
};
| 250 | |
/* CQ arm type passed to irdma_uk_cq_request_notification():
 * notify on any completion vs. solicited completions only.
 */
enum irdma_cmpl_notify {
	IRDMA_CQ_COMPL_EVENT = 0,
	IRDMA_CQ_COMPL_SOLICITED = 1,
};
| 255 | |
/* QP capability bit flags (power-of-two values, OR-able into the u32
 * qp_caps fields of struct irdma_qp_uk / irdma_qp_uk_init_info).
 */
enum irdma_qp_caps {
	IRDMA_WRITE_WITH_IMM = 1,
	IRDMA_SEND_WITH_IMM = 2,
	IRDMA_ROCE = 4,
	IRDMA_PUSH_MODE = 8,
};
| 262 | |
| 263 | struct irdma_srq_uk; |
| 264 | struct irdma_srq_uk_init_info; |
| 265 | struct irdma_qp_uk; |
| 266 | struct irdma_cq_uk; |
| 267 | struct irdma_qp_uk_init_info; |
| 268 | struct irdma_cq_uk_init_info; |
| 269 | |
/* Generic circular ring state: head/tail indices plus entry count.
 * (Producer/consumer semantics of head vs. tail are defined by the ring
 * helper functions, which are not visible in this header.)
 */
struct irdma_ring {
	u32 head;
	u32 tail;
	u32 size;
};
| 275 | |
/* One completion queue entry: IRDMA_CQE_SIZE (4) __le64 words = 32 bytes. */
struct irdma_cqe {
	__le64 buf[IRDMA_CQE_SIZE];
};
| 279 | |
/* Extended CQE: IRDMA_EXTENDED_CQE_SIZE (8) __le64 words = 64 bytes. */
struct irdma_extended_cqe {
	__le64 buf[IRDMA_EXTENDED_CQE_SIZE];
};
| 283 | |
/* Send WR parameters. qkey/dest_qp/ah_id carry UD-style destination info;
 * presumably unused for connected QPs — confirm against the posting code.
 */
struct irdma_post_send {
	struct ib_sge *sg_list;	/* local gather list */
	u32 num_sges;		/* entries in sg_list */
	u32 qkey;
	u32 dest_qp;
	u32 ah_id;
};
| 291 | |
/* Receive WR: caller's work-request id plus local scatter list. */
struct irdma_post_rq_info {
	u64 wr_id;
	struct ib_sge *sg_list;
	u32 num_sges;
};
| 297 | |
/* RDMA write WR: local gather list plus the remote buffer described as an
 * ib_sge (address, key, length) in rem_addr.
 */
struct irdma_rdma_write {
	struct ib_sge *lo_sg_list;
	u32 num_lo_sges;
	struct ib_sge rem_addr;
};
| 303 | |
/* RDMA read WR: local scatter list for the response data plus the remote
 * source buffer described as an ib_sge in rem_addr.
 */
struct irdma_rdma_read {
	struct ib_sge *lo_sg_list;
	u32 num_lo_sges;
	struct ib_sge rem_addr;
};
| 309 | |
/* Memory-window bind WR parameters. */
struct irdma_bind_window {
	irdma_stag mr_stag;		/* STag of the parent memory region */
	u64 bind_len;			/* length of the window */
	void *va;			/* window start address (or offset when zero-based) */
	enum irdma_addressing_type addressing_type;
	bool ena_reads:1;		/* grant remote read access */
	bool ena_writes:1;		/* grant remote write access */
	irdma_stag mw_stag;		/* STag of the window being bound */
	bool mem_window_type_1:1;	/* type-1 (vs. type-2) memory window */
	bool remote_atomics_en:1;	/* grant remote atomic access */
};
| 321 | |
/* Atomic fetch-and-add WR: local and remote tagged offsets with their
 * STags; fetch_add_data_bytes holds the add operand (name suggests a byte
 * count — confirm semantics against the WQE builder).
 */
struct irdma_atomic_fetch_add {
	u64 tagged_offset;
	u64 remote_tagged_offset;
	u64 fetch_add_data_bytes;
	u32 stag;
	u32 remote_stag;
};
| 329 | |
/* Atomic compare-and-swap WR: local/remote tagged offsets and STags plus
 * the swap and compare operands.
 */
struct irdma_atomic_compare_swap {
	u64 tagged_offset;
	u64 remote_tagged_offset;
	u64 swap_data_bytes;
	u64 compare_data_bytes;
	u32 stag;
	u32 remote_stag;
};
| 338 | |
/* Local-invalidate WR: the STag to invalidate. */
struct irdma_inv_local_stag {
	irdma_stag target_stag;
};
| 342 | |
/* Descriptor for posting one send-queue WR. op_type (IRDMA_OP_TYPE_*)
 * selects which member of the op union is valid.
 */
struct irdma_post_sq_info {
	u64 wr_id;			/* caller's work-request id, echoed at completion */
	u8 op_type;			/* IRDMA_OP_TYPE_* opcode */
	u8 l4len;
	bool signaled:1;		/* request a CQE for this WR */
	bool read_fence:1;
	bool local_fence:1;
	bool inline_data:1;		/* data is copied inline into the WQE */
	bool imm_data_valid:1;		/* imm_data field carries immediate data */
	bool report_rtt:1;
	bool udp_hdr:1;
	bool defer_flag:1;
	bool remote_atomic_en:1;
	u32 imm_data;
	u32 stag_to_inv;		/* STag for send-with-invalidate ops */
	union {
		struct irdma_post_send send;
		struct irdma_rdma_write rdma_write;
		struct irdma_rdma_read rdma_read;
		struct irdma_bind_window bind_window;
		struct irdma_inv_local_stag inv_local_stag;
		struct irdma_atomic_fetch_add atomic_fetch_add;
		struct irdma_atomic_compare_swap atomic_compare_swap;
	} op;
};
| 368 | |
/* Result of irdma_uk_cq_poll_cmpl(): attributes of one completed WR. */
struct irdma_cq_poll_info {
	u64 wr_id;			/* wr_id supplied when the WR was posted */
	irdma_qp_handle qp_handle;
	u32 bytes_xfered;
	u32 tcp_seq_num_rtt;
	u32 qp_id;
	u32 ud_src_qpn;			/* source QPN for UD completions */
	u32 imm_data;			/* valid when imm_valid is set */
	irdma_stag inv_stag; /* or L_R_Key */
	enum irdma_cmpl_status comp_status;
	u16 major_err;
	u16 minor_err;
	u16 ud_vlan;			/* valid when ud_vlan_valid is set */
	u8 ud_smac[6];			/* valid when ud_smac_valid is set */
	u8 op_type;
	u8 q_type;
	bool stag_invalid_set:1; /* or L_R_Key set */
	bool error:1;
	bool solicited_event:1;
	bool ipv4:1;
	bool ud_vlan_valid:1;
	bool ud_smac_valid:1;
	bool imm_valid:1;
};
| 393 | |
/* Flush code / event type pair for a QP-affecting asynchronous event;
 * produced by irdma_ae_to_qp_err_code().
 */
struct qp_err_code {
	enum irdma_flush_opcode flush_code;
	enum irdma_qp_event_type event_type;
};
| 398 | |
| 399 | int irdma_uk_atomic_compare_swap(struct irdma_qp_uk *qp, |
| 400 | struct irdma_post_sq_info *info, bool post_sq); |
| 401 | int irdma_uk_atomic_fetch_add(struct irdma_qp_uk *qp, |
| 402 | struct irdma_post_sq_info *info, bool post_sq); |
| 403 | int irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, |
| 404 | struct irdma_post_sq_info *info, bool post_sq); |
| 405 | int irdma_uk_inline_send(struct irdma_qp_uk *qp, |
| 406 | struct irdma_post_sq_info *info, bool post_sq); |
| 407 | int irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, |
| 408 | bool post_sq); |
| 409 | int irdma_uk_post_receive(struct irdma_qp_uk *qp, |
| 410 | struct irdma_post_rq_info *info); |
| 411 | void irdma_uk_qp_post_wr(struct irdma_qp_uk *qp); |
| 412 | int irdma_uk_rdma_read(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, |
| 413 | bool inv_stag, bool post_sq); |
| 414 | int irdma_uk_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, |
| 415 | bool post_sq); |
| 416 | int irdma_uk_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, |
| 417 | bool post_sq); |
| 418 | int irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp, |
| 419 | struct irdma_post_sq_info *info, |
| 420 | bool post_sq); |
| 421 | |
/* Pluggable WQE-construction callbacks, installed per queue via the
 * wqe_ops members of struct irdma_qp_uk / irdma_srq_uk.
 */
struct irdma_wqe_uk_ops {
	/* copy inline payload from an SGE list into the WQE */
	void (*iw_copy_inline_data)(u8 *dest, struct ib_sge *sge_list,
				    u32 num_sges, u8 polarity);
	/* number of WQE quanta needed for data_size bytes of inline data */
	u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
	/* write one fragment (SGE) into the WQE at the given offset */
	void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ib_sge *sge,
				u8 valid);
	/* fill a memory-window bind WQE from op_info */
	void (*iw_set_mw_bind_wqe)(__le64 *wqe,
				   struct irdma_bind_window *op_info);
};
| 431 | |
| 432 | bool irdma_uk_cq_empty(struct irdma_cq_uk *cq); |
| 433 | int irdma_uk_cq_poll_cmpl(struct irdma_cq_uk *cq, |
| 434 | struct irdma_cq_poll_info *info); |
| 435 | void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq, |
| 436 | enum irdma_cmpl_notify cq_notify); |
| 437 | void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size); |
| 438 | void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt); |
| 439 | void irdma_uk_cq_init(struct irdma_cq_uk *cq, |
| 440 | struct irdma_cq_uk_init_info *info); |
| 441 | int irdma_uk_qp_init(struct irdma_qp_uk *qp, |
| 442 | struct irdma_qp_uk_init_info *info); |
| 443 | void irdma_uk_calc_shift_wq(struct irdma_qp_uk_init_info *ukinfo, u8 *sq_shift, |
| 444 | u8 *rq_shift); |
| 445 | int irdma_uk_calc_depth_shift_sq(struct irdma_qp_uk_init_info *ukinfo, |
| 446 | u32 *sq_depth, u8 *sq_shift); |
| 447 | int irdma_uk_calc_depth_shift_rq(struct irdma_qp_uk_init_info *ukinfo, |
| 448 | u32 *rq_depth, u8 *rq_shift); |
| 449 | int irdma_uk_srq_init(struct irdma_srq_uk *srq, |
| 450 | struct irdma_srq_uk_init_info *info); |
| 451 | int irdma_uk_srq_post_receive(struct irdma_srq_uk *srq, |
| 452 | struct irdma_post_rq_info *info); |
| 453 | |
/* Shared receive queue state used by the user-space/kernel posting path
 * (see irdma_uk_srq_init() / irdma_uk_srq_post_receive()).
 */
struct irdma_srq_uk {
	u32 srq_caps;
	struct irdma_qp_quanta *srq_base;	/* base of the SRQ WQE array */
	struct irdma_uk_attrs *uk_attrs;
	__le64 *shadow_area;
	struct irdma_ring srq_ring;		/* ring state over srq_base */
	u32 srq_id;
	u32 srq_size;
	u32 max_srq_frag_cnt;
	struct irdma_wqe_uk_ops wqe_ops;	/* WQE build callbacks */
	u8 srwqe_polarity;			/* current valid-bit polarity */
	u8 wqe_size;
	u8 wqe_size_multiplier;
	u8 deferred_flag;
	spinlock_t *lock;
};
| 470 | |
/* Parameters for irdma_uk_srq_init(). */
struct irdma_srq_uk_init_info {
	struct irdma_qp_quanta *srq;
	struct irdma_uk_attrs *uk_attrs;
	__le64 *shadow_area;
	u64 *srq_wrid_array;	/* per-entry wr_id storage */
	u32 srq_id;
	u32 srq_caps;
	u32 srq_size;
	u32 max_srq_frag_cnt;
};
| 481 | |
/* Per-WR tracking entry for posted SQ work requests (one per SQ slot,
 * see sq_wrtrk_array in struct irdma_qp_uk).
 */
struct irdma_sq_uk_wr_trk_info {
	u64 wrid;		/* caller's wr_id */
	u32 wr_len;		/* total payload length */
	u16 quanta;		/* WQE quanta consumed by this WR */
	u8 signaled;
	u8 reserved[1];		/* pad to 16 bytes */
};
| 489 | |
/* One WQE quantum: IRDMA_WQE_SIZE (4) __le64 words = 32 bytes. */
struct irdma_qp_quanta {
	__le64 elem[IRDMA_WQE_SIZE];
};
| 493 | |
/* Per-QP state shared by the SQ/RQ posting and CQ polling helpers. */
struct irdma_qp_uk {
	struct irdma_qp_quanta *sq_base;	/* base of the SQ WQE array */
	struct irdma_qp_quanta *rq_base;	/* base of the RQ WQE array */
	struct irdma_uk_attrs *uk_attrs;
	u32 __iomem *wqe_alloc_db;		/* MMIO doorbell register */
	struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;	/* per-SQ-slot WR tracking */
	u64 *rq_wrid_array;			/* per-RQ-slot wr_id storage */
	__le64 *shadow_area;
	struct irdma_ring sq_ring;
	struct irdma_ring rq_ring;
	struct irdma_ring initial_ring;
	u32 qp_id;
	u32 qp_caps;				/* enum irdma_qp_caps bitmask */
	u32 sq_size;
	u32 rq_size;
	u32 max_sq_frag_cnt;
	u32 max_rq_frag_cnt;
	u32 max_inline_data;
	struct irdma_wqe_uk_ops wqe_ops;	/* WQE build callbacks */
	u16 conn_wqes;
	u8 qp_type;
	u8 swqe_polarity;			/* SQ valid-bit polarity */
	u8 swqe_polarity_deferred;
	u8 rwqe_polarity;			/* RQ valid-bit polarity */
	u8 rq_wqe_size;
	u8 rq_wqe_size_multiplier;
	bool deferred_flag:1;
	bool first_sq_wq:1;
	bool sq_flush_complete:1; /* Indicates flush was seen and SQ was empty after the flush */
	bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
	bool destroy_pending:1; /* Indicates the QP is being destroyed */
	void *back_qp;				/* back-pointer to the owning QP object */
	u8 dbg_rq_flushed;
	struct irdma_srq_uk *srq_uk;		/* attached SRQ, if any */
	u8 sq_flush_seen;
	u8 rq_flush_seen;
};
| 531 | |
/* Per-CQ state used by the polling and arming helpers. */
struct irdma_cq_uk {
	struct irdma_cqe *cq_base;	/* base of the CQE array */
	u32 __iomem *cqe_alloc_db;	/* MMIO doorbell for arming */
	u32 __iomem *cq_ack_db;		/* MMIO doorbell for acking CQEs */
	__le64 *shadow_area;
	u32 cq_id;
	u32 cq_size;
	struct irdma_ring cq_ring;
	u8 polarity;			/* current valid-bit polarity */
	bool avoid_mem_cflct:1;
};
| 543 | |
/* Parameters for irdma_uk_qp_init(). */
struct irdma_qp_uk_init_info {
	struct irdma_qp_quanta *sq;
	struct irdma_qp_quanta *rq;
	struct irdma_uk_attrs *uk_attrs;
	u32 __iomem *wqe_alloc_db;
	__le64 *shadow_area;
	struct irdma_sq_uk_wr_trk_info *sq_wrtrk_array;
	u64 *rq_wrid_array;
	u32 qp_id;
	u32 qp_caps;		/* enum irdma_qp_caps bitmask */
	u32 sq_size;
	u32 rq_size;
	u32 max_sq_frag_cnt;
	u32 max_rq_frag_cnt;
	u32 max_inline_data;
	u32 sq_depth;
	u32 rq_depth;
	u8 first_sq_wq;
	u8 type;
	u8 sq_shift;
	u8 rq_shift;
	int abi_ver;		/* user/kernel ABI version */
	bool legacy_mode;
	struct irdma_srq_uk *srq_uk;
};
| 569 | |
/* Parameters for irdma_uk_cq_init(). */
struct irdma_cq_uk_init_info {
	u32 __iomem *cqe_alloc_db;
	u32 __iomem *cq_ack_db;
	struct irdma_cqe *cq_base;
	__le64 *shadow_area;
	u32 cq_size;
	u32 cq_id;
	bool avoid_mem_cflct;
};
| 579 | |
| 580 | __le64 *irdma_qp_get_next_send_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx, |
| 581 | u16 quanta, u32 total_size, |
| 582 | struct irdma_post_sq_info *info); |
| 583 | __le64 *irdma_srq_get_next_recv_wqe(struct irdma_srq_uk *srq, u32 *wqe_idx); |
| 584 | __le64 *irdma_qp_get_next_recv_wqe(struct irdma_qp_uk *qp, u32 *wqe_idx); |
| 585 | void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq); |
| 586 | int irdma_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq); |
| 587 | int irdma_fragcnt_to_quanta_sq(u32 frag_cnt, u16 *quanta); |
| 588 | int irdma_fragcnt_to_wqesize_rq(u32 frag_cnt, u16 *wqe_size); |
| 589 | void irdma_get_wqe_shift(struct irdma_uk_attrs *uk_attrs, u32 sge, |
| 590 | u32 inline_data, u8 *shift); |
| 591 | int irdma_get_sqdepth(struct irdma_uk_attrs *uk_attrs, u32 sq_size, u8 shift, |
| 592 | u32 *wqdepth); |
| 593 | int irdma_get_rqdepth(struct irdma_uk_attrs *uk_attrs, u32 rq_size, u8 shift, |
| 594 | u32 *wqdepth); |
| 595 | int irdma_get_srqdepth(struct irdma_uk_attrs *uk_attrs, u32 srq_size, u8 shift, |
| 596 | u32 *srqdepth); |
| 597 | void irdma_clr_wqes(struct irdma_qp_uk *qp, u32 qp_wqe_idx); |
| 598 | |
| 599 | static inline struct qp_err_code irdma_ae_to_qp_err_code(u16 ae_id) |
| 600 | { |
| 601 | struct qp_err_code qp_err = {}; |
| 602 | |
| 603 | switch (ae_id) { |
| 604 | case IRDMA_AE_AMP_BOUNDS_VIOLATION: |
| 605 | case IRDMA_AE_AMP_INVALID_STAG: |
| 606 | case IRDMA_AE_AMP_RIGHTS_VIOLATION: |
| 607 | case IRDMA_AE_AMP_UNALLOCATED_STAG: |
| 608 | case IRDMA_AE_AMP_BAD_PD: |
| 609 | case IRDMA_AE_AMP_BAD_QP: |
| 610 | case IRDMA_AE_AMP_BAD_STAG_KEY: |
| 611 | case IRDMA_AE_AMP_BAD_STAG_INDEX: |
| 612 | case IRDMA_AE_AMP_TO_WRAP: |
| 613 | case IRDMA_AE_PRIV_OPERATION_DENIED: |
| 614 | qp_err.flush_code = FLUSH_PROT_ERR; |
| 615 | qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR; |
| 616 | break; |
| 617 | case IRDMA_AE_UDA_XMIT_BAD_PD: |
| 618 | case IRDMA_AE_WQE_UNEXPECTED_OPCODE: |
| 619 | qp_err.flush_code = FLUSH_LOC_QP_OP_ERR; |
| 620 | qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC; |
| 621 | break; |
| 622 | case IRDMA_AE_UDA_XMIT_DGRAM_TOO_SHORT: |
| 623 | case IRDMA_AE_UDA_XMIT_DGRAM_TOO_LONG: |
| 624 | case IRDMA_AE_UDA_L4LEN_INVALID: |
| 625 | case IRDMA_AE_DDP_UBE_INVALID_MO: |
| 626 | case IRDMA_AE_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER: |
| 627 | qp_err.flush_code = FLUSH_LOC_LEN_ERR; |
| 628 | qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC; |
| 629 | break; |
| 630 | case IRDMA_AE_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS: |
| 631 | case IRDMA_AE_IB_REMOTE_ACCESS_ERROR: |
| 632 | qp_err.flush_code = FLUSH_REM_ACCESS_ERR; |
| 633 | qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR; |
| 634 | break; |
| 635 | case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS: |
| 636 | case IRDMA_AE_AMP_MWBIND_BIND_DISABLED: |
| 637 | case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS: |
| 638 | case IRDMA_AE_AMP_MWBIND_VALID_STAG: |
| 639 | qp_err.flush_code = FLUSH_MW_BIND_ERR; |
| 640 | qp_err.event_type = IRDMA_QP_EVENT_ACCESS_ERR; |
| 641 | break; |
| 642 | case IRDMA_AE_LLP_TOO_MANY_RETRIES: |
| 643 | qp_err.flush_code = FLUSH_RETRY_EXC_ERR; |
| 644 | qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC; |
| 645 | break; |
| 646 | case IRDMA_AE_IB_INVALID_REQUEST: |
| 647 | qp_err.flush_code = FLUSH_REM_INV_REQ_ERR; |
| 648 | qp_err.event_type = IRDMA_QP_EVENT_REQ_ERR; |
| 649 | break; |
| 650 | case IRDMA_AE_LLP_SEGMENT_TOO_SMALL: |
| 651 | case IRDMA_AE_LLP_RECEIVED_MPA_CRC_ERROR: |
| 652 | case IRDMA_AE_ROCE_RSP_LENGTH_ERROR: |
| 653 | case IRDMA_AE_IB_REMOTE_OP_ERROR: |
| 654 | qp_err.flush_code = FLUSH_REM_OP_ERR; |
| 655 | qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC; |
| 656 | break; |
| 657 | case IRDMA_AE_LLP_TOO_MANY_RNRS: |
| 658 | qp_err.flush_code = FLUSH_RNR_RETRY_EXC_ERR; |
| 659 | qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC; |
| 660 | break; |
| 661 | case IRDMA_AE_LCE_QP_CATASTROPHIC: |
| 662 | case IRDMA_AE_REMOTE_QP_CATASTROPHIC: |
| 663 | case IRDMA_AE_LOCAL_QP_CATASTROPHIC: |
| 664 | case IRDMA_AE_RCE_QP_CATASTROPHIC: |
| 665 | qp_err.flush_code = FLUSH_FATAL_ERR; |
| 666 | qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC; |
| 667 | break; |
| 668 | default: |
| 669 | qp_err.flush_code = FLUSH_GENERAL_ERR; |
| 670 | qp_err.event_type = IRDMA_QP_EVENT_CATASTROPHIC; |
| 671 | break; |
| 672 | } |
| 673 | |
| 674 | return qp_err; |
| 675 | } |
| 676 | #endif /* IRDMA_USER_H */ |
| 677 | |