/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/* Copyright (c) 2021, Microsoft Corporation. */

#ifndef _MANA_H
#define _MANA_H

#include <net/xdp.h>
#include <net/net_shaper.h>

#include "gdma.h"
#include "hw_channel.h"

/* Microsoft Azure Network Adapter (MANA)'s definitions
 *
 * Structures labeled with "HW DATA" are exchanged with the hardware. All of
 * them are naturally aligned and hence don't need __packed.
 */

/* MANA protocol version */
#define MANA_MAJOR_VERSION 0
#define MANA_MINOR_VERSION 1
#define MANA_MICRO_VERSION 1

typedef u64 mana_handle_t;
#define INVALID_MANA_HANDLE ((mana_handle_t)-1)

enum TRI_STATE {
	TRI_STATE_UNKNOWN = -1,
	TRI_STATE_FALSE = 0,
	TRI_STATE_TRUE = 1
};
/* The number of entries in the hardware indirection table must be a power of 2 */
#define MANA_INDIRECT_TABLE_MAX_SIZE 512
#define MANA_INDIRECT_TABLE_DEF_SIZE 64

/* The Toeplitz hash key's length in bytes: should be a multiple of 8 */
#define MANA_HASH_KEY_SIZE 40
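
/* Illustrative sketch (not part of this header): a driver would typically
 * seed the Toeplitz key from the kernel's global RSS key, e.g. with
 * netdev_rss_key_fill() from <linux/netdevice.h>:
 *
 *	u8 hashkey[MANA_HASH_KEY_SIZE];
 *
 *	netdev_rss_key_fill(hashkey, MANA_HASH_KEY_SIZE);
 */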

#define COMP_ENTRY_SIZE 64

/* This max value for RX buffers is derived from __alloc_pages()'s max page
 * allocation calculation. It allows a maximum of 2^(MAX_ORDER - 1) pages. An
 * RX buffer size beyond this value gets rejected by the __alloc_pages() call.
 */
#define MAX_RX_BUFFERS_PER_QUEUE 8192
#define DEF_RX_BUFFERS_PER_QUEUE 1024
#define MIN_RX_BUFFERS_PER_QUEUE 128

/* This max value for TX buffers was derived through testing as the maximum
 * number of pages allocatable on the host per guest. A TX buffer size beyond
 * this value is rejected by the hardware.
 */
#define MAX_TX_BUFFERS_PER_QUEUE 16384
#define DEF_TX_BUFFERS_PER_QUEUE 256
#define MIN_TX_BUFFERS_PER_QUEUE 128
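
/* Illustrative sketch (hypothetical helper, assuming ring sizes are kept to
 * powers of 2): how a requested ring size could be normalized against the
 * bounds above before being applied:
 *
 *	static u32 example_fix_tx_ring_size(u32 requested)
 *	{
 *		u32 size = roundup_pow_of_two(requested);
 *
 *		return clamp_t(u32, size, MIN_TX_BUFFERS_PER_QUEUE,
 *			       MAX_TX_BUFFERS_PER_QUEUE);
 *	}
 */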

#define EQ_SIZE (8 * MANA_PAGE_SIZE)

#define LOG2_EQ_THROTTLE 3

#define MAX_PORTS_IN_MANA_DEV 256

/* Update this count whenever the respective structures are changed */
#define MANA_STATS_RX_COUNT 5
#define MANA_STATS_TX_COUNT 11

#define MANA_RX_FRAG_ALIGNMENT 64

struct mana_stats_rx {
	u64 packets;
	u64 bytes;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 xdp_redirect;
	struct u64_stats_sync syncp;
};

struct mana_stats_tx {
	u64 packets;
	u64 bytes;
	u64 xdp_xmit;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 short_pkt_fmt;
	u64 long_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	struct u64_stats_sync syncp;
};

struct mana_txq {
	struct gdma_queue *gdma_sq;

	union {
		u32 gdma_txq_id;
		struct {
			u32 reserved1 : 10;
			u32 vsq_frame : 14;
			u32 reserved2 : 8;
		};
	};

	u16 vp_offset;

	struct net_device *ndev;

	/* The SKBs are sent to the HW and we are waiting for the CQEs. */
	struct sk_buff_head pending_skbs;
	struct netdev_queue *net_txq;

	atomic_t pending_sends;

	bool napi_initialized;

	struct mana_stats_tx stats;
};

/* skb data and frags dma mappings */
struct mana_skb_head {
	/* GSO pkts may have 2 SGEs for the linear part */
	dma_addr_t dma_handle[MAX_SKB_FRAGS + 2];

	u32 size[MAX_SKB_FRAGS + 2];
};

#define MANA_HEADROOM sizeof(struct mana_skb_head)
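
/* Illustrative sketch (an assumption drawn from the definitions above, not
 * the driver's verbatim TX path): reserving MANA_HEADROOM lets the DMA
 * mapping bookkeeping live in the skb headroom:
 *
 *	struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
 *
 *	ash->dma_handle[0] = dma_map_single(dev, skb->data,
 *					    skb_headlen(skb), DMA_TO_DEVICE);
 *	ash->size[0] = skb_headlen(skb);
 */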

enum mana_tx_pkt_format {
	MANA_SHORT_PKT_FMT = 0,
	MANA_LONG_PKT_FMT = 1,
};

struct mana_tx_short_oob {
	u32 pkt_fmt : 2;
	u32 is_outer_ipv4 : 1;
	u32 is_outer_ipv6 : 1;
	u32 comp_iphdr_csum : 1;
	u32 comp_tcp_csum : 1;
	u32 comp_udp_csum : 1;
	u32 supress_txcqe_gen : 1;
	u32 vcq_num : 24;

	u32 trans_off : 10; /* Transport header offset */
	u32 vsq_frame : 14;
	u32 short_vp_offset : 8;
}; /* HW DATA */

struct mana_tx_long_oob {
	u32 is_encap : 1;
	u32 inner_is_ipv6 : 1;
	u32 inner_tcp_opt : 1;
	u32 inject_vlan_pri_tag : 1;
	u32 reserved1 : 12;
	u32 pcp : 3; /* 802.1Q */
	u32 dei : 1; /* 802.1Q */
	u32 vlan_id : 12; /* 802.1Q */

	u32 inner_frame_offset : 10;
	u32 inner_ip_rel_offset : 6;
	u32 long_vp_offset : 12;
	u32 reserved2 : 4;

	u32 reserved3;
	u32 reserved4;
}; /* HW DATA */

struct mana_tx_oob {
	struct mana_tx_short_oob s_oob;
	struct mana_tx_long_oob l_oob;
}; /* HW DATA */

enum mana_cq_type {
	MANA_CQ_TYPE_RX,
	MANA_CQ_TYPE_TX,
};

enum mana_cqe_type {
	CQE_INVALID = 0,
	CQE_RX_OKAY = 1,
	CQE_RX_COALESCED_4 = 2,
	CQE_RX_OBJECT_FENCE = 3,
	CQE_RX_TRUNCATED = 4,

	CQE_TX_OKAY = 32,
	CQE_TX_SA_DROP = 33,
	CQE_TX_MTU_DROP = 34,
	CQE_TX_INVALID_OOB = 35,
	CQE_TX_INVALID_ETH_TYPE = 36,
	CQE_TX_HDR_PROCESSING_ERROR = 37,
	CQE_TX_VF_DISABLED = 38,
	CQE_TX_VPORT_IDX_OUT_OF_RANGE = 39,
	CQE_TX_VPORT_DISABLED = 40,
	CQE_TX_VLAN_TAGGING_VIOLATION = 41,
};

#define MANA_CQE_COMPLETION 1

struct mana_cqe_header {
	u32 cqe_type : 6;
	u32 client_type : 2;
	u32 vendor_err : 24;
}; /* HW DATA */
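
/* Illustrative sketch (hypothetical handler names): a completion handler
 * dispatches on cqe_type from the header above:
 *
 *	switch (cqe_hdr->cqe_type) {
 *	case CQE_RX_OKAY:
 *		example_handle_rx(cqe);		// hypothetical
 *		break;
 *	case CQE_TX_OKAY:
 *		example_handle_tx(cqe);		// hypothetical
 *		break;
 *	default:
 *		break;
 *	}
 */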

/* NDIS HASH Types */
#define NDIS_HASH_IPV4 BIT(0)
#define NDIS_HASH_TCP_IPV4 BIT(1)
#define NDIS_HASH_UDP_IPV4 BIT(2)
#define NDIS_HASH_IPV6 BIT(3)
#define NDIS_HASH_TCP_IPV6 BIT(4)
#define NDIS_HASH_UDP_IPV6 BIT(5)
#define NDIS_HASH_IPV6_EX BIT(6)
#define NDIS_HASH_TCP_IPV6_EX BIT(7)
#define NDIS_HASH_UDP_IPV6_EX BIT(8)

#define MANA_HASH_L3 (NDIS_HASH_IPV4 | NDIS_HASH_IPV6 | NDIS_HASH_IPV6_EX)
#define MANA_HASH_L4 \
	(NDIS_HASH_TCP_IPV4 | NDIS_HASH_UDP_IPV4 | NDIS_HASH_TCP_IPV6 | \
	 NDIS_HASH_UDP_IPV6 | NDIS_HASH_TCP_IPV6_EX | NDIS_HASH_UDP_IPV6_EX)
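
/* Illustrative sketch: the rx_hashtype reported in the RX completion below
 * can be mapped onto the stack's hash levels with these masks:
 *
 *	if (hashtype & MANA_HASH_L4)
 *		skb_set_hash(skb, pkt_hash, PKT_HASH_TYPE_L4);
 *	else if (hashtype & MANA_HASH_L3)
 *		skb_set_hash(skb, pkt_hash, PKT_HASH_TYPE_L3);
 *	else
 *		skb_set_hash(skb, pkt_hash, PKT_HASH_TYPE_NONE);
 */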

struct mana_rxcomp_perpkt_info {
	u32 pkt_len : 16;
	u32 reserved1 : 16;
	u32 reserved2;
	u32 pkt_hash;
}; /* HW DATA */

#define MANA_RXCOMP_OOB_NUM_PPI 4

/* Receive completion OOB */
struct mana_rxcomp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 rx_vlan_id : 12;
	u32 rx_vlantag_present : 1;
	u32 rx_outer_iphdr_csum_succeed : 1;
	u32 rx_outer_iphdr_csum_fail : 1;
	u32 reserved1 : 1;
	u32 rx_hashtype : 9;
	u32 rx_iphdr_csum_succeed : 1;
	u32 rx_iphdr_csum_fail : 1;
	u32 rx_tcp_csum_succeed : 1;
	u32 rx_tcp_csum_fail : 1;
	u32 rx_udp_csum_succeed : 1;
	u32 rx_udp_csum_fail : 1;
	u32 reserved2 : 1;

	struct mana_rxcomp_perpkt_info ppi[MANA_RXCOMP_OOB_NUM_PPI];

	u32 rx_wqe_offset;
}; /* HW DATA */

struct mana_tx_comp_oob {
	struct mana_cqe_header cqe_hdr;

	u32 tx_data_offset;

	u32 tx_sgl_offset : 5;
	u32 tx_wqe_offset : 27;

	u32 reserved[12];
}; /* HW DATA */

struct mana_rxq;

#define CQE_POLLING_BUFFER 512

struct mana_cq {
	struct gdma_queue *gdma_cq;

	/* Cache the CQ id (used to verify if each CQE comes to the right CQ). */
	u32 gdma_id;

	/* Type of the CQ: TX or RX */
	enum mana_cq_type type;

	/* Pointer to the mana_rxq that is pushing RX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_RX.
	 */
	struct mana_rxq *rxq;

	/* Pointer to the mana_txq that is pushing TX CQEs to the queue.
	 * Must be non-NULL if and only if type is MANA_CQ_TYPE_TX.
	 */
	struct mana_txq *txq;

	/* Buffer which the CQ handler can copy the CQEs into. */
	struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER];

	/* NAPI data */
	struct napi_struct napi;
	int work_done;
	int work_done_since_doorbell;
	int budget;
};
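
/* Illustrative sketch (hypothetical function; the CQE-draining step and the
 * interrupt re-arm are elided): the work_done and budget fields above
 * support a conventional NAPI poll callback:
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
 *
 *		cq->work_done = 0;
 *		cq->budget = budget;
 *		// ... drain up to budget CQEs, bumping cq->work_done ...
 *		if (cq->work_done < budget &&
 *		    napi_complete_done(napi, cq->work_done)) {
 *			// re-arm the completion notification here
 *		}
 *		return min(cq->work_done, budget);
 *	}
 */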

struct mana_recv_buf_oob {
	/* A valid GDMA work request representing the data buffer. */
	struct gdma_wqe_request wqe_req;

	void *buf_va;
	bool from_pool; /* allocated from a page pool */

	/* SGL of the buffer going to be sent as part of the work request. */
	u32 num_sge;
	struct gdma_sge sgl[MAX_RX_WQE_SGL_ENTRIES];

	/* Required to store the result of mana_gd_post_work_request.
	 * gdma_posted_wqe_info.wqe_size_in_bu is required for progressing the
	 * work queue when the WQE is consumed.
	 */
	struct gdma_posted_wqe_info wqe_inf;
};

#define MANA_RXBUF_PAD (SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) \
			+ ETH_HLEN)

#define MANA_XDP_MTU_MAX (PAGE_SIZE - MANA_RXBUF_PAD - XDP_PACKET_HEADROOM)
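
/* Illustrative sketch: an XDP buffer, its headroom and its shared info must
 * all fit in one page, so an XDP attach path would reject a larger MTU
 * (error wording is illustrative):
 *
 *	if (prog && ndev->mtu > MANA_XDP_MTU_MAX) {
 *		netdev_err(ndev, "XDP: mtu %u too large, max %lu\n",
 *			   ndev->mtu, MANA_XDP_MTU_MAX);
 *		return -EOPNOTSUPP;
 *	}
 */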

struct mana_rxq {
	struct gdma_queue *gdma_rq;
	/* Cache the gdma receive queue id */
	u32 gdma_id;

	/* Index of RQ in the vPort, not gdma receive queue id */
	u32 rxq_idx;

	u32 datasize;
	u32 alloc_size;
	u32 headroom;
	u32 frag_count;

	mana_handle_t rxobj;

	struct mana_cq rx_cq;

	struct completion fence_event;

	struct net_device *ndev;

	/* Total number of receive buffers to be allocated */
	u32 num_rx_buf;

	u32 buf_index;

	struct mana_stats_rx stats;

	struct bpf_prog __rcu *bpf_prog;
	struct xdp_rxq_info xdp_rxq;
	void *xdp_save_va; /* for reusing */
	bool xdp_flush;
	int xdp_rc; /* XDP redirect return code */

	struct page_pool *page_pool;
	struct dentry *mana_rx_debugfs;

	/* MUST BE THE LAST MEMBER:
	 * Each receive buffer has an associated mana_recv_buf_oob.
	 */
	struct mana_recv_buf_oob rx_oobs[] __counted_by(num_rx_buf);
};
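
/* Illustrative sketch: because rx_oobs[] is a flexible array annotated with
 * __counted_by(num_rx_buf), a mana_rxq is allocated in one shot with
 * struct_size(), and num_rx_buf is set before the array is touched:
 *
 *	struct mana_rxq *rxq;
 *
 *	rxq = kzalloc(struct_size(rxq, rx_oobs, num_rx_buf), GFP_KERNEL);
 *	if (!rxq)
 *		return NULL;
 *	rxq->num_rx_buf = num_rx_buf;
 */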

struct mana_tx_qp {
	struct mana_txq txq;

	struct mana_cq tx_cq;

	mana_handle_t tx_object;

	struct dentry *mana_tx_debugfs;
};

struct mana_ethtool_stats {
	u64 stop_queue;
	u64 wake_queue;
	u64 tx_cqe_err;
	u64 tx_cqe_unknown_type;
	u64 tx_linear_pkt_cnt;
	u64 rx_coalesced_err;
	u64 rx_cqe_unknown_type;
};

struct mana_ethtool_hc_stats {
	u64 hc_rx_discards_no_wqe;
	u64 hc_rx_err_vport_disabled;
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	u64 hc_tx_err_gf_disabled;
	u64 hc_tx_err_vport_disabled;
	u64 hc_tx_err_inval_vportoffset_pkt;
	u64 hc_tx_err_vlan_enforcement;
	u64 hc_tx_err_eth_type_enforcement;
	u64 hc_tx_err_sa_enforcement;
	u64 hc_tx_err_sqpdid_enforcement;
	u64 hc_tx_err_cqpdid_enforcement;
	u64 hc_tx_err_mtu_violation;
	u64 hc_tx_err_inval_oob;
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	u64 hc_tx_err_gdma;
};

struct mana_ethtool_phy_stats {
	/* Drop counters */
	u64 rx_pkt_drop_phy;
	u64 tx_pkt_drop_phy;

	/* Per-TC traffic counters */
	u64 rx_pkt_tc0_phy;
	u64 tx_pkt_tc0_phy;
	u64 rx_pkt_tc1_phy;
	u64 tx_pkt_tc1_phy;
	u64 rx_pkt_tc2_phy;
	u64 tx_pkt_tc2_phy;
	u64 rx_pkt_tc3_phy;
	u64 tx_pkt_tc3_phy;
	u64 rx_pkt_tc4_phy;
	u64 tx_pkt_tc4_phy;
	u64 rx_pkt_tc5_phy;
	u64 tx_pkt_tc5_phy;
	u64 rx_pkt_tc6_phy;
	u64 tx_pkt_tc6_phy;
	u64 rx_pkt_tc7_phy;
	u64 tx_pkt_tc7_phy;

	u64 rx_byte_tc0_phy;
	u64 tx_byte_tc0_phy;
	u64 rx_byte_tc1_phy;
	u64 tx_byte_tc1_phy;
	u64 rx_byte_tc2_phy;
	u64 tx_byte_tc2_phy;
	u64 rx_byte_tc3_phy;
	u64 tx_byte_tc3_phy;
	u64 rx_byte_tc4_phy;
	u64 tx_byte_tc4_phy;
	u64 rx_byte_tc5_phy;
	u64 tx_byte_tc5_phy;
	u64 rx_byte_tc6_phy;
	u64 tx_byte_tc6_phy;
	u64 rx_byte_tc7_phy;
	u64 tx_byte_tc7_phy;

	/* Per-TC pause counters */
	u64 rx_pause_tc0_phy;
	u64 tx_pause_tc0_phy;
	u64 rx_pause_tc1_phy;
	u64 tx_pause_tc1_phy;
	u64 rx_pause_tc2_phy;
	u64 tx_pause_tc2_phy;
	u64 rx_pause_tc3_phy;
	u64 tx_pause_tc3_phy;
	u64 rx_pause_tc4_phy;
	u64 tx_pause_tc4_phy;
	u64 rx_pause_tc5_phy;
	u64 tx_pause_tc5_phy;
	u64 rx_pause_tc6_phy;
	u64 tx_pause_tc6_phy;
	u64 rx_pause_tc7_phy;
	u64 tx_pause_tc7_phy;
};

struct mana_context {
	struct gdma_dev *gdma_dev;

	u16 num_ports;
	u8 bm_hostmode;

	struct mana_ethtool_hc_stats hc_stats;
	struct mana_eq *eqs;
	struct dentry *mana_eqs_debugfs;

	/* Workqueue for querying hardware stats */
	struct delayed_work gf_stats_work;
	bool hwc_timeout_occurred;

	struct net_device *ports[MAX_PORTS_IN_MANA_DEV];

	/* Link state change work */
	struct work_struct link_change_work;
	u32 link_event;
};

struct mana_port_context {
	struct mana_context *ac;
	struct net_device *ndev;

	u8 mac_addr[ETH_ALEN];

	enum TRI_STATE rss_state;

	mana_handle_t default_rxobj;
	bool tx_shortform_allowed;
	u16 tx_vp_offset;

	struct mana_tx_qp *tx_qp;

	/* Indirection Table for RX & TX. The values are queue indexes */
	u32 *indir_table;
	u32 indir_table_sz;

	/* Indirection table containing RxObject Handles */
	mana_handle_t *rxobj_table;

	/* Hash key used by the NIC */
	u8 hashkey[MANA_HASH_KEY_SIZE];

	/* This points to an array of num_queues of RQ pointers. */
	struct mana_rxq **rxqs;

	/* pre-allocated rx buffer array */
	void **rxbufs_pre;
	dma_addr_t *das_pre;
	int rxbpre_total;
	u32 rxbpre_datasize;
	u32 rxbpre_alloc_size;
	u32 rxbpre_headroom;
	u32 rxbpre_frag_count;

	struct bpf_prog *bpf_prog;

	/* Create num_queues EQs, SQs, SQ-CQs, RQs and RQ-CQs, respectively. */
	unsigned int max_queues;
	unsigned int num_queues;

	unsigned int rx_queue_size;
	unsigned int tx_queue_size;

	mana_handle_t port_handle;
	mana_handle_t pf_filter_handle;

	/* Mutex for sharing access to vport_use_count */
	struct mutex vport_mutex;
	int vport_use_count;

	/* Net shaper handle */
	struct net_shaper_handle handle;

	u16 port_idx;
	/* Currently configured speed (mbps) */
	u32 speed;
	/* Maximum speed supported by the SKU (mbps) */
	u32 max_speed;

	bool port_is_up;
	bool port_st_save; /* Saved port state */

	struct mana_ethtool_stats eth_stats;

	struct mana_ethtool_phy_stats phy_stats;

	/* Debugfs */
	struct dentry *mana_port_debugfs;
};

netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int mana_config_rss(struct mana_port_context *ac, enum TRI_STATE rx,
		    bool update_hash, bool update_tab);

int mana_alloc_queues(struct net_device *ndev);
int mana_attach(struct net_device *ndev);
int mana_detach(struct net_device *ndev, bool from_close);

int mana_probe(struct gdma_dev *gd, bool resuming);
void mana_remove(struct gdma_dev *gd, bool suspending);

int mana_rdma_probe(struct gdma_dev *gd);
void mana_rdma_remove(struct gdma_dev *gd);

void mana_xdp_tx(struct sk_buff *skb, struct net_device *ndev);
int mana_xdp_xmit(struct net_device *ndev, int n, struct xdp_frame **frames,
		  u32 flags);
u32 mana_run_xdp(struct net_device *ndev, struct mana_rxq *rxq,
		 struct xdp_buff *xdp, void *buf_va, uint pkt_len);
struct bpf_prog *mana_xdp_get(struct mana_port_context *apc);
void mana_chn_setxdp(struct mana_port_context *apc, struct bpf_prog *prog);
int mana_bpf(struct net_device *ndev, struct netdev_bpf *bpf);
int mana_query_gf_stats(struct mana_context *ac);
int mana_query_link_cfg(struct mana_port_context *apc);
int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
		      int enable_clamping);
void mana_query_phy_stats(struct mana_port_context *apc);
int mana_pre_alloc_rxbufs(struct mana_port_context *apc, int mtu, int num_queues);
void mana_pre_dealloc_rxbufs(struct mana_port_context *apc);
void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc);

extern const struct ethtool_ops mana_ethtool_ops;
extern struct dentry *mana_debugfs_root;

/* A CQ can be created without being associated with any EQ */
#define GDMA_CQ_NO_EQ 0xffff

struct mana_obj_spec {
	u32 queue_index;
	u64 gdma_region;
	u32 queue_size;
	u32 attached_eq;
	u32 modr_ctx_id;
};

enum mana_command_code {
	MANA_QUERY_DEV_CONFIG = 0x20001,
	MANA_QUERY_GF_STAT = 0x20002,
	MANA_CONFIG_VPORT_TX = 0x20003,
	MANA_CREATE_WQ_OBJ = 0x20004,
	MANA_DESTROY_WQ_OBJ = 0x20005,
	MANA_FENCE_RQ = 0x20006,
	MANA_CONFIG_VPORT_RX = 0x20007,
	MANA_QUERY_VPORT_CONFIG = 0x20008,
	MANA_QUERY_LINK_CONFIG = 0x2000A,
	MANA_SET_BW_CLAMP = 0x2000B,
	MANA_QUERY_PHY_STAT = 0x2000C,

	/* Privileged commands for the PF mode */
	MANA_REGISTER_FILTER = 0x28000,
	MANA_DEREGISTER_FILTER = 0x28001,
	MANA_REGISTER_HW_PORT = 0x28003,
	MANA_DEREGISTER_HW_PORT = 0x28004,
};

/* Query Link Configuration */
struct mana_query_link_config_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
}; /* HW DATA */

struct mana_query_link_config_resp {
	struct gdma_resp_hdr hdr;
	u32 qos_speed_mbps;
	u8 qos_unconfigured;
	u8 reserved1[3];
	u32 link_speed_mbps;
	u8 reserved2[4];
}; /* HW DATA */

/* Set Bandwidth Clamp */
struct mana_set_bw_clamp_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	enum TRI_STATE enable_clamping;
	u32 link_speed_mbps;
}; /* HW DATA */

struct mana_set_bw_clamp_resp {
	struct gdma_resp_hdr hdr;
	u8 qos_unconfigured;
	u8 reserved[7];
}; /* HW DATA */

/* Query Device Configuration */
struct mana_query_device_cfg_req {
	struct gdma_req_hdr hdr;

	/* MANA NIC driver capability flags */
	u64 mn_drv_cap_flags1;
	u64 mn_drv_cap_flags2;
	u64 mn_drv_cap_flags3;
	u64 mn_drv_cap_flags4;

	u32 proto_major_ver;
	u32 proto_minor_ver;
	u32 proto_micro_ver;

	u32 reserved;
}; /* HW DATA */

struct mana_query_device_cfg_resp {
	struct gdma_resp_hdr hdr;

	u64 pf_cap_flags1;
	u64 pf_cap_flags2;
	u64 pf_cap_flags3;
	u64 pf_cap_flags4;

	u16 max_num_vports;
	u8 bm_hostmode; /* response v3: Bare Metal Host Mode */
	u8 reserved;
	u32 max_num_eqs;

	/* response v2: */
	u16 adapter_mtu;
	u16 reserved2;
	u32 reserved3;
}; /* HW DATA */

/* Query vPort Configuration */
struct mana_query_vport_cfg_req {
	struct gdma_req_hdr hdr;
	u32 vport_index;
}; /* HW DATA */

struct mana_query_vport_cfg_resp {
	struct gdma_resp_hdr hdr;
	u32 max_num_sq;
	u32 max_num_rq;
	u32 num_indirection_ent;
	u32 reserved1;
	u8 mac_addr[6];
	u8 reserved2[2];
	mana_handle_t vport;
}; /* HW DATA */

/* Configure vPort */
struct mana_config_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 pdid;
	u32 doorbell_pageid;
}; /* HW DATA */

struct mana_config_vport_resp {
	struct gdma_resp_hdr hdr;
	u16 tx_vport_offset;
	u8 short_form_allowed;
	u8 reserved;
}; /* HW DATA */

/* Create WQ Object */
struct mana_create_wqobj_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u32 wq_type;
	u32 reserved;
	u64 wq_gdma_region;
	u64 cq_gdma_region;
	u32 wq_size;
	u32 cq_size;
	u32 cq_moderation_ctx_id;
	u32 cq_parent_qid;
}; /* HW DATA */

struct mana_create_wqobj_resp {
	struct gdma_resp_hdr hdr;
	u32 wq_id;
	u32 cq_id;
	mana_handle_t wq_obj;
}; /* HW DATA */

/* Destroy WQ Object */
struct mana_destroy_wqobj_req {
	struct gdma_req_hdr hdr;
	u32 wq_type;
	u32 reserved;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_destroy_wqobj_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Fence RQ */
struct mana_fence_rq_req {
	struct gdma_req_hdr hdr;
	mana_handle_t wq_obj_handle;
}; /* HW DATA */

struct mana_fence_rq_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Query GF stats */
struct mana_query_gf_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_gf_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;
	/* rx errors/discards */
	u64 rx_discards_nowqe;
	u64 rx_err_vport_disabled;
	/* rx bytes/packets */
	u64 hc_rx_bytes;
	u64 hc_rx_ucast_pkts;
	u64 hc_rx_ucast_bytes;
	u64 hc_rx_bcast_pkts;
	u64 hc_rx_bcast_bytes;
	u64 hc_rx_mcast_pkts;
	u64 hc_rx_mcast_bytes;
	/* tx errors */
	u64 tx_err_gf_disabled;
	u64 tx_err_vport_disabled;
	u64 tx_err_inval_vport_offset_pkt;
	u64 tx_err_vlan_enforcement;
	u64 tx_err_ethtype_enforcement;
	u64 tx_err_SA_enforcement;
	u64 tx_err_SQPDID_enforcement;
	u64 tx_err_CQPDID_enforcement;
	u64 tx_err_mtu_violation;
	u64 tx_err_inval_oob;
	/* tx bytes/packets */
	u64 hc_tx_bytes;
	u64 hc_tx_ucast_pkts;
	u64 hc_tx_ucast_bytes;
	u64 hc_tx_bcast_pkts;
	u64 hc_tx_bcast_bytes;
	u64 hc_tx_mcast_pkts;
	u64 hc_tx_mcast_bytes;
	/* tx error */
	u64 tx_err_gdma;
}; /* HW DATA */

/* Query phy stats */
struct mana_query_phy_stat_req {
	struct gdma_req_hdr hdr;
	u64 req_stats;
}; /* HW DATA */

struct mana_query_phy_stat_resp {
	struct gdma_resp_hdr hdr;
	u64 reported_stats;

	/* Aggregate drop counters */
	u64 rx_pkt_drop_phy;
	u64 tx_pkt_drop_phy;

	/* Per TC (Traffic Class) traffic counters */
	u64 rx_pkt_tc0_phy;
	u64 tx_pkt_tc0_phy;
	u64 rx_pkt_tc1_phy;
	u64 tx_pkt_tc1_phy;
	u64 rx_pkt_tc2_phy;
	u64 tx_pkt_tc2_phy;
	u64 rx_pkt_tc3_phy;
	u64 tx_pkt_tc3_phy;
	u64 rx_pkt_tc4_phy;
	u64 tx_pkt_tc4_phy;
	u64 rx_pkt_tc5_phy;
	u64 tx_pkt_tc5_phy;
	u64 rx_pkt_tc6_phy;
	u64 tx_pkt_tc6_phy;
	u64 rx_pkt_tc7_phy;
	u64 tx_pkt_tc7_phy;

	u64 rx_byte_tc0_phy;
	u64 tx_byte_tc0_phy;
	u64 rx_byte_tc1_phy;
	u64 tx_byte_tc1_phy;
	u64 rx_byte_tc2_phy;
	u64 tx_byte_tc2_phy;
	u64 rx_byte_tc3_phy;
	u64 tx_byte_tc3_phy;
	u64 rx_byte_tc4_phy;
	u64 tx_byte_tc4_phy;
	u64 rx_byte_tc5_phy;
	u64 tx_byte_tc5_phy;
	u64 rx_byte_tc6_phy;
	u64 tx_byte_tc6_phy;
	u64 rx_byte_tc7_phy;
	u64 tx_byte_tc7_phy;

	/* Per TC (Traffic Class) pause counters */
	u64 rx_pause_tc0_phy;
	u64 tx_pause_tc0_phy;
	u64 rx_pause_tc1_phy;
	u64 tx_pause_tc1_phy;
	u64 rx_pause_tc2_phy;
	u64 tx_pause_tc2_phy;
	u64 rx_pause_tc3_phy;
	u64 tx_pause_tc3_phy;
	u64 rx_pause_tc4_phy;
	u64 tx_pause_tc4_phy;
	u64 rx_pause_tc5_phy;
	u64 tx_pause_tc5_phy;
	u64 rx_pause_tc6_phy;
	u64 tx_pause_tc6_phy;
	u64 rx_pause_tc7_phy;
	u64 tx_pause_tc7_phy;
}; /* HW DATA */

/* Configure vPort Rx Steering */
struct mana_cfg_rx_steer_req_v2 {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u16 num_indir_entries;
	u16 indir_tab_offset;
	u32 rx_enable;
	u32 rss_enable;
	u8 update_default_rxobj;
	u8 update_hashkey;
	u8 update_indir_tab;
	u8 reserved;
	mana_handle_t default_rxobj;
	u8 hashkey[MANA_HASH_KEY_SIZE];
	u8 cqe_coalescing_enable;
	u8 reserved2[7];
	mana_handle_t indir_tab[] __counted_by(num_indir_entries);
}; /* HW DATA */
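
/* Illustrative sketch (partial field setup): the request ends in a flexible
 * indirection table, so it is sized with struct_size() and the table's
 * offset is reported to the hardware:
 *
 *	struct mana_cfg_rx_steer_req_v2 *req;
 *
 *	req = kzalloc(struct_size(req, indir_tab, num_entries), GFP_KERNEL);
 *	if (!req)
 *		return -ENOMEM;
 *	req->num_indir_entries = num_entries;
 *	req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
 *					 indir_tab);
 */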

struct mana_cfg_rx_steer_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register HW vPort */
struct mana_register_hw_vport_req {
	struct gdma_req_hdr hdr;
	u16 attached_gfid;
	u8 is_pf_default_vport;
	u8 reserved1;
	u8 allow_all_ether_types;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
}; /* HW DATA */

struct mana_register_hw_vport_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

/* Deregister HW vPort */
struct mana_deregister_hw_vport_req {
	struct gdma_req_hdr hdr;
	mana_handle_t hw_vport_handle;
}; /* HW DATA */

struct mana_deregister_hw_vport_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Register filter */
struct mana_register_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t vport;
	u8 mac_addr[6];
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 reserved4;
	u16 reserved5;
	u32 reserved6;
	u32 reserved7;
	u32 reserved8;
}; /* HW DATA */

struct mana_register_filter_resp {
	struct gdma_resp_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

/* Deregister filter */
struct mana_deregister_filter_req {
	struct gdma_req_hdr hdr;
	mana_handle_t filter_handle;
}; /* HW DATA */

struct mana_deregister_filter_resp {
	struct gdma_resp_hdr hdr;
}; /* HW DATA */

/* Requested GF stats flags */
/* Rx discards/errors */
#define STATISTICS_FLAGS_RX_DISCARDS_NO_WQE		0x0000000000000001
#define STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED	0x0000000000000002
/* Rx bytes/pkts */
#define STATISTICS_FLAGS_HC_RX_BYTES			0x0000000000000004
#define STATISTICS_FLAGS_HC_RX_UCAST_PACKETS		0x0000000000000008
#define STATISTICS_FLAGS_HC_RX_UCAST_BYTES		0x0000000000000010
#define STATISTICS_FLAGS_HC_RX_MCAST_PACKETS		0x0000000000000020
#define STATISTICS_FLAGS_HC_RX_MCAST_BYTES		0x0000000000000040
#define STATISTICS_FLAGS_HC_RX_BCAST_PACKETS		0x0000000000000080
#define STATISTICS_FLAGS_HC_RX_BCAST_BYTES		0x0000000000000100
/* Tx errors */
#define STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED		0x0000000000000200
#define STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED	0x0000000000000400
#define STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS \
							0x0000000000000800
#define STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT	0x0000000000001000
#define STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT \
							0x0000000000002000
#define STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT	0x0000000000004000
#define STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT	0x0000000000008000
#define STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT	0x0000000000010000
#define STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION	0x0000000000020000
#define STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB		0x0000000000040000
/* Tx bytes/pkts */
#define STATISTICS_FLAGS_HC_TX_BYTES			0x0000000000080000
#define STATISTICS_FLAGS_HC_TX_UCAST_PACKETS		0x0000000000100000
#define STATISTICS_FLAGS_HC_TX_UCAST_BYTES		0x0000000000200000
#define STATISTICS_FLAGS_HC_TX_MCAST_PACKETS		0x0000000000400000
#define STATISTICS_FLAGS_HC_TX_MCAST_BYTES		0x0000000000800000
#define STATISTICS_FLAGS_HC_TX_BCAST_PACKETS		0x0000000001000000
#define STATISTICS_FLAGS_HC_TX_BCAST_BYTES		0x0000000002000000
/* Tx error */
#define STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR		0x0000000004000000
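
/* Illustrative sketch: a caller selects which GF counters to query by OR-ing
 * the flags above into req_stats (two flags picked as an example):
 *
 *	struct mana_query_gf_stat_req req = {};
 *
 *	req.req_stats = STATISTICS_FLAGS_HC_RX_BYTES |
 *			STATISTICS_FLAGS_HC_TX_BYTES;
 */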

#define MANA_MAX_NUM_QUEUES 64

#define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1)
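
/* Illustrative sketch: the short TX OOB has only 8 bits for the vPort
 * offset (see short_vp_offset above), so the TX path can use
 * MANA_SHORT_PKT_FMT only when the offset fits and must otherwise fall
 * back to the long format:
 *
 *	pkg.tx_oob.s_oob.pkt_fmt =
 *		(apc->tx_vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) ?
 *		MANA_LONG_PKT_FMT : MANA_SHORT_PKT_FMT;
 */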

struct mana_tx_package {
	struct gdma_wqe_request wqe_req;
	struct gdma_sge sgl_array[5];
	struct gdma_sge *sgl_ptr;

	struct mana_tx_oob tx_oob;

	struct gdma_posted_wqe_info wqe_info;
};

int mana_create_wq_obj(struct mana_port_context *apc,
		       mana_handle_t vport,
		       u32 wq_type, struct mana_obj_spec *wq_spec,
		       struct mana_obj_spec *cq_spec,
		       mana_handle_t *wq_obj);

void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
			 mana_handle_t wq_obj);

int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
		   u32 doorbell_pg_id);
void mana_uncfg_vport(struct mana_port_context *apc);

struct net_device *mana_get_primary_netdev(struct mana_context *ac,
					   u32 port_index,
					   netdevice_tracker *tracker);
#endif /* _MANA_H */