/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

#include <linux/net/intel/libie/pctype.h>
#include <net/xdp.h>
#include "i40e_type.h"

/* Interrupt Throttling and Rate Limiting Goodies */
#define I40E_DEFAULT_IRQ_WORK		256

/* The datasheets for the X710 and XL710 indicate that the maximum value for
 * the ITR is 8160 usec, which is called out as 0xFF0 with a 2 usec
 * resolution. 8160 is 0x1FE0 when written out in hex. So instead of storing
 * the register value, which is divided by 2, let's use the actual values and
 * avoid an excessive amount of translation.
 */
#define I40E_ITR_DYNAMIC	0x8000	/* use top bit as a flag */
#define I40E_ITR_MASK		0x1FFE	/* mask for ITR register value */
#define I40E_MIN_ITR		     2	/* reg uses 2 usec resolution */
#define I40E_ITR_20K		    50
#define I40E_ITR_8K		   122
#define I40E_MAX_ITR		  8160	/* maximum value as per datasheet */
#define ITR_TO_REG(setting)	((setting) & ~I40E_ITR_DYNAMIC)
#define ITR_REG_ALIGN(setting)	__ALIGN_MASK(setting, ~I40E_ITR_MASK)
#define ITR_IS_DYNAMIC(setting)	(!!((setting) & I40E_ITR_DYNAMIC))

#define I40E_ITR_RX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
#define I40E_ITR_TX_DEF		(I40E_ITR_20K | I40E_ITR_DYNAMIC)
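
/* Worked example (illustrative, not used by the driver): the default Rx
 * setting above is I40E_ITR_20K | I40E_ITR_DYNAMIC = 0x8032, so
 * ITR_IS_DYNAMIC() reports true and ITR_TO_REG() strips the flag to yield
 * 50 usec. Per the note above, settings are stored in microseconds and are
 * only converted to the register's 2 usec units at programming time.
 */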

/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
 * the value of the rate limit is non-zero
 */
#define INTRL_ENA		BIT(6)
#define I40E_MAX_INTRL		0x3B	/* reg uses 4 usec resolution */
#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)

/**
 * i40e_intrl_usec_to_reg - convert interrupt rate limit to register
 * @intrl: interrupt rate limit to convert
 *
 * This function converts a decimal interrupt rate limit to the appropriate
 * register format expected by the firmware when setting interrupt rate limit.
 */
static inline u16 i40e_intrl_usec_to_reg(int intrl)
{
	if (intrl >> 2)
		return ((intrl >> 2) | INTRL_ENA);
	else
		return 0;
}
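
/* Worked example (illustrative): a requested limit of 20 usec is encoded as
 * (20 >> 2) | INTRL_ENA = 0x45, i.e. 5 in the register's 4 usec units with
 * the enable bit set, and INTRL_REG_TO_USEC(0x45) maps it back to 20 usec.
 * Requests below 4 usec return 0, which leaves rate limiting disabled.
 */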

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* This enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX. ITR_NONE cannot be used as an index 'n' into any
 * register but instead is a special value meaning "don't update" ITR0/1/2.
 */
enum i40e_dyn_idx {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR	I40E_IDX_ITR0
#define I40E_TX_ITR	I40E_IDX_ITR1
#define I40E_SW_ITR	I40E_IDX_ITR2

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HASHCFG ( \
	BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(LIBIE_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(LIBIE_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HASHCFG_EXPANDED (I40E_DEFAULT_RSS_HASHCFG | \
	BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(LIBIE_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hashcfg(pf) \
	(test_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, (pf)->hw.caps) ? \
	 I40E_DEFAULT_RSS_HASHCFG_EXPANDED : I40E_DEFAULT_RSS_HASHCFG)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256	256
#define I40E_RXBUFFER_1536	1536	/* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048	2048
#define I40E_RXBUFFER_3072	3072	/* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER	9728	/* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more,
 * this adds up to 512 bytes of extra data meaning the smallest allocation
 * we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE	I40E_RXBUFFER_256
#define I40E_PACKET_HDR_PAD	(ETH_HLEN + ETH_FCS_LEN + (VLAN_HLEN * 2))
#define i40e_rx_desc		i40e_16byte_rx_desc

#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attempt to maximize the headroom available for incoming frames. We
 * use a 2K buffer for receives and need 1536/1534 to store the data for
 * the frame. This leaves us with 512 bytes of room. From that we need
 * to deduct the space needed for the shared info and the padding needed
 * to IP align the frame.
 *
 * Note: For cache line sizes 256 or larger this value is going to end
 * up negative. In these cases we should fall back to the legacy
 * receive path.
 */
#if (PAGE_SIZE < 8192)
#define I40E_2K_TOO_SMALL_WITH_PADDING \
	((NET_SKB_PAD + I40E_RXBUFFER_1536) > SKB_WITH_OVERHEAD(I40E_RXBUFFER_2048))

static inline int i40e_compute_pad(int rx_buf_len)
{
	int page_size, pad_size;

	page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2);
	pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len;

	return pad_size;
}

static inline int i40e_skb_pad(void)
{
	int rx_buf_len;

	/* If a 2K buffer cannot handle a standard Ethernet frame then
	 * optimize padding for a 3K buffer instead of a 1.5K buffer.
	 *
	 * For a 3K buffer we need to add enough padding to allow for
	 * tailroom due to NET_IP_ALIGN possibly shifting us out of
	 * cache-line alignment.
	 */
	if (I40E_2K_TOO_SMALL_WITH_PADDING)
		rx_buf_len = I40E_RXBUFFER_3072 + SKB_DATA_ALIGN(NET_IP_ALIGN);
	else
		rx_buf_len = I40E_RXBUFFER_1536;

	/* if needed make room for NET_IP_ALIGN */
	rx_buf_len -= NET_IP_ALIGN;

	return i40e_compute_pad(rx_buf_len);
}
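
/* Worked example (illustrative; the exact numbers are config dependent):
 * assuming 4 KiB pages, NET_SKB_PAD of 64, NET_IP_ALIGN of 2 and a 320-byte
 * skb_shared_info, I40E_2K_TOO_SMALL_WITH_PADDING evaluates to
 * (64 + 1536) > (2048 - 320), i.e. false, so rx_buf_len is 1536 - 2 = 1534
 * and i40e_compute_pad() returns SKB_WITH_OVERHEAD(2048) - 1534 =
 * 1728 - 1534 = 194 bytes of headroom.
 */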

#define I40E_SKB_PAD i40e_skb_pad()
#else
#define I40E_2K_TOO_SMALL_WITH_PADDING false
#define I40E_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#endif

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
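
/* Illustrative usage: an Rx clean-up path can test the descriptor-done bit
 * with something like
 *	i40e_test_staterr(rx_desc, BIT(I40E_RX_DESC_STATUS_DD_SHIFT))
 * where the bit position comes from the descriptor layout in i40e_type.h.
 */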

/* How many Rx Buffers do we bundle into one write to the hardware? */
#define I40E_RX_BUFFER_WRITE	32	/* Must be power of 2 */

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_MAX_BUFFER_TXD	8
#define I40E_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE		4096
#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))
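
/* Worked example (illustrative): I40E_MAX_DATA_PER_TXD is 0x3FFF (16383);
 * masking off the low 12 bits leaves I40E_MAX_DATA_PER_TXD_ALIGNED at
 * 0x3000 (12288), i.e. the 12K per-descriptor limit assumed below.
 */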

/**
 * i40e_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
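
/* Worked example (illustrative): for a 64K TSO payload,
 * (65536 * 85) >> 20 = 5, so i40e_txd_use_count(65536) returns 6, matching
 * ceil(65536 / 12288); a 1500-byte frame yields (1500 * 85) >> 20 = 0 and
 * therefore a single descriptor.
 */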

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 6)

#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
#define I40E_TX_FLAGS_TSO		BIT(3)
#define I40E_TX_FLAGS_IPV4		BIT(4)
#define I40E_TX_FLAGS_IPV6		BIT(5)
#define I40E_TX_FLAGS_TSYN		BIT(8)
#define I40E_TX_FLAGS_FD_SB		BIT(9)
#define I40E_TX_FLAGS_UDP_TUNNEL	BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define I40E_TX_FLAGS_VLAN_SHIFT	16

struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
	__u32 page_offset;
	__u16 pagecnt_bias;
	__u32 page_count;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_stopped;
	int prev_pkt_ctr;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 page_alloc_count;
	u64 page_waive_count;
	u64 page_busy_count;
};

enum i40e_ring_state {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
	__I40E_RING_STATE_NBITS /* must be last */
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_HEADER_SPLIT 1
#define I40E_RX_SPLIT_L2      0x1
#define I40E_RX_SPLIT_IP      0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	struct bpf_prog *xdp_prog;
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
		struct xdp_buff **rx_bi_zc;
	};
	DECLARE_BITMAP(state, __I40E_RING_STATE_NBITS);
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* Storing xdp_buff on ring helps in saving the state of partially built
	 * packet when i40e_clean_rx_ring_irq() must return before it sees EOP
	 * and to resume packet building for this ring in the next call to
	 * i40e_clean_rx_ring_irq().
	 */
	struct xdp_buff xdp;

	/* Next descriptor to be processed; next_to_clean is updated only on
	 * processing EOP descriptor
	 */
	u16 next_to_process;
	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;
	u16 xdp_tx_active;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR		BIT(0)
#define I40E_RXR_FLAGS_BUILD_SKB_ENABLED	BIT(1)
#define I40E_TXR_FLAGS_XDP			BIT(2)

	/* stats structs */
	struct i40e_queue_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;

	struct i40e_channel *ch;
	u16 rx_offset;
	struct xdp_rxq_info xdp_rxq;
	struct xsk_buff_pool *xsk_pool;
} ____cacheline_internodealigned_in_smp;

static inline bool ring_uses_build_skb(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_RXR_FLAGS_BUILD_SKB_ENABLED);
}

static inline void set_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags |= I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline void clear_ring_build_skb_enabled(struct i40e_ring *ring)
{
	ring->flags &= ~I40E_RXR_FLAGS_BUILD_SKB_ENABLED;
}

static inline bool ring_is_xdp(struct i40e_ring *ring)
{
	return !!(ring->flags & I40E_TXR_FLAGS_XDP);
}

static inline void set_ring_xdp(struct i40e_ring *ring)
{
	ring->flags |= I40E_TXR_FLAGS_XDP;
}

#define I40E_ITR_ADAPTIVE_MIN_INC	0x0002
#define I40E_ITR_ADAPTIVE_MIN_USECS	0x0002
#define I40E_ITR_ADAPTIVE_MAX_USECS	0x007e
#define I40E_ITR_ADAPTIVE_LATENCY	0x8000
#define I40E_ITR_ADAPTIVE_BULK		0x0000

struct i40e_ring_container {
	struct i40e_ring *ring;		/* pointer to linked list of ring(s) */
	unsigned long next_update;	/* jiffies value of next update */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	u16 target_itr;			/* target ITR setting for ring(s) */
	u16 current_itr;		/* current ITR setting for ring(s) */
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
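
/* Illustrative usage (q_vector and budget_per_ring() are hypothetical
 * stand-ins for the caller's own context):
 *
 *	struct i40e_ring *ring;
 *
 *	i40e_for_each_ring(ring, q_vector->tx)
 *		budget_per_ring(ring);
 */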

static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))

bool i40e_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
u16 i40e_lan_select_queue(struct net_device *netdev, struct sk_buff *skb,
			  struct net_device *sb_dev);
void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
void i40e_clean_rx_ring(struct i40e_ring *rx_ring);
int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40e_free_tx_resources(struct i40e_ring *tx_ring);
void i40e_free_rx_resources(struct i40e_ring *rx_ring);
int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
void i40e_detect_recover_hung(struct i40e_pf *pf);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		  u32 flags);
bool i40e_is_non_eop(struct i40e_ring *rx_ring,
		     union i40e_rx_desc *rx_desc);

/**
 * i40e_get_head - Retrieve head from head writeback
 * @tx_ring: tx ring to fetch head of
 *
 * Returns value of Tx ring head based on value stored
 * in head write-back location
 **/
static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
{
	void *head = (struct i40e_tx_desc *)tx_ring->desc + tx_ring->count;

	return le32_to_cpu(*(volatile __le32 *)head);
}

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns number of data descriptors needed for this skb. Returns 0 to
 * indicate there are not enough descriptors available in this ring since
 * we need at least one descriptor.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const skb_frag_t *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
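
/* Worked example (illustrative): a linear 1500-byte frame with no paged
 * fragments runs the loop once, so the count is i40e_txd_use_count(1500) = 1;
 * each additional fragment adds at least one more descriptor.
 */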

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40e_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40e_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}
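
/* Illustrative behaviour: with count below 8 no linearization is needed; a
 * non-GSO send using exactly 8 buffers is still accepted, anything above 8
 * requests linearization, and GSO packets at or above 8 buffers defer the
 * decision to __i40e_chk_linearize().
 */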

/**
 * txring_txq - Find the netdev Tx ring based on the i40e Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */