| 1 | /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ |
| 2 | /* |
| 3 | * Copyright(c) 2015 - 2018 Intel Corporation. |
| 4 | */ |
| 5 | |
| 6 | #ifndef _HFI1_SDMA_H |
| 7 | #define _HFI1_SDMA_H |
| 8 | |
| 9 | #include <linux/types.h> |
| 10 | #include <linux/list.h> |
| 11 | #include <asm/byteorder.h> |
| 12 | #include <linux/workqueue.h> |
| 13 | #include <linux/rculist.h> |
| 14 | |
| 15 | #include "hfi.h" |
| 16 | #include "verbs.h" |
| 17 | #include "sdma_txreq.h" |
| 18 | |
| 19 | /* Hardware limit */ |
| 20 | #define MAX_DESC 64 |
| 21 | /* Hardware limit for SDMA packet size */ |
| 22 | #define MAX_SDMA_PKT_SIZE ((16 * 1024) - 1) |
| 23 | |
| 24 | #define SDMA_MAP_NONE 0 |
| 25 | #define SDMA_MAP_SINGLE 1 |
| 26 | #define SDMA_MAP_PAGE 2 |
| 27 | |
| 28 | #define SDMA_AHG_VALUE_MASK 0xffff |
| 29 | #define SDMA_AHG_VALUE_SHIFT 0 |
| 30 | #define SDMA_AHG_INDEX_MASK 0xf |
| 31 | #define SDMA_AHG_INDEX_SHIFT 16 |
| 32 | #define SDMA_AHG_FIELD_LEN_MASK 0xf |
| 33 | #define SDMA_AHG_FIELD_LEN_SHIFT 20 |
| 34 | #define SDMA_AHG_FIELD_START_MASK 0x1f |
| 35 | #define SDMA_AHG_FIELD_START_SHIFT 24 |
| 36 | #define SDMA_AHG_UPDATE_ENABLE_MASK 0x1 |
| 37 | #define SDMA_AHG_UPDATE_ENABLE_SHIFT 31 |
| 38 | |
| 39 | /* AHG modes */ |
| 40 | |
| 41 | /* |
| 42 | * Be aware the ordering and values |
| 43 | * for SDMA_AHG_APPLY_UPDATE[123] |
| 44 | * are assumed in generating a skip |
| 45 | * count in submit_tx() in sdma.c |
| 46 | */ |
| 47 | #define SDMA_AHG_NO_AHG 0 |
| 48 | #define SDMA_AHG_COPY 1 |
| 49 | #define SDMA_AHG_APPLY_UPDATE1 2 |
| 50 | #define SDMA_AHG_APPLY_UPDATE2 3 |
| 51 | #define SDMA_AHG_APPLY_UPDATE3 4 |
| 52 | |
| 53 | /* |
| 54 | * Bits defined in the send DMA descriptor. |
| 55 | */ |
| 56 | #define SDMA_DESC0_FIRST_DESC_FLAG BIT_ULL(63) |
| 57 | #define SDMA_DESC0_LAST_DESC_FLAG BIT_ULL(62) |
| 58 | #define SDMA_DESC0_BYTE_COUNT_SHIFT 48 |
| 59 | #define SDMA_DESC0_BYTE_COUNT_WIDTH 14 |
| 60 | #define SDMA_DESC0_BYTE_COUNT_MASK \ |
| 61 | ((1ULL << SDMA_DESC0_BYTE_COUNT_WIDTH) - 1) |
| 62 | #define SDMA_DESC0_BYTE_COUNT_SMASK \ |
| 63 | (SDMA_DESC0_BYTE_COUNT_MASK << SDMA_DESC0_BYTE_COUNT_SHIFT) |
| 64 | #define SDMA_DESC0_PHY_ADDR_SHIFT 0 |
| 65 | #define SDMA_DESC0_PHY_ADDR_WIDTH 48 |
| 66 | #define SDMA_DESC0_PHY_ADDR_MASK \ |
| 67 | ((1ULL << SDMA_DESC0_PHY_ADDR_WIDTH) - 1) |
| 68 | #define SDMA_DESC0_PHY_ADDR_SMASK \ |
| 69 | (SDMA_DESC0_PHY_ADDR_MASK << SDMA_DESC0_PHY_ADDR_SHIFT) |
| 70 | |
#define SDMA_DESC1_HEADER_UPDATE1_SHIFT 32
#define SDMA_DESC1_HEADER_UPDATE1_WIDTH 32
#define SDMA_DESC1_HEADER_UPDATE1_MASK \
	((1ULL << SDMA_DESC1_HEADER_UPDATE1_WIDTH) - 1)
#define SDMA_DESC1_HEADER_UPDATE1_SMASK \
	(SDMA_DESC1_HEADER_UPDATE1_MASK << SDMA_DESC1_HEADER_UPDATE1_SHIFT)
#define SDMA_DESC1_HEADER_MODE_SHIFT 13
#define SDMA_DESC1_HEADER_MODE_WIDTH 3
#define SDMA_DESC1_HEADER_MODE_MASK \
	((1ULL << SDMA_DESC1_HEADER_MODE_WIDTH) - 1)
#define SDMA_DESC1_HEADER_MODE_SMASK \
	(SDMA_DESC1_HEADER_MODE_MASK << SDMA_DESC1_HEADER_MODE_SHIFT)
#define SDMA_DESC1_HEADER_INDEX_SHIFT 8
#define SDMA_DESC1_HEADER_INDEX_WIDTH 5
#define SDMA_DESC1_HEADER_INDEX_MASK \
	((1ULL << SDMA_DESC1_HEADER_INDEX_WIDTH) - 1)
#define SDMA_DESC1_HEADER_INDEX_SMASK \
	(SDMA_DESC1_HEADER_INDEX_MASK << SDMA_DESC1_HEADER_INDEX_SHIFT)
#define SDMA_DESC1_HEADER_DWS_SHIFT 4
#define SDMA_DESC1_HEADER_DWS_WIDTH 4
#define SDMA_DESC1_HEADER_DWS_MASK \
	((1ULL << SDMA_DESC1_HEADER_DWS_WIDTH) - 1)
#define SDMA_DESC1_HEADER_DWS_SMASK \
	(SDMA_DESC1_HEADER_DWS_MASK << SDMA_DESC1_HEADER_DWS_SHIFT)
| 95 | #define SDMA_DESC1_GENERATION_SHIFT 2 |
| 96 | #define SDMA_DESC1_GENERATION_WIDTH 2 |
| 97 | #define SDMA_DESC1_GENERATION_MASK \ |
| 98 | ((1ULL << SDMA_DESC1_GENERATION_WIDTH) - 1) |
| 99 | #define SDMA_DESC1_GENERATION_SMASK \ |
| 100 | (SDMA_DESC1_GENERATION_MASK << SDMA_DESC1_GENERATION_SHIFT) |
| 101 | #define SDMA_DESC1_INT_REQ_FLAG BIT_ULL(1) |
| 102 | #define SDMA_DESC1_HEAD_TO_HOST_FLAG BIT_ULL(0) |
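
/*
 * For orientation, the shift/width defines above lay out the 128-bit
 * descriptor roughly as follows (derived from the defines; the chip
 * specification is the authority):
 *
 *	qw[0]: [63]	first descriptor flag
 *	       [62]	last descriptor flag
 *	       [61:48]	byte count
 *	       [47:0]	physical address
 *	qw[1]: [63:32]	header update1
 *	       [15:13]	AHG header mode
 *	       [12:8]	AHG header index
 *	       [7:4]	header dws
 *	       [3:2]	generation
 *	       [1]	interrupt request flag
 *	       [0]	head-to-host flag
 */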
| 103 | |
| 104 | enum sdma_states { |
| 105 | sdma_state_s00_hw_down, |
| 106 | sdma_state_s10_hw_start_up_halt_wait, |
| 107 | sdma_state_s15_hw_start_up_clean_wait, |
| 108 | sdma_state_s20_idle, |
| 109 | sdma_state_s30_sw_clean_up_wait, |
| 110 | sdma_state_s40_hw_clean_up_wait, |
| 111 | sdma_state_s50_hw_halt_wait, |
| 112 | sdma_state_s60_idle_halt_wait, |
| 113 | sdma_state_s80_hw_freeze, |
| 114 | sdma_state_s82_freeze_sw_clean, |
| 115 | sdma_state_s99_running, |
| 116 | }; |
| 117 | |
| 118 | enum sdma_events { |
| 119 | sdma_event_e00_go_hw_down, |
| 120 | sdma_event_e10_go_hw_start, |
| 121 | sdma_event_e15_hw_halt_done, |
| 122 | sdma_event_e25_hw_clean_up_done, |
| 123 | sdma_event_e30_go_running, |
| 124 | sdma_event_e40_sw_cleaned, |
| 125 | sdma_event_e50_hw_cleaned, |
| 126 | sdma_event_e60_hw_halted, |
| 127 | sdma_event_e70_go_idle, |
| 128 | sdma_event_e80_hw_freeze, |
| 129 | sdma_event_e81_hw_frozen, |
| 130 | sdma_event_e82_hw_unfreeze, |
| 131 | sdma_event_e85_link_down, |
| 132 | sdma_event_e90_sw_halted, |
| 133 | }; |
| 134 | |
| 135 | struct sdma_set_state_action { |
| 136 | unsigned op_enable:1; |
| 137 | unsigned op_intenable:1; |
| 138 | unsigned op_halt:1; |
| 139 | unsigned op_cleanup:1; |
| 140 | unsigned go_s99_running_tofalse:1; |
| 141 | unsigned go_s99_running_totrue:1; |
| 142 | }; |
| 143 | |
| 144 | struct sdma_state { |
| 145 | struct kref kref; |
| 146 | struct completion comp; |
| 147 | enum sdma_states current_state; |
| 148 | unsigned current_op; |
| 149 | unsigned go_s99_running; |
| 150 | /* debugging/development */ |
| 151 | enum sdma_states previous_state; |
| 152 | unsigned previous_op; |
| 153 | enum sdma_events last_event; |
| 154 | }; |
| 155 | |
| 156 | /** |
| 157 | * DOC: sdma exported routines |
| 158 | * |
| 159 | * These sdma routines fit into three categories: |
| 160 | * - The SDMA API for building and submitting packets |
| 161 | * to the ring |
| 162 | * |
 * - Initialization and tear down routines to build up
 *   and tear down SDMA
| 165 | * |
| 166 | * - ISR entrances to handle interrupts, state changes |
| 167 | * and errors |
| 168 | */ |
| 169 | |
| 170 | /** |
| 171 | * DOC: sdma PSM/verbs API |
| 172 | * |
| 173 | * The sdma API is designed to be used by both PSM |
| 174 | * and verbs to supply packets to the SDMA ring. |
| 175 | * |
| 176 | * The usage of the API is as follows: |
| 177 | * |
| 178 | * Embed a struct iowait in the QP or |
| 179 | * PQ. The iowait should be initialized with a |
| 180 | * call to iowait_init(). |
| 181 | * |
| 182 | * The user of the API should create an allocation method |
 * for their version of the txreq. Slabs, pre-allocated lists,
| 184 | * and dma pools can be used. Once the user's overload of |
| 185 | * the sdma_txreq has been allocated, the sdma_txreq member |
| 186 | * must be initialized with sdma_txinit() or sdma_txinit_ahg(). |
| 187 | * |
 * The user's txreq structure must be declared with the
 * struct sdma_txreq as its first member.
| 189 | * |
| 190 | * The tx request, once initialized, is manipulated with calls to |
| 191 | * sdma_txadd_daddr(), sdma_txadd_page(), or sdma_txadd_kvaddr() |
| 192 | * for each disjoint memory location. It is the user's responsibility |
| 193 | * to understand the packet boundaries and page boundaries to do the |
 * appropriate number of sdma_txadd_* calls. The user
 * must be prepared to deal with failures from these routines due to
 * either memory allocation or DMA mapping failures.
| 197 | * |
| 198 | * The mapping specifics for each memory location are recorded |
| 199 | * in the tx. Memory locations added with sdma_txadd_page() |
| 200 | * and sdma_txadd_kvaddr() are automatically mapped when added |
 * to the tx and unmapped as part of the progress processing in the
| 202 | * SDMA interrupt handling. |
| 203 | * |
 * sdma_txadd_daddr() is used to add a dma_addr_t memory location to the
| 205 | * tx. An example of a use case would be a pre-allocated |
| 206 | * set of headers allocated via dma_pool_alloc() or |
| 207 | * dma_alloc_coherent(). For these memory locations, it |
| 208 | * is the responsibility of the user to handle that unmapping. |
| 209 | * (This would usually be at an unload or job termination.) |
| 210 | * |
| 211 | * The routine sdma_send_txreq() is used to submit |
| 212 | * a tx to the ring after the appropriate number of |
| 213 | * sdma_txadd_* have been done. |
| 214 | * |
| 215 | * If it is desired to send a burst of sdma_txreqs, sdma_send_txlist() |
| 216 | * can be used to submit a list of packets. |
| 217 | * |
| 218 | * The user is free to use the link overhead in the struct sdma_txreq as |
| 219 | * long as the tx isn't in flight. |
| 220 | * |
| 221 | * The extreme degenerate case of the number of descriptors |
| 222 | * exceeding the ring size is automatically handled as |
| 223 | * memory locations are added. An overflow of the descriptor |
| 224 | * array that is part of the sdma_txreq is also automatically |
| 225 | * handled. |
| 226 | * |
| 227 | */ |
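
/*
 * A minimal usage sketch of the flow described above. The my_txreq and
 * my_complete names, the allocation strategy, and the surrounding
 * variables are hypothetical; only the sdma_* calls come from this
 * header, and error unwinding is trimmed for brevity:
 *
 *	struct my_txreq {
 *		struct sdma_txreq txreq;	// must be the first member
 *		// ...user state...
 *	};
 *
 *	static void my_complete(struct sdma_txreq *txreq, int status)
 *	{
 *		// may run from ISR/tasklet context - must not sleep
 *	}
 *
 *	tx = kmalloc(sizeof(*tx), GFP_ATOMIC);
 *	ret = sdma_txinit(&tx->txreq, 0, hdrlen + datalen, my_complete);
 *	if (!ret)
 *		ret = sdma_txadd_kvaddr(dd, &tx->txreq, hdr, hdrlen);
 *	if (!ret)
 *		ret = sdma_txadd_page(dd, &tx->txreq, page, offset,
 *				      datalen, NULL, NULL, NULL);
 *	if (!ret)
 *		ret = sdma_send_txreq(sde, wait, &tx->txreq, pkts_sent);
 */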
| 228 | |
| 229 | /** |
| 230 | * DOC: Infrastructure calls |
| 231 | * |
| 232 | * sdma_init() is used to initialize data structures and |
| 233 | * CSRs for the desired number of SDMA engines. |
| 234 | * |
| 235 | * sdma_start() is used to kick the SDMA engines initialized |
| 236 | * with sdma_init(). Interrupts must be enabled at this |
| 237 | * point since aspects of the state machine are interrupt |
| 238 | * driven. |
| 239 | * |
| 240 | * sdma_engine_error() and sdma_engine_interrupt() are |
| 241 | * entrances for interrupts. |
| 242 | * |
| 243 | * sdma_map_init() is for the management of the mapping |
| 244 | * table when the number of vls is changed. |
| 245 | * |
| 246 | */ |
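
/*
 * A rough bring-up ordering sketch for the calls above (illustrative
 * only; the device initialization code is the authority):
 *
 *	ret = sdma_init(dd, port);	// engines, rings, CSR setup
 *	...
 *	ret = sdma_map_init(dd, port, num_vls, NULL); // NULL vl_engines:
 *						      // even engine spread
 *	...enable SDMA interrupts...
 *	sdma_start(dd);			// kick engines to s99_running
 */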
| 247 | |
| 248 | /* |
| 249 | * struct hw_sdma_desc - raw 128 bit SDMA descriptor |
| 250 | * |
| 251 | * This is the raw descriptor in the SDMA ring |
| 252 | */ |
| 253 | struct hw_sdma_desc { |
| 254 | /* private: don't use directly */ |
| 255 | __le64 qw[2]; |
| 256 | }; |
| 257 | |
| 258 | /** |
| 259 | * struct sdma_engine - Data pertaining to each SDMA engine. |
| 260 | * @dd: a back-pointer to the device data |
| 261 | * @ppd: per port back-pointer |
| 262 | * @imask: mask for irq manipulation |
| 263 | * @idle_mask: mask for determining if an interrupt is due to sdma_idle |
| 264 | * |
| 265 | * This structure has the state for each sdma_engine. |
| 266 | * |
 * Accessing non-public fields is not supported
 * since the private members are subject to change.
| 269 | */ |
| 270 | struct sdma_engine { |
| 271 | /* read mostly */ |
| 272 | struct hfi1_devdata *dd; |
| 273 | struct hfi1_pportdata *ppd; |
| 274 | /* private: */ |
| 275 | void __iomem *tail_csr; |
| 276 | u64 imask; /* clear interrupt mask */ |
| 277 | u64 idle_mask; |
| 278 | u64 progress_mask; |
| 279 | u64 int_mask; |
| 280 | /* private: */ |
| 281 | volatile __le64 *head_dma; /* DMA'ed by chip */ |
| 282 | /* private: */ |
| 283 | dma_addr_t head_phys; |
| 284 | /* private: */ |
| 285 | struct hw_sdma_desc *descq; |
| 286 | /* private: */ |
| 287 | unsigned descq_full_count; |
| 288 | struct sdma_txreq **tx_ring; |
| 289 | /* private: */ |
| 290 | dma_addr_t descq_phys; |
	/* private: */
	u32 sdma_mask;
	/* private: */
	struct sdma_state state;
	/* private: */
	int cpu;
| 297 | /* private: */ |
| 298 | u8 sdma_shift; |
| 299 | /* private: */ |
| 300 | u8 this_idx; /* zero relative engine */ |
| 301 | /* protect changes to senddmactrl shadow */ |
| 302 | spinlock_t senddmactrl_lock; |
| 303 | /* private: */ |
| 304 | u64 p_senddmactrl; /* shadow per-engine SendDmaCtrl */ |
| 305 | |
| 306 | /* read/write using tail_lock */ |
| 307 | spinlock_t tail_lock ____cacheline_aligned_in_smp; |
| 308 | #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER |
| 309 | /* private: */ |
| 310 | u64 tail_sn; |
| 311 | #endif |
| 312 | /* private: */ |
| 313 | u32 descq_tail; |
| 314 | /* private: */ |
| 315 | unsigned long ahg_bits; |
| 316 | /* private: */ |
| 317 | u16 desc_avail; |
| 318 | /* private: */ |
| 319 | u16 tx_tail; |
| 320 | /* private: */ |
| 321 | u16 descq_cnt; |
| 322 | |
| 323 | /* read/write using head_lock */ |
| 324 | /* private: */ |
| 325 | seqlock_t head_lock ____cacheline_aligned_in_smp; |
| 326 | #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER |
| 327 | /* private: */ |
| 328 | u64 head_sn; |
| 329 | #endif |
| 330 | /* private: */ |
| 331 | u32 descq_head; |
| 332 | /* private: */ |
| 333 | u16 tx_head; |
| 334 | /* private: */ |
| 335 | u64 last_status; |
	/* private: */
	u64 err_cnt;
	/* private: */
	u64 sdma_int_cnt;
| 340 | u64 idle_int_cnt; |
| 341 | u64 progress_int_cnt; |
| 342 | |
| 343 | /* private: */ |
| 344 | seqlock_t waitlock; |
| 345 | struct list_head dmawait; |
| 346 | |
| 347 | /* CONFIG SDMA for now, just blindly duplicate */ |
| 348 | /* private: */ |
| 349 | struct tasklet_struct sdma_hw_clean_up_task |
| 350 | ____cacheline_aligned_in_smp; |
| 351 | |
| 352 | /* private: */ |
| 353 | struct tasklet_struct sdma_sw_clean_up_task |
| 354 | ____cacheline_aligned_in_smp; |
| 355 | /* private: */ |
| 356 | struct work_struct err_halt_worker; |
	/* private: */
| 358 | struct timer_list err_progress_check_timer; |
| 359 | u32 progress_check_head; |
| 360 | /* private: */ |
| 361 | struct work_struct flush_worker; |
| 362 | /* protect flush list */ |
| 363 | spinlock_t flushlist_lock; |
| 364 | /* private: */ |
| 365 | struct list_head flushlist; |
| 366 | struct cpumask cpu_mask; |
| 367 | struct kobject kobj; |
| 368 | u32 msix_intr; |
| 369 | }; |
| 370 | |
| 371 | int sdma_init(struct hfi1_devdata *dd, u8 port); |
| 372 | void sdma_start(struct hfi1_devdata *dd); |
| 373 | void sdma_exit(struct hfi1_devdata *dd); |
| 374 | void sdma_clean(struct hfi1_devdata *dd, size_t num_engines); |
| 375 | void sdma_all_running(struct hfi1_devdata *dd); |
| 376 | void sdma_freeze_notify(struct hfi1_devdata *dd, int go_idle); |
| 377 | void sdma_freeze(struct hfi1_devdata *dd); |
| 378 | void sdma_unfreeze(struct hfi1_devdata *dd); |
| 379 | void sdma_wait(struct hfi1_devdata *dd); |
| 380 | |
| 381 | /** |
| 382 | * sdma_empty() - idle engine test |
 * @sde: sdma engine
| 384 | * |
| 385 | * Currently used by verbs as a latency optimization. |
| 386 | * |
| 387 | * Return: |
| 388 | * 1 - empty, 0 - non-empty |
| 389 | */ |
| 390 | static inline int sdma_empty(struct sdma_engine *sde) |
| 391 | { |
| 392 | return sde->descq_tail == sde->descq_head; |
| 393 | } |
| 394 | |
| 395 | static inline u16 sdma_descq_freecnt(struct sdma_engine *sde) |
| 396 | { |
| 397 | return sde->descq_cnt - |
| 398 | (sde->descq_tail - |
| 399 | READ_ONCE(sde->descq_head)) - 1; |
| 400 | } |
| 401 | |
| 402 | static inline u16 sdma_descq_inprocess(struct sdma_engine *sde) |
| 403 | { |
| 404 | return sde->descq_cnt - sdma_descq_freecnt(sde); |
| 405 | } |
| 406 | |
| 407 | /* |
 * Either head_lock or tail_lock is required to see
| 409 | * a steady state. |
| 410 | */ |
| 411 | static inline int __sdma_running(struct sdma_engine *engine) |
| 412 | { |
| 413 | return engine->state.current_state == sdma_state_s99_running; |
| 414 | } |
| 415 | |
| 416 | /** |
| 417 | * sdma_running() - state suitability test |
| 418 | * @engine: sdma engine |
| 419 | * |
| 420 | * sdma_running probes the internal state to determine if it is suitable |
| 421 | * for submitting packets. |
| 422 | * |
| 423 | * Return: |
| 424 | * 1 - ok to submit, 0 - not ok to submit |
| 425 | * |
| 426 | */ |
| 427 | static inline int sdma_running(struct sdma_engine *engine) |
| 428 | { |
| 429 | unsigned long flags; |
| 430 | int ret; |
| 431 | |
| 432 | spin_lock_irqsave(&engine->tail_lock, flags); |
| 433 | ret = __sdma_running(engine); |
	spin_unlock_irqrestore(&engine->tail_lock, flags);
| 435 | return ret; |
| 436 | } |
| 437 | |
| 438 | void _sdma_txreq_ahgadd( |
| 439 | struct sdma_txreq *tx, |
| 440 | u8 num_ahg, |
| 441 | u8 ahg_entry, |
| 442 | u32 *ahg, |
| 443 | u8 ahg_hlen); |
| 444 | |
| 445 | /** |
| 446 | * sdma_txinit_ahg() - initialize an sdma_txreq struct with AHG |
| 447 | * @tx: tx request to initialize |
| 448 | * @flags: flags to key last descriptor additions |
| 449 | * @tlen: total packet length (pbc + headers + data) |
| 450 | * @ahg_entry: ahg entry to use (0 - 31) |
 * @num_ahg: number of AHG descriptors for the first descriptor (0 - 9)
| 452 | * @ahg: array of AHG descriptors (up to 9 entries) |
| 453 | * @ahg_hlen: number of bytes from ASIC entry to use |
| 454 | * @cb: callback |
| 455 | * |
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent. This routine must be called to initialize the user-independent
| 458 | * fields. |
| 459 | * |
| 460 | * The currently supported flags are SDMA_TXREQ_F_URGENT, |
| 461 | * SDMA_TXREQ_F_AHG_COPY, and SDMA_TXREQ_F_USE_AHG. |
| 462 | * |
| 463 | * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the |
| 464 | * completion is desired as soon as possible. |
| 465 | * |
| 466 | * SDMA_TXREQ_F_AHG_COPY causes the header in the first descriptor to be |
 * copied to the chip entry. SDMA_TXREQ_F_USE_AHG causes the code to add
 * the AHG descriptors into the first 1 to 3 descriptors.
| 469 | * |
| 470 | * Completions of submitted requests can be gotten on selected |
| 471 | * txreqs by giving a completion routine callback to sdma_txinit() or |
| 472 | * sdma_txinit_ahg(). The environment in which the callback runs |
| 473 | * can be from an ISR, a tasklet, or a thread, so no sleeping |
| 474 | * kernel routines can be used. Aspects of the sdma ring may |
| 475 | * be locked so care should be taken with locking. |
| 476 | * |
| 477 | * The callback pointer can be NULL to avoid any callback for the packet |
| 478 | * being submitted. The callback will be provided this tx, a status, and a flag. |
| 479 | * |
| 480 | * The status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR, |
| 481 | * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN. |
| 482 | * |
 * The flag, if an iowait was used, indicates that the iowait
 * sdma_busy count has reached zero.
| 485 | * |
 * The user data portion of tlen should be precise. The sdma_txadd_*
 * entrances will pad with a descriptor referencing 1 - 3 bytes when the
 * number of bytes specified in tlen has been supplied to the sdma_txreq.
| 489 | * |
| 490 | * ahg_hlen is used to determine the number of on-chip entry bytes to |
| 491 | * use as the header. This is for cases where the stored header is |
| 492 | * larger than the header to be used in a packet. This is typical |
 * for verbs where an RDMA_WRITE_FIRST is larger than the packet in
 * an RDMA_WRITE_MIDDLE.
| 495 | * |
| 496 | */ |
| 497 | static inline int sdma_txinit_ahg( |
| 498 | struct sdma_txreq *tx, |
| 499 | u16 flags, |
| 500 | u16 tlen, |
| 501 | u8 ahg_entry, |
| 502 | u8 num_ahg, |
| 503 | u32 *ahg, |
| 504 | u8 ahg_hlen, |
| 505 | void (*cb)(struct sdma_txreq *, int)) |
| 506 | { |
| 507 | if (tlen == 0) |
| 508 | return -ENODATA; |
| 509 | if (tlen > MAX_SDMA_PKT_SIZE) |
| 510 | return -EMSGSIZE; |
| 511 | tx->desc_limit = ARRAY_SIZE(tx->descs); |
| 512 | tx->descp = &tx->descs[0]; |
	INIT_LIST_HEAD(&tx->list);
| 514 | tx->num_desc = 0; |
| 515 | tx->flags = flags; |
| 516 | tx->complete = cb; |
| 517 | tx->coalesce_buf = NULL; |
| 518 | tx->wait = NULL; |
| 519 | tx->packet_len = tlen; |
| 520 | tx->tlen = tx->packet_len; |
| 521 | tx->descs[0].qw[0] = SDMA_DESC0_FIRST_DESC_FLAG; |
| 522 | tx->descs[0].qw[1] = 0; |
| 523 | if (flags & SDMA_TXREQ_F_AHG_COPY) |
| 524 | tx->descs[0].qw[1] |= |
| 525 | (((u64)ahg_entry & SDMA_DESC1_HEADER_INDEX_MASK) |
| 526 | << SDMA_DESC1_HEADER_INDEX_SHIFT) | |
| 527 | (((u64)SDMA_AHG_COPY & SDMA_DESC1_HEADER_MODE_MASK) |
| 528 | << SDMA_DESC1_HEADER_MODE_SHIFT); |
| 529 | else if (flags & SDMA_TXREQ_F_USE_AHG && num_ahg) |
| 530 | _sdma_txreq_ahgadd(tx, num_ahg, ahg_entry, ahg, ahg_hlen); |
| 531 | return 0; |
| 532 | } |
| 533 | |
| 534 | /** |
| 535 | * sdma_txinit() - initialize an sdma_txreq struct (no AHG) |
| 536 | * @tx: tx request to initialize |
| 537 | * @flags: flags to key last descriptor additions |
| 538 | * @tlen: total packet length (pbc + headers + data) |
| 539 | * @cb: callback pointer |
| 540 | * |
 * The allocation of the sdma_txreq and its enclosing structure is user
 * dependent. This routine must be called to initialize the
 * user-independent fields.
| 544 | * |
 * The only currently supported flag is SDMA_TXREQ_F_URGENT.
| 546 | * |
| 547 | * SDMA_TXREQ_F_URGENT is used for latency sensitive situations where the |
| 548 | * completion is desired as soon as possible. |
| 549 | * |
| 550 | * Completions of submitted requests can be gotten on selected |
| 551 | * txreqs by giving a completion routine callback to sdma_txinit() or |
| 552 | * sdma_txinit_ahg(). The environment in which the callback runs |
| 553 | * can be from an ISR, a tasklet, or a thread, so no sleeping |
 * kernel routines can be used. Aspects of the sdma ring may
 * be locked so care should be taken with locking.
| 556 | * |
| 557 | * The callback pointer can be NULL to avoid any callback for the packet |
| 558 | * being submitted. |
| 559 | * |
| 560 | * The callback, if non-NULL, will be provided this tx and a status. The |
| 561 | * status will be one of SDMA_TXREQ_S_OK, SDMA_TXREQ_S_SENDERROR, |
| 562 | * SDMA_TXREQ_S_ABORTED, or SDMA_TXREQ_S_SHUTDOWN. |
| 563 | * |
| 564 | */ |
| 565 | static inline int sdma_txinit( |
| 566 | struct sdma_txreq *tx, |
| 567 | u16 flags, |
| 568 | u16 tlen, |
| 569 | void (*cb)(struct sdma_txreq *, int)) |
| 570 | { |
	return sdma_txinit_ahg(tx, flags, tlen, 0, 0, NULL, 0, cb);
| 572 | } |
| 573 | |
| 574 | /* helpers - don't use */ |
| 575 | static inline int sdma_mapping_type(struct sdma_desc *d) |
| 576 | { |
| 577 | return (d->qw[1] & SDMA_DESC1_GENERATION_SMASK) |
| 578 | >> SDMA_DESC1_GENERATION_SHIFT; |
| 579 | } |
| 580 | |
| 581 | static inline size_t sdma_mapping_len(struct sdma_desc *d) |
| 582 | { |
| 583 | return (d->qw[0] & SDMA_DESC0_BYTE_COUNT_SMASK) |
| 584 | >> SDMA_DESC0_BYTE_COUNT_SHIFT; |
| 585 | } |
| 586 | |
| 587 | static inline dma_addr_t sdma_mapping_addr(struct sdma_desc *d) |
| 588 | { |
| 589 | return (d->qw[0] & SDMA_DESC0_PHY_ADDR_SMASK) |
| 590 | >> SDMA_DESC0_PHY_ADDR_SHIFT; |
| 591 | } |
| 592 | |
| 593 | static inline void make_tx_sdma_desc( |
| 594 | struct sdma_txreq *tx, |
| 595 | int type, |
| 596 | dma_addr_t addr, |
| 597 | size_t len, |
| 598 | void *pinning_ctx, |
| 599 | void (*ctx_get)(void *), |
| 600 | void (*ctx_put)(void *)) |
| 601 | { |
| 602 | struct sdma_desc *desc = &tx->descp[tx->num_desc]; |
| 603 | |
| 604 | if (!tx->num_desc) { |
| 605 | /* qw[0] zero; qw[1] first, ahg mode already in from init */ |
| 606 | desc->qw[1] |= ((u64)type & SDMA_DESC1_GENERATION_MASK) |
| 607 | << SDMA_DESC1_GENERATION_SHIFT; |
| 608 | } else { |
| 609 | desc->qw[0] = 0; |
| 610 | desc->qw[1] = ((u64)type & SDMA_DESC1_GENERATION_MASK) |
| 611 | << SDMA_DESC1_GENERATION_SHIFT; |
| 612 | } |
| 613 | desc->qw[0] |= (((u64)addr & SDMA_DESC0_PHY_ADDR_MASK) |
| 614 | << SDMA_DESC0_PHY_ADDR_SHIFT) | |
| 615 | (((u64)len & SDMA_DESC0_BYTE_COUNT_MASK) |
| 616 | << SDMA_DESC0_BYTE_COUNT_SHIFT); |
| 617 | |
| 618 | desc->pinning_ctx = pinning_ctx; |
| 619 | desc->ctx_put = ctx_put; |
| 620 | if (pinning_ctx && ctx_get) |
| 621 | ctx_get(pinning_ctx); |
| 622 | } |
| 623 | |
| 624 | /* helper to extend txreq */ |
| 625 | int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx, |
| 626 | int type, void *kvaddr, struct page *page, |
| 627 | unsigned long offset, u16 len); |
| 628 | int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *); |
| 629 | void __sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *); |
| 630 | |
| 631 | static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx) |
| 632 | { |
| 633 | if (tx->num_desc) |
| 634 | __sdma_txclean(dd, tx); |
| 635 | } |
| 636 | |
| 637 | /* helpers used by public routines */ |
| 638 | static inline void _sdma_close_tx(struct hfi1_devdata *dd, |
| 639 | struct sdma_txreq *tx) |
| 640 | { |
| 641 | u16 last_desc = tx->num_desc - 1; |
| 642 | |
| 643 | tx->descp[last_desc].qw[0] |= SDMA_DESC0_LAST_DESC_FLAG; |
| 644 | tx->descp[last_desc].qw[1] |= dd->default_desc1; |
| 645 | if (tx->flags & SDMA_TXREQ_F_URGENT) |
| 646 | tx->descp[last_desc].qw[1] |= (SDMA_DESC1_HEAD_TO_HOST_FLAG | |
| 647 | SDMA_DESC1_INT_REQ_FLAG); |
| 648 | } |
| 649 | |
| 650 | static inline int _sdma_txadd_daddr( |
| 651 | struct hfi1_devdata *dd, |
| 652 | int type, |
| 653 | struct sdma_txreq *tx, |
| 654 | dma_addr_t addr, |
| 655 | u16 len, |
| 656 | void *pinning_ctx, |
| 657 | void (*ctx_get)(void *), |
| 658 | void (*ctx_put)(void *)) |
| 659 | { |
| 660 | int rval = 0; |
| 661 | |
| 662 | make_tx_sdma_desc( |
| 663 | tx, |
| 664 | type, |
| 665 | addr, len, |
| 666 | pinning_ctx, ctx_get, ctx_put); |
| 667 | WARN_ON(len > tx->tlen); |
| 668 | tx->num_desc++; |
| 669 | tx->tlen -= len; |
| 670 | /* special cases for last */ |
| 671 | if (!tx->tlen) { |
| 672 | if (tx->packet_len & (sizeof(u32) - 1)) { |
| 673 | rval = _pad_sdma_tx_descs(dd, tx); |
| 674 | if (rval) |
| 675 | return rval; |
| 676 | } else { |
| 677 | _sdma_close_tx(dd, tx); |
| 678 | } |
| 679 | } |
| 680 | return rval; |
| 681 | } |
| 682 | |
| 683 | /** |
| 684 | * sdma_txadd_page() - add a page to the sdma_txreq |
| 685 | * @dd: the device to use for mapping |
| 686 | * @tx: tx request to which the page is added |
| 687 | * @page: page to map |
| 688 | * @offset: offset within the page |
| 689 | * @len: length in bytes |
| 690 | * @pinning_ctx: context to be stored on struct sdma_desc .pinning_ctx. Not |
| 691 | * added if coalesce buffer is used. E.g. pointer to pinned-page |
| 692 | * cache entry for the sdma_desc. |
| 693 | * @ctx_get: optional function to take reference to @pinning_ctx. Not called if |
| 694 | * @pinning_ctx is NULL. |
| 695 | * @ctx_put: optional function to release reference to @pinning_ctx after |
| 696 | * sdma_desc completes. May be called in interrupt context so must |
| 697 | * not sleep. Not called if @pinning_ctx is NULL. |
| 698 | * |
| 699 | * This is used to add a page/offset/length descriptor. |
| 700 | * |
| 701 | * The mapping/unmapping of the page/offset/len is automatically handled. |
| 702 | * |
| 703 | * Return: |
| 704 | * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't |
| 705 | * extend/coalesce descriptor array |
| 706 | */ |
| 707 | static inline int sdma_txadd_page( |
| 708 | struct hfi1_devdata *dd, |
| 709 | struct sdma_txreq *tx, |
| 710 | struct page *page, |
| 711 | unsigned long offset, |
| 712 | u16 len, |
| 713 | void *pinning_ctx, |
| 714 | void (*ctx_get)(void *), |
| 715 | void (*ctx_put)(void *)) |
| 716 | { |
| 717 | dma_addr_t addr; |
| 718 | int rval; |
| 719 | |
	if (unlikely(tx->num_desc == tx->desc_limit)) {
| 721 | rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_PAGE, |
| 722 | NULL, page, offset, len); |
| 723 | if (rval <= 0) |
| 724 | return rval; |
| 725 | } |
| 726 | |
| 727 | addr = dma_map_page( |
| 728 | &dd->pcidev->dev, |
| 729 | page, |
| 730 | offset, |
| 731 | len, |
| 732 | DMA_TO_DEVICE); |
| 733 | |
| 734 | if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) { |
| 735 | __sdma_txclean(dd, tx); |
| 736 | return -ENOSPC; |
| 737 | } |
| 738 | |
| 739 | return _sdma_txadd_daddr(dd, SDMA_MAP_PAGE, tx, addr, len, |
| 740 | pinning_ctx, ctx_get, ctx_put); |
| 741 | } |
| 742 | |
| 743 | /** |
| 744 | * sdma_txadd_daddr() - add a dma address to the sdma_txreq |
| 745 | * @dd: the device to use for mapping |
| 746 | * @tx: sdma_txreq to which the page is added |
| 747 | * @addr: dma address mapped by caller |
| 748 | * @len: length in bytes |
| 749 | * |
| 750 | * This is used to add a descriptor for memory that is already dma mapped. |
| 751 | * |
| 752 | * In this case, there is no unmapping as part of the progress processing for |
| 753 | * this memory location. |
| 754 | * |
| 755 | * Return: |
| 756 | * 0 - success, -ENOMEM - couldn't extend descriptor array |
| 757 | */ |
| 759 | static inline int sdma_txadd_daddr( |
| 760 | struct hfi1_devdata *dd, |
| 761 | struct sdma_txreq *tx, |
| 762 | dma_addr_t addr, |
| 763 | u16 len) |
| 764 | { |
| 765 | int rval; |
| 766 | |
	if (unlikely(tx->num_desc == tx->desc_limit)) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_NONE,
					      NULL, NULL, 0, 0);
| 770 | if (rval <= 0) |
| 771 | return rval; |
| 772 | } |
| 773 | |
| 774 | return _sdma_txadd_daddr(dd, SDMA_MAP_NONE, tx, addr, len, |
| 775 | NULL, NULL, NULL); |
| 776 | } |
| 777 | |
| 778 | /** |
| 779 | * sdma_txadd_kvaddr() - add a kernel virtual address to sdma_txreq |
| 780 | * @dd: the device to use for mapping |
| 781 | * @tx: sdma_txreq to which the page is added |
| 782 | * @kvaddr: the kernel virtual address |
| 783 | * @len: length in bytes |
| 784 | * |
| 785 | * This is used to add a descriptor referenced by the indicated kvaddr and |
| 786 | * len. |
| 787 | * |
| 788 | * The mapping/unmapping of the kvaddr and len is automatically handled. |
| 789 | * |
| 790 | * Return: |
| 791 | * 0 - success, -ENOSPC - mapping fail, -ENOMEM - couldn't extend/coalesce |
| 792 | * descriptor array |
| 793 | */ |
| 794 | static inline int sdma_txadd_kvaddr( |
| 795 | struct hfi1_devdata *dd, |
| 796 | struct sdma_txreq *tx, |
| 797 | void *kvaddr, |
| 798 | u16 len) |
| 799 | { |
| 800 | dma_addr_t addr; |
| 801 | int rval; |
| 802 | |
	if (unlikely(tx->num_desc == tx->desc_limit)) {
		rval = ext_coal_sdma_tx_descs(dd, tx, SDMA_MAP_SINGLE,
					      kvaddr, NULL, 0, len);
| 806 | if (rval <= 0) |
| 807 | return rval; |
| 808 | } |
| 809 | |
| 810 | addr = dma_map_single( |
| 811 | &dd->pcidev->dev, |
| 812 | kvaddr, |
| 813 | len, |
| 814 | DMA_TO_DEVICE); |
| 815 | |
| 816 | if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) { |
| 817 | __sdma_txclean(dd, tx); |
| 818 | return -ENOSPC; |
| 819 | } |
| 820 | |
| 821 | return _sdma_txadd_daddr(dd, SDMA_MAP_SINGLE, tx, addr, len, |
| 822 | NULL, NULL, NULL); |
| 823 | } |
| 824 | |
| 825 | struct iowait_work; |
| 826 | |
| 827 | int sdma_send_txreq(struct sdma_engine *sde, |
| 828 | struct iowait_work *wait, |
| 829 | struct sdma_txreq *tx, |
| 830 | bool pkts_sent); |
| 831 | int sdma_send_txlist(struct sdma_engine *sde, |
| 832 | struct iowait_work *wait, |
| 833 | struct list_head *tx_list, |
| 834 | u16 *count_out); |
| 835 | |
| 836 | int sdma_ahg_alloc(struct sdma_engine *sde); |
| 837 | void sdma_ahg_free(struct sdma_engine *sde, int ahg_index); |
| 838 | |
/**
 * sdma_build_ahg_descriptor() - build ahg descriptor
 * @data: 16-bit value for the header update
 * @dwindex: dword index of the header field to update
 * @startbit: first bit of the field within the dword
 * @bits: length of the field in bits
 *
 * Build and return a 32 bit AHG descriptor.
 */
| 848 | static inline u32 sdma_build_ahg_descriptor( |
| 849 | u16 data, |
| 850 | u8 dwindex, |
| 851 | u8 startbit, |
| 852 | u8 bits) |
| 853 | { |
| 854 | return (u32)(1UL << SDMA_AHG_UPDATE_ENABLE_SHIFT | |
| 855 | ((startbit & SDMA_AHG_FIELD_START_MASK) << |
| 856 | SDMA_AHG_FIELD_START_SHIFT) | |
| 857 | ((bits & SDMA_AHG_FIELD_LEN_MASK) << |
| 858 | SDMA_AHG_FIELD_LEN_SHIFT) | |
| 859 | ((dwindex & SDMA_AHG_INDEX_MASK) << |
| 860 | SDMA_AHG_INDEX_SHIFT) | |
| 861 | ((data & SDMA_AHG_VALUE_MASK) << |
| 862 | SDMA_AHG_VALUE_SHIFT)); |
| 863 | } |
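
/*
 * Example (hypothetical header field): replace the 8 bits starting at
 * bit 16 of header dword 4 with new_val, as a single AHG update word:
 *
 *	ahg[n] = sdma_build_ahg_descriptor(new_val, 4, 16, 8);
 */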
| 864 | |
| 865 | /** |
 * sdma_progress - use seq number to detect head progress
| 867 | * @sde: sdma_engine to check |
| 868 | * @seq: base seq count |
| 869 | * @tx: txreq for which we need to check descriptor availability |
| 870 | * |
| 871 | * This is used in the appropriate spot in the sleep routine |
| 872 | * to check for potential ring progress. This routine gets the |
| 873 | * seqcount before queuing the iowait structure for progress. |
| 874 | * |
| 875 | * If the seqcount indicates that progress needs to be checked, |
| 876 | * re-submission is detected by checking whether the descriptor |
 * queue has enough descriptors for the txreq.
| 878 | */ |
| 879 | static inline unsigned sdma_progress(struct sdma_engine *sde, unsigned seq, |
| 880 | struct sdma_txreq *tx) |
| 881 | { |
	if (read_seqretry(&sde->head_lock, seq)) {
| 883 | sde->desc_avail = sdma_descq_freecnt(sde); |
| 884 | if (tx->num_desc > sde->desc_avail) |
| 885 | return 0; |
| 886 | return 1; |
| 887 | } |
| 888 | return 0; |
| 889 | } |
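
/*
 * Typical shape of a sleep/queue path using sdma_progress() (a sketch;
 * real callers hold additional locks not shown here):
 *
 *	seq = read_seqbegin(&sde->head_lock);
 *	// ...discover there is no descriptor room for tx...
 *	if (sdma_progress(sde, seq, tx))
 *		goto retry;	// head advanced; room may now exist
 *	// ...queue the iowait on sde->dmawait and block...
 */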
| 890 | |
| 891 | /* for use by interrupt handling */ |
| 892 | void sdma_engine_error(struct sdma_engine *sde, u64 status); |
| 893 | void sdma_engine_interrupt(struct sdma_engine *sde, u64 status); |
| 894 | |
| 895 | /* |
| 896 | * |
| 897 | * The diagram below details the relationship of the mapping structures |
| 898 | * |
| 899 | * Since the mapping now allows for non-uniform engines per vl, the |
| 900 | * number of engines for a vl is either the vl_engines[vl] or |
| 901 | * a computation based on num_sdma/num_vls: |
| 902 | * |
| 903 | * For example: |
| 904 | * nactual = vl_engines ? vl_engines[vl] : num_sdma/num_vls |
| 905 | * |
| 906 | * n = roundup to next highest power of 2 using nactual |
| 907 | * |
 * In the case where num_sdma/num_vls doesn't divide
 * evenly, the extras are added from the last vl downward.
| 910 | * |
| 911 | * For the case where n > nactual, the engines are assigned |
| 912 | * in a round robin fashion wrapping back to the first engine |
| 913 | * for a particular vl. |
| 914 | * |
| 915 | * dd->sdma_map |
| 916 | * | sdma_map_elem[0] |
| 917 | * | +--------------------+ |
| 918 | * v | mask | |
| 919 | * sdma_vl_map |--------------------| |
| 920 | * +--------------------------+ | sde[0] -> eng 1 | |
| 921 | * | list (RCU) | |--------------------| |
| 922 | * |--------------------------| ->| sde[1] -> eng 2 | |
| 923 | * | mask | --/ |--------------------| |
| 924 | * |--------------------------| -/ | * | |
| 925 | * | actual_vls (max 8) | -/ |--------------------| |
| 926 | * |--------------------------| --/ | sde[n-1] -> eng n | |
| 927 | * | vls (max 8) | -/ +--------------------+ |
| 928 | * |--------------------------| --/ |
| 929 | * | map[0] |-/ |
| 930 | * |--------------------------| +---------------------+ |
| 931 | * | map[1] |--- | mask | |
| 932 | * |--------------------------| \---- |---------------------| |
| 933 | * | * | \-- | sde[0] -> eng 1+n | |
| 934 | * | * | \---- |---------------------| |
| 935 | * | * | \->| sde[1] -> eng 2+n | |
| 936 | * |--------------------------| |---------------------| |
| 937 | * | map[vls - 1] |- | * | |
| 938 | * +--------------------------+ \- |---------------------| |
| 939 | * \- | sde[m-1] -> eng m+n | |
| 940 | * \ +---------------------+ |
| 941 | * \- |
| 942 | * \ |
| 943 | * \- +----------------------+ |
| 944 | * \- | mask | |
| 945 | * \ |----------------------| |
| 946 | * \- | sde[0] -> eng 1+m+n | |
| 947 | * \- |----------------------| |
| 948 | * >| sde[1] -> eng 2+m+n | |
| 949 | * |----------------------| |
| 950 | * | * | |
| 951 | * |----------------------| |
| 952 | * | sde[o-1] -> eng o+m+n| |
| 953 | * +----------------------+ |
| 954 | * |
| 955 | */ |
| 956 | |
| 957 | /** |
| 958 | * struct sdma_map_elem - mapping for a vl |
 * @mask: selector mask
 * @sde: array of engines for this vl
| 961 | * |
| 962 | * The mask is used to "mod" the selector |
| 963 | * to produce index into the trailing |
| 964 | * array of sdes. |
| 965 | */ |
| 966 | struct sdma_map_elem { |
| 967 | u32 mask; |
| 968 | struct sdma_engine *sde[]; |
| 969 | }; |
| 970 | |
| 971 | /** |
 * struct sdma_vl_map - mapping for a vl
 * @engine_to_vl: map of an engine to a vl
 * @list: rcu head for free callback
 * @mask: vl mask to "mod" the vl to produce an index to map array
 * @actual_vls: number of vls
 * @vls: number of vls rounded to next power of 2
 * @map: array of sdma_map_elem entries
| 979 | * |
| 980 | * This is the parent mapping structure. The trailing |
| 981 | * members of the struct point to sdma_map_elem entries, which |
| 982 | * in turn point to an array of sde's for that vl. |
| 983 | */ |
| 984 | struct sdma_vl_map { |
| 985 | s8 engine_to_vl[TXE_NUM_SDMA_ENGINES]; |
| 986 | struct rcu_head list; |
| 987 | u32 mask; |
| 988 | u8 actual_vls; |
| 989 | u8 vls; |
| 990 | struct sdma_map_elem *map[]; |
| 991 | }; |
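
/*
 * Putting the two structures together, engine selection reduces to two
 * masked lookups (a sketch under RCU, mirroring the diagram above):
 *
 *	rcu_read_lock();
 *	m = rcu_dereference(dd->sdma_map);
 *	e = m->map[vl & m->mask];
 *	sde = e->sde[selector & e->mask];
 *	rcu_read_unlock();
 */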
| 992 | |
| 993 | int sdma_map_init( |
| 994 | struct hfi1_devdata *dd, |
| 995 | u8 port, |
| 996 | u8 num_vls, |
| 997 | u8 *vl_engines); |
| 998 | |
| 999 | /* slow path */ |
| 1000 | void _sdma_engine_progress_schedule(struct sdma_engine *sde); |
| 1001 | |
| 1002 | /** |
| 1003 | * sdma_engine_progress_schedule() - schedule progress on engine |
| 1004 | * @sde: sdma_engine to schedule progress |
| 1005 | * |
| 1006 | * This is the fast path. |
| 1007 | * |
| 1008 | */ |
| 1009 | static inline void sdma_engine_progress_schedule( |
| 1010 | struct sdma_engine *sde) |
| 1011 | { |
| 1012 | if (!sde || sdma_descq_inprocess(sde) < (sde->descq_cnt / 8)) |
| 1013 | return; |
| 1014 | _sdma_engine_progress_schedule(sde); |
| 1015 | } |
| 1016 | |
| 1017 | struct sdma_engine *sdma_select_engine_sc( |
| 1018 | struct hfi1_devdata *dd, |
| 1019 | u32 selector, |
| 1020 | u8 sc5); |
| 1021 | |
| 1022 | struct sdma_engine *sdma_select_engine_vl( |
| 1023 | struct hfi1_devdata *dd, |
| 1024 | u32 selector, |
| 1025 | u8 vl); |
| 1026 | |
| 1027 | struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd, |
| 1028 | u32 selector, u8 vl); |
| 1029 | ssize_t sdma_get_cpu_to_sde_map(struct sdma_engine *sde, char *buf); |
| 1030 | ssize_t sdma_set_cpu_to_sde_map(struct sdma_engine *sde, const char *buf, |
| 1031 | size_t count); |
| 1032 | int sdma_engine_get_vl(struct sdma_engine *sde); |
| 1033 | void sdma_seqfile_dump_sde(struct seq_file *s, struct sdma_engine *); |
| 1034 | void sdma_seqfile_dump_cpu_list(struct seq_file *s, struct hfi1_devdata *dd, |
| 1035 | unsigned long cpuid); |
| 1036 | |
| 1037 | #ifdef CONFIG_SDMA_VERBOSITY |
| 1038 | void sdma_dumpstate(struct sdma_engine *); |
| 1039 | #endif |
| 1040 | static inline char *slashstrip(char *s) |
| 1041 | { |
| 1042 | char *r = s; |
| 1043 | |
| 1044 | while (*s) |
| 1045 | if (*s++ == '/') |
| 1046 | r = s; |
| 1047 | return r; |
| 1048 | } |
| 1049 | |
| 1050 | u16 sdma_get_descq_cnt(void); |
| 1051 | |
| 1052 | extern uint mod_num_sdma; |
| 1053 | |
| 1054 | void sdma_update_lmc(struct hfi1_devdata *dd, u64 mask, u32 lid); |
| 1055 | #endif |
| 1056 | |