// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
 * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include "efa_com.h"
#include "efa_regs_defs.h"

#define ADMIN_CMD_TIMEOUT_US 30000000 /* usecs */

#define EFA_REG_READ_TIMEOUT_US 50000 /* usecs */
#define EFA_MMIO_READ_INVALID 0xffffffff

#define EFA_POLL_INTERVAL_MS 100 /* msecs */

#define EFA_ASYNC_QUEUE_DEPTH 16
#define EFA_ADMIN_QUEUE_DEPTH 32

#define EFA_CTRL_MAJOR          0
#define EFA_CTRL_MINOR          0
#define EFA_CTRL_SUB_MINOR      1

enum efa_cmd_status {
	EFA_CMD_SUBMITTED,
	EFA_CMD_COMPLETED,
};

struct efa_comp_ctx {
	struct completion wait_event;
	struct efa_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum efa_cmd_status status;
	u16 cmd_id;
	u8 cmd_opcode;
	u8 occupied;
};

static const char *efa_com_cmd_str(u8 cmd)
{
#define EFA_CMD_STR_CASE(_cmd) case EFA_ADMIN_##_cmd: return #_cmd

	switch (cmd) {
	EFA_CMD_STR_CASE(CREATE_QP);
	EFA_CMD_STR_CASE(MODIFY_QP);
	EFA_CMD_STR_CASE(QUERY_QP);
	EFA_CMD_STR_CASE(DESTROY_QP);
	EFA_CMD_STR_CASE(CREATE_AH);
	EFA_CMD_STR_CASE(DESTROY_AH);
	EFA_CMD_STR_CASE(REG_MR);
	EFA_CMD_STR_CASE(DEREG_MR);
	EFA_CMD_STR_CASE(CREATE_CQ);
	EFA_CMD_STR_CASE(DESTROY_CQ);
	EFA_CMD_STR_CASE(GET_FEATURE);
	EFA_CMD_STR_CASE(SET_FEATURE);
	EFA_CMD_STR_CASE(GET_STATS);
	EFA_CMD_STR_CASE(ALLOC_PD);
	EFA_CMD_STR_CASE(DEALLOC_PD);
	EFA_CMD_STR_CASE(ALLOC_UAR);
	EFA_CMD_STR_CASE(DEALLOC_UAR);
	EFA_CMD_STR_CASE(CREATE_EQ);
	EFA_CMD_STR_CASE(DESTROY_EQ);
	default: return "unknown command opcode";
	}
#undef EFA_CMD_STR_CASE
}

void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low)
{
	*addr_low = lower_32_bits(addr);
	*addr_high = upper_32_bits(addr);
}

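/*
 * Read a device register using the "readless" protocol: the driver posts a
 * read request through the EFA_REGS_MMIO_REG_READ register and the device
 * DMA-writes the register value into a preallocated host response buffer
 * (mmio_read->read_resp). The sequence number in the request lets the driver
 * detect when the response for this particular request has landed. Returns
 * the register value, or EFA_MMIO_READ_INVALID on timeout or mismatch.
 */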
static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
	struct efa_admin_mmio_req_read_less_resp *read_resp;
	unsigned long exp_time;
	u32 mmio_read_reg = 0;
	u32 err;

	read_resp = mmio_read->read_resp;

	spin_lock(&mmio_read->lock);
	mmio_read->seq_num++;

	/* trash DMA req_id to identify when hardware is done */
	read_resp->req_id = mmio_read->seq_num + 0x9aL;
	EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REG_OFF, offset);
	EFA_SET(&mmio_read_reg, EFA_REGS_MMIO_REG_READ_REQ_ID,
		mmio_read->seq_num);

	writel(mmio_read_reg, edev->reg_bar + EFA_REGS_MMIO_REG_READ_OFF);

	exp_time = jiffies + usecs_to_jiffies(mmio_read->mmio_read_timeout);
	do {
		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
			break;
		udelay(1);
	} while (time_is_after_jiffies(exp_time));

	if (read_resp->req_id != mmio_read->seq_num) {
		ibdev_err_ratelimited(
			edev->efa_dev,
			"Reading register timed out. expected: req id[%u] offset[%#x] actual: req id[%u] offset[%#x]\n",
			mmio_read->seq_num, offset, read_resp->req_id,
			read_resp->reg_off);
		err = EFA_MMIO_READ_INVALID;
		goto out;
	}

	if (read_resp->reg_off != offset) {
		ibdev_err_ratelimited(
			edev->efa_dev,
			"Reading register failed: wrong offset provided\n");
		err = EFA_MMIO_READ_INVALID;
		goto out;
	}

	err = read_resp->reg_val;
out:
	spin_unlock(&mmio_read->lock);
	return err;
}

static int efa_com_admin_init_sq(struct efa_com_dev *edev)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_com_admin_sq *sq = &aq->sq;
	u16 size = aq->depth * sizeof(*sq->entries);
	u32 aq_caps = 0;
	u32 addr_high;
	u32 addr_low;

	sq->entries =
		dma_alloc_coherent(aq->dmadev, size, &sq->dma_addr, GFP_KERNEL);
	if (!sq->entries)
		return -ENOMEM;

	spin_lock_init(&sq->lock);

	sq->cc = 0;
	sq->pc = 0;
	sq->phase = 1;

	sq->db_addr = (u32 __iomem *)(edev->reg_bar + EFA_REGS_AQ_PROD_DB_OFF);

	addr_high = upper_32_bits(sq->dma_addr);
	addr_low = lower_32_bits(sq->dma_addr);

	writel(addr_low, edev->reg_bar + EFA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_AQ_BASE_HI_OFF);

	EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_DEPTH, aq->depth);
	EFA_SET(&aq_caps, EFA_REGS_AQ_CAPS_AQ_ENTRY_SIZE,
		sizeof(struct efa_admin_aq_entry));

	writel(aq_caps, edev->reg_bar + EFA_REGS_AQ_CAPS_OFF);

	return 0;
}

static int efa_com_admin_init_cq(struct efa_com_dev *edev)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_com_admin_cq *cq = &aq->cq;
	u16 size = aq->depth * sizeof(*cq->entries);
	u32 acq_caps = 0;
	u32 addr_high;
	u32 addr_low;

	cq->entries =
		dma_alloc_coherent(aq->dmadev, size, &cq->dma_addr, GFP_KERNEL);
	if (!cq->entries)
		return -ENOMEM;

	spin_lock_init(&cq->lock);

	cq->cc = 0;
	cq->phase = 1;

	addr_high = upper_32_bits(cq->dma_addr);
	addr_low = lower_32_bits(cq->dma_addr);

	writel(addr_low, edev->reg_bar + EFA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_ACQ_BASE_HI_OFF);

	EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_DEPTH, aq->depth);
	EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE,
		sizeof(struct efa_admin_acq_entry));
	EFA_SET(&acq_caps, EFA_REGS_ACQ_CAPS_ACQ_MSIX_VECTOR,
		aq->msix_vector_idx);

	writel(acq_caps, edev->reg_bar + EFA_REGS_ACQ_CAPS_OFF);

	return 0;
}

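/*
 * Allocate the async event notification queue (AENQ) ring, register its base
 * address, depth, entry size and MSI-X vector with the device, and ring the
 * consumer doorbell once so the device sees the whole ring as available.
 */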
static int efa_com_admin_init_aenq(struct efa_com_dev *edev,
				   struct efa_aenq_handlers *aenq_handlers)
{
	struct efa_com_aenq *aenq = &edev->aenq;
	u32 addr_low, addr_high;
	u32 aenq_caps = 0;
	u16 size;

	if (!aenq_handlers) {
		ibdev_err(edev->efa_dev, "aenq handlers pointer is NULL\n");
		return -EINVAL;
	}

	size = EFA_ASYNC_QUEUE_DEPTH * sizeof(*aenq->entries);
	aenq->entries = dma_alloc_coherent(edev->dmadev, size, &aenq->dma_addr,
					   GFP_KERNEL);
	if (!aenq->entries)
		return -ENOMEM;

	aenq->aenq_handlers = aenq_handlers;
	aenq->depth = EFA_ASYNC_QUEUE_DEPTH;
	aenq->cc = 0;
	aenq->phase = 1;

	addr_low = lower_32_bits(aenq->dma_addr);
	addr_high = upper_32_bits(aenq->dma_addr);

	writel(addr_low, edev->reg_bar + EFA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, edev->reg_bar + EFA_REGS_AENQ_BASE_HI_OFF);

	EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_DEPTH, aenq->depth);
	EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE,
		sizeof(struct efa_admin_aenq_entry));
	EFA_SET(&aenq_caps, EFA_REGS_AENQ_CAPS_AENQ_MSIX_VECTOR,
		aenq->msix_vector_idx);
	writel(aenq_caps, edev->reg_bar + EFA_REGS_AENQ_CAPS_OFF);

	/*
	 * Init cons_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(edev->aenq.cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);

	return 0;
}

/* ID to be used with efa_com_get_comp_ctx */
static u16 efa_com_alloc_ctx_id(struct efa_com_admin_queue *aq)
{
	u16 ctx_id;

	spin_lock(&aq->comp_ctx_lock);
	ctx_id = aq->comp_ctx_pool[aq->comp_ctx_pool_next];
	aq->comp_ctx_pool_next++;
	spin_unlock(&aq->comp_ctx_lock);

	return ctx_id;
}

static void efa_com_dealloc_ctx_id(struct efa_com_admin_queue *aq,
				   u16 ctx_id)
{
	spin_lock(&aq->comp_ctx_lock);
	aq->comp_ctx_pool_next--;
	aq->comp_ctx_pool[aq->comp_ctx_pool_next] = ctx_id;
	spin_unlock(&aq->comp_ctx_lock);
}

static inline void efa_com_put_comp_ctx(struct efa_com_admin_queue *aq,
					struct efa_comp_ctx *comp_ctx)
{
	u16 cmd_id = EFA_GET(&comp_ctx->user_cqe->acq_common_descriptor.command,
			     EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);
	u16 ctx_id = cmd_id & (aq->depth - 1);

	ibdev_dbg(aq->efa_dev, "Put completion command_id %#x\n", cmd_id);
	comp_ctx->occupied = 0;
	efa_com_dealloc_ctx_id(aq, ctx_id);
}

static struct efa_comp_ctx *efa_com_get_comp_ctx(struct efa_com_admin_queue *aq,
						 u16 cmd_id, bool capture)
{
	u16 ctx_id = cmd_id & (aq->depth - 1);

	if (aq->comp_ctx[ctx_id].occupied && capture) {
		ibdev_err_ratelimited(
			aq->efa_dev,
			"Completion context for command_id %#x is occupied\n",
			cmd_id);
		return NULL;
	}

	if (capture) {
		aq->comp_ctx[ctx_id].occupied = 1;
		ibdev_dbg(aq->efa_dev,
			  "Take completion ctxt for command_id %#x\n", cmd_id);
	}

	return &aq->comp_ctx[ctx_id];
}

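/*
 * Build and post a single admin command; called with sq.lock held. The
 * command id placed in the descriptor encodes the completion context id in
 * its low bits and producer-counter entropy in its high bits, which lets
 * stale completions be detected. The doorbell write publishes the new
 * producer counter to the device.
 */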
static struct efa_comp_ctx *__efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
						       struct efa_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct efa_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct efa_admin_aq_entry *aqe;
	struct efa_comp_ctx *comp_ctx;
	u16 queue_size_mask;
	u16 cmd_id;
	u16 ctx_id;
	u16 pi;

	queue_size_mask = aq->depth - 1;
	pi = aq->sq.pc & queue_size_mask;

	ctx_id = efa_com_alloc_ctx_id(aq);

	/* cmd_id LSBs are the ctx_id and MSBs are entropy bits from pc */
	cmd_id = ctx_id & queue_size_mask;
	cmd_id |= aq->sq.pc & ~queue_size_mask;
	cmd_id &= EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	cmd->aq_common_descriptor.command_id = cmd_id;
	EFA_SET(&cmd->aq_common_descriptor.flags,
		EFA_ADMIN_AQ_COMMON_DESC_PHASE, aq->sq.phase);

	comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, true);
	if (!comp_ctx) {
		efa_com_dealloc_ctx_id(aq, ctx_id);
		return ERR_PTR(-EINVAL);
	}

	comp_ctx->status = EFA_CMD_SUBMITTED;
	comp_ctx->comp_size = comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
	comp_ctx->cmd_id = cmd_id;

	reinit_completion(&comp_ctx->wait_event);

	aqe = &aq->sq.entries[pi];
	memset(aqe, 0, sizeof(*aqe));
	memcpy(aqe, cmd, cmd_size_in_bytes);

	aq->sq.pc++;
	atomic64_inc(&aq->stats.submitted_cmd);

	if ((aq->sq.pc & queue_size_mask) == 0)
		aq->sq.phase = !aq->sq.phase;

	/* barrier not needed in case of writel */
	writel(aq->sq.pc, aq->sq.db_addr);

	return comp_ctx;
}

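/*
 * Allocate the completion context array and the pool of free context ids.
 * The pool is used as a stack: comp_ctx_pool_next always points at the next
 * free id, so allocation and release are O(1) under comp_ctx_lock.
 */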
static inline int efa_com_init_comp_ctxt(struct efa_com_admin_queue *aq)
{
	size_t pool_size = aq->depth * sizeof(*aq->comp_ctx_pool);
	size_t size = aq->depth * sizeof(struct efa_comp_ctx);
	struct efa_comp_ctx *comp_ctx;
	u16 i;

	aq->comp_ctx = devm_kzalloc(aq->dmadev, size, GFP_KERNEL);
	aq->comp_ctx_pool = devm_kzalloc(aq->dmadev, pool_size, GFP_KERNEL);
	if (!aq->comp_ctx || !aq->comp_ctx_pool) {
		devm_kfree(aq->dmadev, aq->comp_ctx_pool);
		devm_kfree(aq->dmadev, aq->comp_ctx);
		return -ENOMEM;
	}

	for (i = 0; i < aq->depth; i++) {
		comp_ctx = efa_com_get_comp_ctx(aq, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);

		aq->comp_ctx_pool[i] = i;
	}

	spin_lock_init(&aq->comp_ctx_lock);

	aq->comp_ctx_pool_next = 0;

	return 0;
}

static struct efa_comp_ctx *efa_com_submit_admin_cmd(struct efa_com_admin_queue *aq,
						     struct efa_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct efa_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	struct efa_comp_ctx *comp_ctx;

	spin_lock(&aq->sq.lock);
	if (!test_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state)) {
		ibdev_err_ratelimited(aq->efa_dev, "Admin queue is closed\n");
		spin_unlock(&aq->sq.lock);
		return ERR_PTR(-ENODEV);
	}

	comp_ctx = __efa_com_submit_admin_cmd(aq, cmd, cmd_size_in_bytes, comp,
					      comp_size_in_bytes);
	spin_unlock(&aq->sq.lock);
	if (IS_ERR(comp_ctx))
		clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);

	return comp_ctx;
}

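/*
 * Match one ACQ entry to its completion context, copy the completion into
 * the caller-supplied buffer and, in interrupt mode, wake up the waiter.
 * Returns -EINVAL for a completion that doesn't correspond to an in-flight
 * command.
 */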
static int efa_com_handle_single_admin_completion(struct efa_com_admin_queue *aq,
						  struct efa_admin_acq_entry *cqe)
{
	struct efa_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = EFA_GET(&cqe->acq_common_descriptor.command,
			 EFA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID);

	comp_ctx = efa_com_get_comp_ctx(aq, cmd_id, false);
	if (comp_ctx->status != EFA_CMD_SUBMITTED) {
		ibdev_err(aq->efa_dev,
			  "Received completion with unexpected command id[%d], sq producer: %d, sq consumer: %d, cq consumer: %d\n",
			  cmd_id, aq->sq.pc, aq->sq.cc, aq->cq.cc);
		return -EINVAL;
	}

	comp_ctx->status = EFA_CMD_COMPLETED;
	memcpy(comp_ctx->user_cqe, cqe, comp_ctx->comp_size);

	if (!test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
		complete(&comp_ctx->wait_event);

	return 0;
}

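/*
 * Consume all pending admin completions; called with cq.lock held. Entries
 * are valid when their phase bit matches the current queue phase; the phase
 * flips every time the consumer index wraps around the ring.
 */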
static void efa_com_handle_admin_completion(struct efa_com_admin_queue *aq)
{
	struct efa_admin_acq_entry *cqe;
	u16 queue_size_mask;
	u16 comp_cmds = 0;
	u8 phase;
	int err;
	u16 ci;

	queue_size_mask = aq->depth - 1;

	ci = aq->cq.cc & queue_size_mask;
	phase = aq->cq.phase;

	cqe = &aq->cq.entries[ci];

	/* Go over all the completions */
	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
		EFA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/*
		 * Do not read the rest of the completion entry until the
		 * phase bit has been validated
		 */
		dma_rmb();
		err = efa_com_handle_single_admin_completion(aq, cqe);
		if (!err)
			comp_cmds++;

		aq->cq.cc++;
		ci++;
		if (ci == aq->depth) {
			ci = 0;
			phase = !phase;
		}

		cqe = &aq->cq.entries[ci];
	}

	aq->cq.phase = phase;
	aq->sq.cc += comp_cmds;
	atomic64_add(comp_cmds, &aq->stats.completed_cmd);
}

static int efa_com_comp_status_to_errno(u8 comp_status)
{
	switch (comp_status) {
	case EFA_ADMIN_SUCCESS:
		return 0;
	case EFA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case EFA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case EFA_ADMIN_BAD_OPCODE:
	case EFA_ADMIN_MALFORMED_REQUEST:
	case EFA_ADMIN_ILLEGAL_PARAMETER:
	case EFA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	default:
		return -EINVAL;
	}
}

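/*
 * Poll the admin completion queue until the command completes or
 * aq->completion_timeout (usecs) expires, sleeping poll_interval msecs
 * between iterations.
 */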
static int efa_com_wait_and_process_admin_cq_polling(struct efa_comp_ctx *comp_ctx,
						     struct efa_com_admin_queue *aq)
{
	unsigned long timeout;
	unsigned long flags;
	int err;

	timeout = jiffies + usecs_to_jiffies(aq->completion_timeout);

	while (1) {
		spin_lock_irqsave(&aq->cq.lock, flags);
		efa_com_handle_admin_completion(aq);
		spin_unlock_irqrestore(&aq->cq.lock, flags);

		if (comp_ctx->status != EFA_CMD_SUBMITTED)
			break;

		if (time_is_before_jiffies(timeout)) {
			ibdev_err_ratelimited(
				aq->efa_dev,
				"Wait for completion (polling) timeout\n");
			/* EFA didn't have any completion */
			atomic64_inc(&aq->stats.no_completion);

			clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
			err = -ETIME;
			goto out;
		}

		msleep(aq->poll_interval);
	}

	err = efa_com_comp_status_to_errno(comp_ctx->user_cqe->acq_common_descriptor.status);
out:
	efa_com_put_comp_ctx(aq, comp_ctx);
	return err;
}

static int efa_com_wait_and_process_admin_cq_interrupts(struct efa_comp_ctx *comp_ctx,
							struct efa_com_admin_queue *aq)
{
	unsigned long flags;
	int err;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(aq->completion_timeout));

	/*
	 * In case the command wasn't completed, find out the root cause.
	 * There might be two kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the driver didn't receive any MSI-X
	 *    interrupt for it.
	 */
	if (comp_ctx->status == EFA_CMD_SUBMITTED) {
		spin_lock_irqsave(&aq->cq.lock, flags);
		efa_com_handle_admin_completion(aq);
		spin_unlock_irqrestore(&aq->cq.lock, flags);

		atomic64_inc(&aq->stats.no_completion);

		if (comp_ctx->status == EFA_CMD_COMPLETED)
			ibdev_err_ratelimited(
				aq->efa_dev,
				"The device sent a completion but the driver didn't receive any MSI-X interrupt for admin cmd %s(%d) status %d (id: %d, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
				efa_com_cmd_str(comp_ctx->cmd_opcode),
				comp_ctx->cmd_opcode, comp_ctx->status,
				comp_ctx->cmd_id, aq->sq.pc, aq->sq.cc,
				aq->cq.cc);
		else
			ibdev_err_ratelimited(
				aq->efa_dev,
				"The device didn't send any completion for admin cmd %s(%d) status %d (id: %d, sq producer: %d, sq consumer: %d, cq consumer: %d)\n",
				efa_com_cmd_str(comp_ctx->cmd_opcode),
				comp_ctx->cmd_opcode, comp_ctx->status,
				comp_ctx->cmd_id, aq->sq.pc, aq->sq.cc,
				aq->cq.cc);

		clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);
		err = -ETIME;
		goto out;
	}

	err = efa_com_comp_status_to_errno(comp_ctx->user_cqe->acq_common_descriptor.status);
out:
	efa_com_put_comp_ctx(aq, comp_ctx);
	return err;
}

/*
 * There are two ways to wait for a completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on a wait queue until the completion is ready
 * (or the timeout expires).
 * In async mode the IRQ handler is expected to have called
 * efa_com_handle_admin_completion to mark the completions.
 */
static int efa_com_wait_and_process_admin_cq(struct efa_comp_ctx *comp_ctx,
					     struct efa_com_admin_queue *aq)
{
	if (test_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state))
		return efa_com_wait_and_process_admin_cq_polling(comp_ctx, aq);

	return efa_com_wait_and_process_admin_cq_interrupts(comp_ctx, aq);
}

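/*
 * Illustrative call pattern for efa_com_cmd_exec() below (a sketch only; the
 * command wrappers elsewhere in the driver are the real users):
 *
 *	struct efa_admin_alloc_pd_resp resp = {};
 *	struct efa_admin_alloc_pd_cmd cmd = {};
 *	int err;
 *
 *	cmd.aq_common_descriptor.opcode = EFA_ADMIN_ALLOC_PD;
 *	err = efa_com_cmd_exec(aq,
 *			       (struct efa_admin_aq_entry *)&cmd, sizeof(cmd),
 *			       (struct efa_admin_acq_entry *)&resp,
 *			       sizeof(resp));
 */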
/**
 * efa_com_cmd_exec - Execute admin command
 * @aq: admin queue.
 * @cmd: the admin command to execute.
 * @cmd_size: the command size.
 * @comp: command completion return entry.
 * @comp_size: command completion size.
 * Submit an admin command and then wait until the device returns a
 * completion.
 * The completion is copied into comp.
 *
 * @return - 0 on success, negative value on failure.
 */
int efa_com_cmd_exec(struct efa_com_admin_queue *aq,
		     struct efa_admin_aq_entry *cmd,
		     size_t cmd_size,
		     struct efa_admin_acq_entry *comp,
		     size_t comp_size)
{
	struct efa_comp_ctx *comp_ctx;
	int err;

	might_sleep();

	/* In case of queue FULL */
	down(&aq->avail_cmds);

	ibdev_dbg(aq->efa_dev, "%s (opcode %d)\n",
		  efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
		  cmd->aq_common_descriptor.opcode);
	comp_ctx = efa_com_submit_admin_cmd(aq, cmd, cmd_size, comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		ibdev_err_ratelimited(
			aq->efa_dev,
			"Failed to submit command %s (opcode %u) err %pe\n",
			efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
			cmd->aq_common_descriptor.opcode, comp_ctx);

		up(&aq->avail_cmds);
		atomic64_inc(&aq->stats.cmd_err);
		return PTR_ERR(comp_ctx);
	}

	err = efa_com_wait_and_process_admin_cq(comp_ctx, aq);
	if (err) {
		ibdev_err_ratelimited(
			aq->efa_dev,
			"Failed to process command %s (opcode %u) comp_status %d err %d\n",
			efa_com_cmd_str(cmd->aq_common_descriptor.opcode),
			cmd->aq_common_descriptor.opcode,
			comp_ctx->user_cqe->acq_common_descriptor.status, err);
		atomic64_inc(&aq->stats.cmd_err);
	}

	up(&aq->avail_cmds);

	return err;
}

/**
 * efa_com_admin_destroy - Destroy the admin and the async events queues.
 * @edev: EFA communication layer struct
 */
void efa_com_admin_destroy(struct efa_com_dev *edev)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_com_aenq *aenq = &edev->aenq;
	struct efa_com_admin_cq *cq = &aq->cq;
	struct efa_com_admin_sq *sq = &aq->sq;
	u16 size;

	clear_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);

	devm_kfree(edev->dmadev, aq->comp_ctx_pool);
	devm_kfree(edev->dmadev, aq->comp_ctx);

	size = aq->depth * sizeof(*sq->entries);
	dma_free_coherent(edev->dmadev, size, sq->entries, sq->dma_addr);

	size = aq->depth * sizeof(*cq->entries);
	dma_free_coherent(edev->dmadev, size, cq->entries, cq->dma_addr);

	size = aenq->depth * sizeof(*aenq->entries);
	dma_free_coherent(edev->dmadev, size, aenq->entries, aenq->dma_addr);
}

/**
 * efa_com_set_admin_polling_mode - Set the admin completion queue polling mode
 * @edev: EFA communication layer struct
 * @polling: Enable/Disable polling mode
 *
 * Set the admin completion mode.
 */
void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		EFA_SET(&mask_value, EFA_REGS_INTR_MASK_EN, 1);

	writel(mask_value, edev->reg_bar + EFA_REGS_INTR_MASK_OFF);
	if (polling)
		set_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
	else
		clear_bit(EFA_AQ_STATE_POLLING_BIT, &edev->aq.state);
}

static void efa_com_stats_init(struct efa_com_dev *edev)
{
	atomic64_t *s = (atomic64_t *)&edev->aq.stats;
	int i;

	for (i = 0; i < sizeof(edev->aq.stats) / sizeof(*s); i++, s++)
		atomic64_set(s, 0);
}

/**
 * efa_com_admin_init - Init the admin and the async queues
 * @edev: EFA communication layer struct
 * @aenq_handlers: The handlers to be called upon AENQ events.
 *
 * Initialize the admin submission and completion queues.
 * Initialize the asynchronous events notification queues.
 *
 * @return - 0 on success, negative value on failure.
 */
int efa_com_admin_init(struct efa_com_dev *edev,
		       struct efa_aenq_handlers *aenq_handlers)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	u32 timeout;
	u32 dev_sts;
	u32 cap;
	int err;

	dev_sts = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
	if (!EFA_GET(&dev_sts, EFA_REGS_DEV_STS_READY)) {
		ibdev_err(edev->efa_dev,
			  "Device isn't ready, abort com init %#x\n", dev_sts);
		return -ENODEV;
	}

	aq->depth = EFA_ADMIN_QUEUE_DEPTH;

	aq->dmadev = edev->dmadev;
	aq->efa_dev = edev->efa_dev;
	set_bit(EFA_AQ_STATE_POLLING_BIT, &aq->state);

	sema_init(&aq->avail_cmds, aq->depth);

	efa_com_stats_init(edev);

	err = efa_com_init_comp_ctxt(aq);
	if (err)
		return err;

	err = efa_com_admin_init_sq(edev);
	if (err)
		goto err_destroy_comp_ctxt;

	err = efa_com_admin_init_cq(edev);
	if (err)
		goto err_destroy_sq;

	efa_com_set_admin_polling_mode(edev, false);

	err = efa_com_admin_init_aenq(edev, aenq_handlers);
	if (err)
		goto err_destroy_cq;

	cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
	timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO);
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		aq->completion_timeout = timeout * 100000;
	else
		aq->completion_timeout = ADMIN_CMD_TIMEOUT_US;

	aq->poll_interval = EFA_POLL_INTERVAL_MS;

	set_bit(EFA_AQ_STATE_RUNNING_BIT, &aq->state);

	return 0;

err_destroy_cq:
	dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->cq.entries),
			  aq->cq.entries, aq->cq.dma_addr);
err_destroy_sq:
	dma_free_coherent(edev->dmadev, aq->depth * sizeof(*aq->sq.entries),
			  aq->sq.entries, aq->sq.dma_addr);
err_destroy_comp_ctxt:
	devm_kfree(edev->dmadev, aq->comp_ctx);

	return err;
}

/**
 * efa_com_admin_q_comp_intr_handler - admin queue interrupt handler
 * @edev: EFA communication layer struct
 *
 * This method goes over the admin completion queue and wakes up
 * all the pending threads that wait on the commands wait event.
 *
 * Note: Should be called after MSI-X interrupt.
 */
void efa_com_admin_q_comp_intr_handler(struct efa_com_dev *edev)
{
	unsigned long flags;

	spin_lock_irqsave(&edev->aq.cq.lock, flags);
	efa_com_handle_admin_completion(&edev->aq);
	spin_unlock_irqrestore(&edev->aq.cq.lock, flags);
}

/*
 * efa_com_get_specific_aenq_cb:
 * return the handler that is relevant to the specific event group
 */
static efa_aenq_handler efa_com_get_specific_aenq_cb(struct efa_com_dev *edev,
						     u16 group)
{
	struct efa_aenq_handlers *aenq_handlers = edev->aenq.aenq_handlers;

	if (group < EFA_MAX_HANDLERS && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/**
 * efa_com_aenq_intr_handler - AENQ interrupt handler
 * @edev: EFA communication layer struct
 * @data: Data of interrupt handler.
 *
 * Go over the async event notification queue and call the proper aenq handler.
 */
void efa_com_aenq_intr_handler(struct efa_com_dev *edev, void *data)
{
	struct efa_admin_aenq_common_desc *aenq_common;
	struct efa_com_aenq *aenq = &edev->aenq;
	struct efa_admin_aenq_entry *aenq_e;
	efa_aenq_handler handler_cb;
	u32 processed = 0;
	u8 phase;
	u32 ci;

	ci = aenq->cc & (aenq->depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[ci]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE(aenq_common->flags) &
		EFA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/*
		 * Do not read the rest of the completion entry until the
		 * phase bit has been validated
		 */
		dma_rmb();

		/* Handle specific event */
		handler_cb = efa_com_get_specific_aenq_cb(edev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		ci++;
		processed++;

		if (ci == aenq->depth) {
			ci = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[ci];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->cc += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* barrier not needed in case of writel */
	writel(aenq->cc, edev->reg_bar + EFA_REGS_AENQ_CONS_DB_OFF);
}

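/*
 * Program the DMA address of the readless-read response buffer into the
 * device. Also called after an FLR, which clears these registers.
 */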
static void efa_com_mmio_reg_read_resp_addr_init(struct efa_com_dev *edev)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;
	u32 addr_high;
	u32 addr_low;

	/* dma_addr_bits is unknown at this point */
	addr_high = (mmio_read->read_resp_dma_addr >> 32) & GENMASK(31, 0);
	addr_low = mmio_read->read_resp_dma_addr & GENMASK(31, 0);

	writel(addr_high, edev->reg_bar + EFA_REGS_MMIO_RESP_HI_OFF);
	writel(addr_low, edev->reg_bar + EFA_REGS_MMIO_RESP_LO_OFF);
}

int efa_com_mmio_reg_read_init(struct efa_com_dev *edev)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_alloc_coherent(edev->dmadev, sizeof(*mmio_read->read_resp),
				   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (!mmio_read->read_resp)
		return -ENOMEM;

	efa_com_mmio_reg_read_resp_addr_init(edev);

	mmio_read->read_resp->req_id = 0;
	mmio_read->seq_num = 0;
	mmio_read->mmio_read_timeout = EFA_REG_READ_TIMEOUT_US;

	return 0;
}

void efa_com_mmio_reg_read_destroy(struct efa_com_dev *edev)
{
	struct efa_com_mmio_read *mmio_read = &edev->mmio_read;

	dma_free_coherent(edev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);
}

int efa_com_validate_version(struct efa_com_dev *edev)
{
	u32 min_ctrl_ver = 0;
	u32 ctrl_ver_masked;
	u32 min_ver = 0;
	u32 ctrl_ver;
	u32 ver;

	/*
	 * Make sure the EFA version and the controller version are at least
	 * as high as the versions the driver expects.
	 */
	ver = efa_com_reg_read32(edev, EFA_REGS_VERSION_OFF);
	ctrl_ver = efa_com_reg_read32(edev,
				      EFA_REGS_CONTROLLER_VERSION_OFF);

	ibdev_dbg(edev->efa_dev, "efa device version: %d.%d\n",
		  EFA_GET(&ver, EFA_REGS_VERSION_MAJOR_VERSION),
		  EFA_GET(&ver, EFA_REGS_VERSION_MINOR_VERSION));

	EFA_SET(&min_ver, EFA_REGS_VERSION_MAJOR_VERSION,
		EFA_ADMIN_API_VERSION_MAJOR);
	EFA_SET(&min_ver, EFA_REGS_VERSION_MINOR_VERSION,
		EFA_ADMIN_API_VERSION_MINOR);
	if (ver < min_ver) {
		ibdev_err(edev->efa_dev,
			  "EFA version is lower than the minimal version the driver supports\n");
		return -EOPNOTSUPP;
	}

	ibdev_dbg(
		edev->efa_dev,
		"efa controller version: %d.%d.%d implementation version %d\n",
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION),
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION),
		EFA_GET(&ctrl_ver,
			EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION),
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_IMPL_ID));

	ctrl_ver_masked =
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION) |
		EFA_GET(&ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION) |
		EFA_GET(&ctrl_ver,
			EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION);

	EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MAJOR_VERSION,
		EFA_CTRL_MAJOR);
	EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_MINOR_VERSION,
		EFA_CTRL_MINOR);
	EFA_SET(&min_ctrl_ver, EFA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION,
		EFA_CTRL_SUB_MINOR);
	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < min_ctrl_ver) {
		ibdev_err(edev->efa_dev,
			  "EFA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

/**
 * efa_com_get_dma_width - Retrieve physical dma address width the device
 * supports.
 * @edev: EFA communication layer struct
 *
 * Retrieve the maximum physical address bits the device can handle.
 *
 * @return: > 0 on success and negative value otherwise.
 */
int efa_com_get_dma_width(struct efa_com_dev *edev)
{
	u32 caps = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);
	int width;

	width = EFA_GET(&caps, EFA_REGS_CAPS_DMA_ADDR_WIDTH);

	ibdev_dbg(edev->efa_dev, "DMA width: %d\n", width);

	if (width < 32 || width > 64) {
		ibdev_err(edev->efa_dev, "DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	edev->dma_addr_bits = width;

	return width;
}

static int wait_for_reset_state(struct efa_com_dev *edev, u32 timeout, int on)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);

		if (EFA_GET(&val, EFA_REGS_DEV_STS_RESET_IN_PROGRESS) == on)
			return 0;

		ibdev_dbg(edev->efa_dev, "Reset indication val %d\n", val);
		msleep(EFA_POLL_INTERVAL_MS);
	}

	return -ETIME;
}

/**
 * efa_com_dev_reset - Perform an FLR on the device.
 * @edev: EFA communication layer struct
 * @reset_reason: The reason that triggered the reset.
 *
 * @return - 0 on success, negative value on failure.
 */
int efa_com_dev_reset(struct efa_com_dev *edev,
		      enum efa_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap;
	u32 reset_val = 0;
	int err;

	stat = efa_com_reg_read32(edev, EFA_REGS_DEV_STS_OFF);
	cap = efa_com_reg_read32(edev, EFA_REGS_CAPS_OFF);

	if (!EFA_GET(&stat, EFA_REGS_DEV_STS_READY)) {
		ibdev_err(edev->efa_dev,
			  "Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = EFA_GET(&cap, EFA_REGS_CAPS_RESET_TIMEOUT);
	if (!timeout) {
		ibdev_err(edev->efa_dev, "Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	EFA_SET(&reset_val, EFA_REGS_DEV_CTL_DEV_RESET, 1);
	EFA_SET(&reset_val, EFA_REGS_DEV_CTL_RESET_REASON, reset_reason);
	writel(reset_val, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);

	/* reset clears the mmio readless address, restore it */
	efa_com_mmio_reg_read_resp_addr_init(edev);

	err = wait_for_reset_state(edev, timeout, 1);
	if (err) {
		ibdev_err(edev->efa_dev, "Reset indication didn't turn on\n");
		return err;
	}

	/* reset done */
	writel(0, edev->reg_bar + EFA_REGS_DEV_CTL_OFF);
	err = wait_for_reset_state(edev, timeout, 0);
	if (err) {
		ibdev_err(edev->efa_dev, "Reset indication didn't turn off\n");
		return err;
	}

	timeout = EFA_GET(&cap, EFA_REGS_CAPS_ADMIN_CMD_TO);
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		edev->aq.completion_timeout = timeout * 100000;
	else
		edev->aq.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}

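/*
 * Issue an EFA_ADMIN_CREATE_EQ admin command for an event queue whose ring
 * was already allocated by the caller. Note the entry size is passed to the
 * device in 32-bit words, not bytes.
 */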
static int efa_com_create_eq(struct efa_com_dev *edev,
			     struct efa_com_create_eq_params *params,
			     struct efa_com_create_eq_result *result)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_admin_create_eq_resp resp = {};
	struct efa_admin_create_eq_cmd cmd = {};
	int err;

	cmd.aq_common_descriptor.opcode = EFA_ADMIN_CREATE_EQ;
	EFA_SET(&cmd.caps, EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS,
		params->entry_size_in_bytes / 4);
	cmd.depth = params->depth;
	cmd.event_bitmask = params->event_bitmask;
	cmd.msix_vec = params->msix_vec;

	efa_com_set_dma_addr(params->dma_addr, &cmd.ba.mem_addr_high,
			     &cmd.ba.mem_addr_low);

	err = efa_com_cmd_exec(aq,
			       (struct efa_admin_aq_entry *)&cmd,
			       sizeof(cmd),
			       (struct efa_admin_acq_entry *)&resp,
			       sizeof(resp));
	if (err) {
		ibdev_err_ratelimited(edev->efa_dev,
				      "Failed to create eq[%d]\n", err);
		return err;
	}

	result->eqn = resp.eqn;

	return 0;
}

static void efa_com_destroy_eq(struct efa_com_dev *edev,
			       struct efa_com_destroy_eq_params *params)
{
	struct efa_com_admin_queue *aq = &edev->aq;
	struct efa_admin_destroy_eq_resp resp = {};
	struct efa_admin_destroy_eq_cmd cmd = {};
	int err;

	cmd.aq_common_descriptor.opcode = EFA_ADMIN_DESTROY_EQ;
	cmd.eqn = params->eqn;

	err = efa_com_cmd_exec(aq,
			       (struct efa_admin_aq_entry *)&cmd,
			       sizeof(cmd),
			       (struct efa_admin_acq_entry *)&resp,
			       sizeof(resp));
	if (err)
		ibdev_err_ratelimited(edev->efa_dev,
				      "Failed to destroy EQ-%u [%d]\n", cmd.eqn,
				      err);
}

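/*
 * Arm an event queue by writing its EQN with the ARM bit set to the EQ
 * doorbell register, re-enabling event reporting for that queue.
 */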
static void efa_com_arm_eq(struct efa_com_dev *edev, struct efa_com_eq *eeq)
{
	u32 val = 0;

	EFA_SET(&val, EFA_REGS_EQ_DB_EQN, eeq->eqn);
	EFA_SET(&val, EFA_REGS_EQ_DB_ARM, 1);

	writel(val, edev->reg_bar + EFA_REGS_EQ_DB_OFF);
}

void efa_com_eq_comp_intr_handler(struct efa_com_dev *edev,
				  struct efa_com_eq *eeq)
{
	struct efa_admin_eqe *eqe;
	u32 processed = 0;
	u8 phase;
	u32 ci;

	ci = eeq->cc & (eeq->depth - 1);
	phase = eeq->phase;
	eqe = &eeq->eqes[ci];

	/* Go over all the events */
	while ((READ_ONCE(eqe->common) & EFA_ADMIN_EQE_PHASE_MASK) == phase) {
		/*
		 * Do not read the rest of the completion entry until the
		 * phase bit has been validated
		 */
		dma_rmb();

		eeq->cb(eeq, eqe);

		/* Get next event entry */
		ci++;
		processed++;

		if (ci == eeq->depth) {
			ci = 0;
			phase = !phase;
		}

		eqe = &eeq->eqes[ci];
	}

	eeq->cc += processed;
	eeq->phase = phase;
	efa_com_arm_eq(eeq->edev, eeq);
}

void efa_com_eq_destroy(struct efa_com_dev *edev, struct efa_com_eq *eeq)
{
	struct efa_com_destroy_eq_params params = {
		.eqn = eeq->eqn,
	};

	efa_com_destroy_eq(edev, &params);
	dma_free_coherent(edev->dmadev, eeq->depth * sizeof(*eeq->eqes),
			  eeq->eqes, eeq->dma_addr);
}

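/*
 * Set up one event queue: allocate the EQE ring, create the EQ on the device
 * with completion events enabled, and arm it. Usage sketch, assuming a
 * handler named efa_process_eqe() (hypothetical name for illustration):
 *
 *	err = efa_com_eq_init(edev, eeq, efa_process_eqe, depth, msix_vec);
 */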
int efa_com_eq_init(struct efa_com_dev *edev, struct efa_com_eq *eeq,
		    efa_eqe_handler cb, u16 depth, u8 msix_vec)
{
	struct efa_com_create_eq_params params = {};
	struct efa_com_create_eq_result result = {};
	int err;

	params.depth = depth;
	params.entry_size_in_bytes = sizeof(*eeq->eqes);
	EFA_SET(&params.event_bitmask,
		EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS, 1);
	params.msix_vec = msix_vec;

	eeq->eqes = dma_alloc_coherent(edev->dmadev,
				       params.depth * sizeof(*eeq->eqes),
				       &params.dma_addr, GFP_KERNEL);
	if (!eeq->eqes)
		return -ENOMEM;

	err = efa_com_create_eq(edev, &params, &result);
	if (err)
		goto err_free_coherent;

	eeq->eqn = result.eqn;
	eeq->edev = edev;
	eeq->dma_addr = params.dma_addr;
	eeq->phase = 1;
	eeq->depth = params.depth;
	eeq->cb = cb;
	efa_com_arm_eq(edev, eeq);

	return 0;

err_free_coherent:
	dma_free_coherent(edev->dmadev, params.depth * sizeof(*eeq->eqes),
			  eeq->eqes, params.dma_addr);
	return err;
}