// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright(c) 2017 - 2018 Intel Corporation.
 */

/*
 * This file contains HFI1 support for VNIC SDMA functionality
 */

#include "sdma.h"
#include "vnic.h"

#define HFI1_VNIC_SDMA_Q_ACTIVE		BIT(0)
#define HFI1_VNIC_SDMA_Q_DEFERRED	BIT(1)

#define HFI1_VNIC_TXREQ_NAME_LEN	32
#define HFI1_VNIC_SDMA_DESC_WTRMRK	64

/*
 * struct vnic_txreq - VNIC transmit descriptor
 * @txreq: sdma transmit request
 * @sdma: vnic sdma pointer
 * @skb: skb to send
 * @pad: pad buffer
 * @plen: pad length
 * @pbc_val: pbc value
 */
struct vnic_txreq {
	struct sdma_txreq txreq;
	struct hfi1_vnic_sdma *sdma;

	struct sk_buff *skb;
	unsigned char pad[HFI1_VNIC_MAX_PAD];
	u16 plen;
	__le64 pbc_val;
};

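/*
 * SDMA completion callback: runs once the engine is done with (or has
 * aborted) the request. Cleans up the mapped descriptors, frees the skb
 * and returns the txreq to the per-device cache.
 */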
static void vnic_sdma_complete(struct sdma_txreq *txreq,
			       int status)
{
	struct vnic_txreq *tx = container_of(txreq, struct vnic_txreq, txreq);
	struct hfi1_vnic_sdma *vnic_sdma = tx->sdma;

	sdma_txclean(vnic_sdma->dd, txreq);
	dev_kfree_skb_any(tx->skb);
	kmem_cache_free(vnic_sdma->dd->vnic.txreq_cache, tx);
}

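/*
 * Add the skb payload to the txreq: the linear (head) portion first,
 * then each page fragment, then the trailing pad bytes.
 */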
static noinline int build_vnic_ulp_payload(struct sdma_engine *sde,
					   struct vnic_txreq *tx)
{
	int i, ret = 0;

	ret = sdma_txadd_kvaddr(
		sde->dd,
		&tx->txreq,
		tx->skb->data,
		skb_headlen(tx->skb));
	if (unlikely(ret))
		goto bail_txadd;

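	/* add a descriptor for each page fragment of the skb */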
	for (i = 0; i < skb_shinfo(tx->skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(tx->skb)->frags[i];

		/* combine physically contiguous fragments later? */
		ret = sdma_txadd_page(sde->dd,
				      &tx->txreq,
				      skb_frag_page(frag),
				      skb_frag_off(frag),
				      skb_frag_size(frag),
				      NULL, NULL, NULL);
		if (unlikely(ret))
			goto bail_txadd;
	}

	if (tx->plen)
		ret = sdma_txadd_kvaddr(sde->dd, &tx->txreq,
					tx->pad + HFI1_VNIC_MAX_PAD - tx->plen,
					tx->plen);

bail_txadd:
	return ret;
}

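/*
 * Build the complete descriptor list for one packet: initialize the
 * txreq with the total length (PBC + skb + pad) and the completion
 * callback, add the 8-byte PBC, then the ULP payload.
 */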
static int build_vnic_tx_desc(struct sdma_engine *sde,
			      struct vnic_txreq *tx,
			      u64 pbc)
{
	int ret = 0;
	u16 hdrbytes = 2 << 2; /* PBC: 2 dwords (8 bytes) */

	ret = sdma_txinit_ahg(
		&tx->txreq,
		0,
		hdrbytes + tx->skb->len + tx->plen,
		0,
		0,
		NULL,
		0,
		vnic_sdma_complete);
	if (unlikely(ret))
		goto bail_txadd;

	/* add the PBC control word that precedes the packet */
	tx->pbc_val = cpu_to_le64(pbc);
	ret = sdma_txadd_kvaddr(
		sde->dd,
		&tx->txreq,
		&tx->pbc_val,
		hdrbytes);
	if (unlikely(ret))
		goto bail_txadd;

	/* add the ulp payload */
	ret = build_vnic_ulp_payload(sde, tx);
bail_txadd:
	return ret;
}


/*
 * Set up the last plen bytes of the pad buffer; the final byte carries
 * the pad length minus the ICRC/tail length.
 */
static inline void hfi1_vnic_update_pad(unsigned char *pad, u8 plen)
{
	pad[HFI1_VNIC_MAX_PAD - 1] = plen - OPA_VNIC_ICRC_TAIL_LEN;
}

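/*
 * hfi1_vnic_send_dma - transmit an skb on a VNIC Tx queue via SDMA
 *
 * Bails out if the queue is not active or the SDMA engine is not
 * running. On -ECOMM the SDMA callback performs the cleanup; on -EBUSY
 * the skb is kept so the caller can retry after a queue wakeup.
 */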
int hfi1_vnic_send_dma(struct hfi1_devdata *dd, u8 q_idx,
		       struct hfi1_vnic_vport_info *vinfo,
		       struct sk_buff *skb, u64 pbc, u8 plen)
{
	struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];
	struct sdma_engine *sde = vnic_sdma->sde;
	struct vnic_txreq *tx;
	int ret = -ECOMM;

	if (unlikely(READ_ONCE(vnic_sdma->state) != HFI1_VNIC_SDMA_Q_ACTIVE))
		goto tx_err;

	if (unlikely(!sde || !sdma_running(sde)))
		goto tx_err;

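	/* allocate atomically; the send path may run in atomic context */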
	tx = kmem_cache_alloc(dd->vnic.txreq_cache, GFP_ATOMIC);
	if (unlikely(!tx)) {
		ret = -ENOMEM;
		goto tx_err;
	}

	tx->sdma = vnic_sdma;
	tx->skb = skb;
	hfi1_vnic_update_pad(tx->pad, plen);
	tx->plen = plen;
	ret = build_vnic_tx_desc(sde, tx, pbc);
	if (unlikely(ret))
		goto free_desc;

	ret = sdma_send_txreq(sde, iowait_get_ib_work(&vnic_sdma->wait),
			      &tx->txreq, vnic_sdma->pkts_sent);
	/* When -ECOMM, sdma callback will be called with ABORT status */
	if (unlikely(ret && ret != -ECOMM))
		goto free_desc;

	if (!ret) {
		vnic_sdma->pkts_sent = true;
		iowait_starve_clear(vnic_sdma->pkts_sent, &vnic_sdma->wait);
	}
	return ret;

free_desc:
	sdma_txclean(dd, &tx->txreq);
	kmem_cache_free(dd->vnic.txreq_cache, tx);
tx_err:
	if (ret != -EBUSY)
		dev_kfree_skb_any(skb);
	else
		vnic_sdma->pkts_sent = false;
	return ret;
}

/*
 * hfi1_vnic_sdma_sleep - vnic sdma sleep function
 *
 * This function gets called from sdma_send_txreq() when there are not enough
 * sdma descriptors available to send the packet. It adds the Tx queue's wait
 * structure to the sdma engine's dmawait list, to be woken up when
 * descriptors become available.
 */
static int hfi1_vnic_sdma_sleep(struct sdma_engine *sde,
				struct iowait_work *wait,
				struct sdma_txreq *txreq,
				uint seq,
				bool pkts_sent)
{
	struct hfi1_vnic_sdma *vnic_sdma =
		container_of(wait->iow, struct hfi1_vnic_sdma, wait);

	write_seqlock(&sde->waitlock);
	if (sdma_progress(sde, seq, txreq)) {
		write_sequnlock(&sde->waitlock);
		return -EAGAIN;
	}

	vnic_sdma->state = HFI1_VNIC_SDMA_Q_DEFERRED;
	if (list_empty(&vnic_sdma->wait.list)) {
		iowait_get_priority(wait->iow);
		iowait_queue(pkts_sent, wait->iow, &sde->dmawait);
	}
	write_sequnlock(&sde->waitlock);
	return -EBUSY;
}

/*
 * hfi1_vnic_sdma_wakeup - vnic sdma wakeup function
 *
 * This function gets called when SDMA descriptors become available and the Tx
 * queue's wait structure was previously added to the sdma engine's dmawait
 * list. It notifies the upper driver about the Tx queue wakeup.
 */
static void hfi1_vnic_sdma_wakeup(struct iowait *wait, int reason)
{
	struct hfi1_vnic_sdma *vnic_sdma =
		container_of(wait, struct hfi1_vnic_sdma, wait);
	struct hfi1_vnic_vport_info *vinfo = vnic_sdma->vinfo;

	vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;
	if (__netif_subqueue_stopped(vinfo->netdev, vnic_sdma->q_idx))
		netif_wake_subqueue(vinfo->netdev, vnic_sdma->q_idx);
}

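/* Return true if the Tx queue can accept another packet */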
inline bool hfi1_vnic_sdma_write_avail(struct hfi1_vnic_vport_info *vinfo,
				       u8 q_idx)
{
	struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[q_idx];

	return (READ_ONCE(vnic_sdma->state) == HFI1_VNIC_SDMA_Q_ACTIVE);
}

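/*
 * Per-vport setup: bind each Tx queue to an SDMA engine, initialize its
 * iowait with the sleep/wakeup callbacks, and mark the queue active.
 */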
void hfi1_vnic_sdma_init(struct hfi1_vnic_vport_info *vinfo)
{
	int i;

	for (i = 0; i < vinfo->num_tx_q; i++) {
		struct hfi1_vnic_sdma *vnic_sdma = &vinfo->sdma[i];

		iowait_init(&vnic_sdma->wait, 0, NULL, NULL,
			    hfi1_vnic_sdma_sleep,
			    hfi1_vnic_sdma_wakeup, NULL, NULL);
		vnic_sdma->sde = &vinfo->dd->per_sdma[i];
		vnic_sdma->dd = vinfo->dd;
		vnic_sdma->vinfo = vinfo;
		vnic_sdma->q_idx = i;
		vnic_sdma->state = HFI1_VNIC_SDMA_Q_ACTIVE;

		/* Add a free descriptor watermark for wakeups */
		if (vnic_sdma->sde->descq_cnt > HFI1_VNIC_SDMA_DESC_WTRMRK) {
			struct iowait_work *work;

			INIT_LIST_HEAD(&vnic_sdma->stx.list);
			vnic_sdma->stx.num_desc = HFI1_VNIC_SDMA_DESC_WTRMRK;
			work = iowait_get_ib_work(&vnic_sdma->wait);
			list_add_tail(&vnic_sdma->stx.list, &work->tx_head);
		}
	}
}

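/* Create the per-device slab cache used to allocate vnic_txreq structures */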
int hfi1_vnic_txreq_init(struct hfi1_devdata *dd)
{
	char buf[HFI1_VNIC_TXREQ_NAME_LEN];

	snprintf(buf, sizeof(buf), "hfi1_%u_vnic_txreq_cache", dd->unit);
	dd->vnic.txreq_cache = kmem_cache_create(buf,
						 sizeof(struct vnic_txreq),
						 0, SLAB_HWCACHE_ALIGN,
						 NULL);
	if (!dd->vnic.txreq_cache)
		return -ENOMEM;
	return 0;
}

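/* Destroy the txreq cache; kmem_cache_destroy() tolerates a NULL cache */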
void hfi1_vnic_txreq_deinit(struct hfi1_devdata *dd)
{
	kmem_cache_destroy(dd->vnic.txreq_cache);
	dd->vnic.txreq_cache = NULL;
}