// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel IFC VF NIC driver for virtio dataplane offloading
 *
 * Copyright (C) 2020 Intel Corporation.
 *
 * Author: Zhu Lingshan <lingshan.zhu@intel.com>
 *
 */

#include "ifcvf_base.h"

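/*
 * Bind a virtqueue's interrupt to an MSI-X vector. The device echoes the
 * programmed value back, so the caller can compare the return value against
 * the requested vector (or VIRTIO_MSI_NO_VECTOR) to detect failure.
 */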
u16 ifcvf_set_vq_vector(struct ifcvf_hw *hw, u16 qid, int vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite16(qid, &cfg->queue_select);
	vp_iowrite16(vector, &cfg->queue_msix_vector);

	return vp_ioread16(&cfg->queue_msix_vector);
}

u16 ifcvf_set_config_vector(struct ifcvf_hw *hw, int vector)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite16(vector, &cfg->msix_config);

	return vp_ioread16(&cfg->msix_config);
}

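/*
 * Translate a virtio PCI capability into an __iomem pointer within the
 * already-mapped BARs, rejecting capabilities whose BAR number or
 * offset/length range falls outside the device's resources.
 */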
static void __iomem *get_cap_addr(struct ifcvf_hw *hw,
				  struct virtio_pci_cap *cap)
{
	u32 length, offset;
	u8 bar;

	length = le32_to_cpu(cap->length);
	offset = le32_to_cpu(cap->offset);
	bar = cap->bar;

	if (bar >= IFCVF_PCI_MAX_RESOURCE) {
		IFCVF_DBG(hw->pdev,
			  "Invalid bar number %u to get capabilities\n", bar);
		return NULL;
	}

	if (offset + length > pci_resource_len(hw->pdev, bar)) {
		IFCVF_DBG(hw->pdev,
			  "offset(%u) + len(%u) overflows bar%u's capability\n",
			  offset, length, bar);
		return NULL;
	}

	return hw->base[bar] + offset;
}

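/* Copy a capability structure out of PCI config space one dword at a time. */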
static int ifcvf_read_config_range(struct pci_dev *dev,
				   uint32_t *val, int size, int where)
{
	int ret, i;

	for (i = 0; i < size; i += 4) {
		ret = pci_read_config_dword(dev, where + i, val + i / 4);
		if (ret < 0)
			return ret;
	}

	return 0;
}

u16 ifcvf_get_vq_size(struct ifcvf_hw *hw, u16 qid)
{
	u16 queue_size;

	if (qid >= hw->nr_vring)
		return 0;

	vp_iowrite16(qid, &hw->common_cfg->queue_select);
	queue_size = vp_ioread16(&hw->common_cfg->queue_size);

	return queue_size;
}

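/*
 * Return the largest queue size advertised across all virtqueues; queues
 * reporting a size of 0 are unavailable and do not count.
 */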
u16 ifcvf_get_max_vq_size(struct ifcvf_hw *hw)
{
	u16 queue_size, max_size, qid;

	max_size = ifcvf_get_vq_size(hw, 0);
	for (qid = 1; qid < hw->nr_vring; qid++) {
		queue_size = ifcvf_get_vq_size(hw, qid);
		/* 0 means the queue is unavailable */
		if (!queue_size)
			continue;

		max_size = max(queue_size, max_size);
	}

	return max_size;
}

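/*
 * Walk the PCI capability list, map the virtio-pci common, notify, ISR and
 * device config structures, then cache the notify address of every
 * virtqueue. Returns 0 on success or a negative errno on failure.
 */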
int ifcvf_init_hw(struct ifcvf_hw *hw, struct pci_dev *pdev)
{
	struct virtio_pci_cap cap;
	u16 notify_off;
	int ret;
	u8 pos;
	u32 i;

	ret = pci_read_config_byte(pdev, PCI_CAPABILITY_LIST, &pos);
	if (ret) {
		IFCVF_ERR(pdev, "Failed to read PCI capability list\n");
		return -EIO;
	}
	hw->pdev = pdev;

	while (pos) {
		ret = ifcvf_read_config_range(pdev, (u32 *)&cap,
					      sizeof(cap), pos);
		if (ret < 0) {
			IFCVF_ERR(pdev,
				  "Failed to get PCI capability at %x\n", pos);
			break;
		}

		if (cap.cap_vndr != PCI_CAP_ID_VNDR)
			goto next;

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->common_cfg = %p\n",
				  hw->common_cfg);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			pci_read_config_dword(pdev, pos + sizeof(cap),
					      &hw->notify_off_multiplier);
			hw->notify_bar = cap.bar;
			hw->notify_base = get_cap_addr(hw, &cap);
			hw->notify_base_pa = pci_resource_start(pdev, cap.bar) +
					     le32_to_cpu(cap.offset);
			IFCVF_DBG(pdev, "hw->notify_base = %p\n",
				  hw->notify_base);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cap_addr(hw, &cap);
			IFCVF_DBG(pdev, "hw->isr = %p\n", hw->isr);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cap_addr(hw, &cap);
			hw->cap_dev_config_size = le32_to_cpu(cap.length);
			IFCVF_DBG(pdev, "hw->dev_cfg = %p\n", hw->dev_cfg);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->isr == NULL || hw->dev_cfg == NULL) {
		IFCVF_ERR(pdev, "Incomplete PCI capabilities\n");
		return -EIO;
	}

	hw->nr_vring = vp_ioread16(&hw->common_cfg->num_queues);
	hw->vring = kzalloc(sizeof(struct vring_info) * hw->nr_vring, GFP_KERNEL);
	if (!hw->vring)
		return -ENOMEM;

	for (i = 0; i < hw->nr_vring; i++) {
		vp_iowrite16(i, &hw->common_cfg->queue_select);
		notify_off = vp_ioread16(&hw->common_cfg->queue_notify_off);
		hw->vring[i].notify_addr = hw->notify_base +
			notify_off * hw->notify_off_multiplier;
		hw->vring[i].notify_pa = hw->notify_base_pa +
			notify_off * hw->notify_off_multiplier;
		hw->vring[i].irq = -EINVAL;
	}

	hw->lm_cfg = hw->base[IFCVF_LM_BAR];

	IFCVF_DBG(pdev,
		  "PCI capability mapping: common cfg: %p, notify base: %p, isr cfg: %p, device cfg: %p, multiplier: %u\n",
		  hw->common_cfg, hw->notify_base, hw->isr,
		  hw->dev_cfg, hw->notify_off_multiplier);

	hw->vqs_reused_irq = -EINVAL;
	hw->config_irq = -EINVAL;

	return 0;
}

u8 ifcvf_get_status(struct ifcvf_hw *hw)
{
	return vp_ioread8(&hw->common_cfg->device_status);
}

void ifcvf_set_status(struct ifcvf_hw *hw, u8 status)
{
	vp_iowrite8(status, &hw->common_cfg->device_status);
}

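/*
 * Reset the device by writing 0 to device_status, then poll until the
 * device reports the reset as complete by returning 0 on a status read.
 */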
void ifcvf_reset(struct ifcvf_hw *hw)
{
	ifcvf_set_status(hw, 0);
	while (ifcvf_get_status(hw))
		msleep(1);
}

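/*
 * The device features are exposed as two 32-bit words; select each half via
 * device_feature_select and combine them into a 64-bit feature mask.
 */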
u64 ifcvf_get_hw_features(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
	u32 features_lo, features_hi;
	u64 features;

	vp_iowrite32(0, &cfg->device_feature_select);
	features_lo = vp_ioread32(&cfg->device_feature);

	vp_iowrite32(1, &cfg->device_feature_select);
	features_hi = vp_ioread32(&cfg->device_feature);

	features = ((u64)features_hi << 32) | features_lo;

	return features;
}

/* return provisioned vDPA dev features */
u64 ifcvf_get_dev_features(struct ifcvf_hw *hw)
{
	return hw->dev_features;
}

u64 ifcvf_get_driver_features(struct ifcvf_hw *hw)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
	u32 features_lo, features_hi;
	u64 features;

	vp_iowrite32(0, &cfg->device_feature_select);
	features_lo = vp_ioread32(&cfg->guest_feature);

	vp_iowrite32(1, &cfg->device_feature_select);
	features_hi = vp_ioread32(&cfg->guest_feature);

	features = ((u64)features_hi << 32) | features_lo;

	return features;
}

int ifcvf_verify_min_features(struct ifcvf_hw *hw, u64 features)
{
	if (!(features & BIT_ULL(VIRTIO_F_ACCESS_PLATFORM)) && features) {
		IFCVF_ERR(hw->pdev, "VIRTIO_F_ACCESS_PLATFORM is not negotiated\n");
		return -EINVAL;
	}

	return 0;
}

u32 ifcvf_get_config_size(struct ifcvf_hw *hw)
{
	u32 net_config_size = sizeof(struct virtio_net_config);
	u32 blk_config_size = sizeof(struct virtio_blk_config);
	u32 cap_size = hw->cap_dev_config_size;
	u32 config_size;

	/* If the on-board device config space is larger than
	 * struct virtio_net/blk_config, return only the size the
	 * spec defines. This is unlikely and purely defensive.
	 */
	switch (hw->dev_type) {
	case VIRTIO_ID_NET:
		config_size = min(cap_size, net_config_size);
		break;
	case VIRTIO_ID_BLOCK:
		config_size = min(cap_size, blk_config_size);
		break;
	default:
		config_size = 0;
		IFCVF_ERR(hw->pdev, "VIRTIO ID %u not supported\n", hw->dev_type);
	}

	return config_size;
}

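/*
 * Copy bytes out of the device config space under the config_generation
 * counter: if the generation changes while copying, the device updated the
 * config concurrently, so retry to get a consistent snapshot.
 */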
void ifcvf_read_dev_config(struct ifcvf_hw *hw, u64 offset,
			   void *dst, int length)
{
	u8 old_gen, new_gen, *p;
	int i;

	WARN_ON(offset + length > hw->config_size);
	do {
		old_gen = vp_ioread8(&hw->common_cfg->config_generation);
		p = dst;
		for (i = 0; i < length; i++)
			*p++ = vp_ioread8(hw->dev_cfg + offset + i);

		new_gen = vp_ioread8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

void ifcvf_write_dev_config(struct ifcvf_hw *hw, u64 offset,
			    const void *src, int length)
{
	const u8 *p;
	int i;

	p = src;
	WARN_ON(offset + length > hw->config_size);
	for (i = 0; i < length; i++)
		vp_iowrite8(*p++, hw->dev_cfg + offset + i);
}

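/*
 * Write the negotiated driver features back to the device as two 32-bit
 * halves selected through guest_feature_select.
 */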
void ifcvf_set_driver_features(struct ifcvf_hw *hw, u64 features)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite32(0, &cfg->guest_feature_select);
	vp_iowrite32((u32)features, &cfg->guest_feature);

	vp_iowrite32(1, &cfg->guest_feature_select);
	vp_iowrite32(features >> 32, &cfg->guest_feature);
}

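/*
 * The per-queue available index is kept in the live-migration (LM) region
 * mapped from IFCVF_LM_BAR; these helpers read and write that state for
 * vDPA save/restore.
 */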
u16 ifcvf_get_vq_state(struct ifcvf_hw *hw, u16 qid)
{
	struct ifcvf_lm_cfg __iomem *lm_cfg = hw->lm_cfg;
	u16 last_avail_idx;

	last_avail_idx = vp_ioread16(&lm_cfg->vq_state_region + qid * 2);

	return last_avail_idx;
}

int ifcvf_set_vq_state(struct ifcvf_hw *hw, u16 qid, u16 num)
{
	struct ifcvf_lm_cfg __iomem *lm_cfg = hw->lm_cfg;

	vp_iowrite16(num, &lm_cfg->vq_state_region + qid * 2);

	return 0;
}

void ifcvf_set_vq_num(struct ifcvf_hw *hw, u16 qid, u32 num)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite16(qid, &cfg->queue_select);
	vp_iowrite16(num, &cfg->queue_size);
}

int ifcvf_set_vq_address(struct ifcvf_hw *hw, u16 qid, u64 desc_area,
			 u64 driver_area, u64 device_area)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite16(qid, &cfg->queue_select);
	vp_iowrite64_twopart(desc_area, &cfg->queue_desc_lo,
			     &cfg->queue_desc_hi);
	vp_iowrite64_twopart(driver_area, &cfg->queue_avail_lo,
			     &cfg->queue_avail_hi);
	vp_iowrite64_twopart(device_area, &cfg->queue_used_lo,
			     &cfg->queue_used_hi);

	return 0;
}

bool ifcvf_get_vq_ready(struct ifcvf_hw *hw, u16 qid)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;
	u16 queue_enable;

	vp_iowrite16(qid, &cfg->queue_select);
	queue_enable = vp_ioread16(&cfg->queue_enable);

	return (bool)queue_enable;
}

void ifcvf_set_vq_ready(struct ifcvf_hw *hw, u16 qid, bool ready)
{
	struct virtio_pci_common_cfg __iomem *cfg = hw->common_cfg;

	vp_iowrite16(qid, &cfg->queue_select);
	vp_iowrite16(ready, &cfg->queue_enable);
}

static void ifcvf_reset_vring(struct ifcvf_hw *hw)
{
	u16 qid;

	for (qid = 0; qid < hw->nr_vring; qid++) {
		hw->vring[qid].cb.callback = NULL;
		hw->vring[qid].cb.private = NULL;
		ifcvf_set_vq_vector(hw, qid, VIRTIO_MSI_NO_VECTOR);
	}
}

static void ifcvf_reset_config_handler(struct ifcvf_hw *hw)
{
	hw->config_cb.callback = NULL;
	hw->config_cb.private = NULL;
	ifcvf_set_config_vector(hw, VIRTIO_MSI_NO_VECTOR);
}

static void ifcvf_synchronize_irq(struct ifcvf_hw *hw)
{
	u32 nvectors = hw->num_msix_vectors;
	struct pci_dev *pdev = hw->pdev;
	int i, irq;

	for (i = 0; i < nvectors; i++) {
		irq = pci_irq_vector(pdev, i);
		if (irq >= 0)
			synchronize_irq(irq);
	}
}

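/*
 * Quiesce the device for teardown: wait for all in-flight interrupt
 * handlers to finish, then detach the virtqueue and config callbacks and
 * their MSI-X vectors.
 */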
void ifcvf_stop(struct ifcvf_hw *hw)
{
	ifcvf_synchronize_irq(hw);
	ifcvf_reset_vring(hw);
	ifcvf_reset_config_handler(hw);
}

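/* Kick a virtqueue by writing its index to the queue's notify address. */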
void ifcvf_notify_queue(struct ifcvf_hw *hw, u16 qid)
{
	vp_iowrite16(qid, hw->vring[qid].notify_addr);
}