| 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
| 2 | /* |
| 3 | * Copyright (C) 2012 Red Hat, Inc. All rights reserved. |
| 4 | * Author: Alex Williamson <alex.williamson@redhat.com> |
| 5 | * |
| 6 | * Derived from original vfio: |
| 7 | * Copyright 2010 Cisco Systems, Inc. All rights reserved. |
| 8 | * Author: Tom Lyon, pugs@cisco.com |
| 9 | */ |
| 10 | |
| 11 | #include <linux/mutex.h> |
| 12 | #include <linux/pci.h> |
| 13 | #include <linux/vfio.h> |
| 14 | #include <linux/irqbypass.h> |
| 15 | #include <linux/rcupdate.h> |
| 16 | #include <linux/types.h> |
| 17 | #include <linux/uuid.h> |
| 18 | #include <linux/notifier.h> |
| 19 | |
| 20 | #ifndef VFIO_PCI_CORE_H |
| 21 | #define VFIO_PCI_CORE_H |
| 22 | |
/*
 * Region offsets within the device file descriptor are encoded as
 * (region index << VFIO_PCI_OFFSET_SHIFT) | offset-within-region, so
 * the high bits of a file offset select the region and the low
 * VFIO_PCI_OFFSET_SHIFT bits address into it.
 *
 * Note: the macro argument is parenthesized so that compound
 * expressions (e.g. OFFSET_TO_INDEX(a + b)) shift the full value.
 */
#define VFIO_PCI_OFFSET_SHIFT   40
#define VFIO_PCI_OFFSET_TO_INDEX(off)	((off) >> VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_INDEX_TO_OFFSET(index)	((u64)(index) << VFIO_PCI_OFFSET_SHIFT)
#define VFIO_PCI_OFFSET_MASK	(((u64)(1) << VFIO_PCI_OFFSET_SHIFT) - 1)
| 27 | |
| 28 | struct vfio_pci_core_device; |
| 29 | struct vfio_pci_region; |
| 30 | struct p2pdma_provider; |
| 31 | struct dma_buf_phys_vec; |
| 32 | struct dma_buf_attachment; |
| 33 | |
/*
 * Eventfd trigger wrapper.  The rcu_head allows the containing object
 * to be freed after a grace period, matching the __rcu-annotated
 * err_trigger/req_trigger pointers in struct vfio_pci_core_device that
 * readers dereference locklessly.
 */
struct vfio_pci_eventfd {
	struct eventfd_ctx *ctx;	/* eventfd signaled toward userspace */
	struct rcu_head rcu;		/* deferred free under RCU */
};
| 38 | |
/*
 * Callbacks backing a device-specific region registered via
 * vfio_pci_core_register_dev_region().  Only @rw is obviously
 * mandatory for a useful region; the others extend optional behavior.
 */
struct vfio_pci_regops {
	/* Handle read (iswrite=false) or write (iswrite=true) at *ppos. */
	ssize_t (*rw)(struct vfio_pci_core_device *vdev, char __user *buf,
		      size_t count, loff_t *ppos, bool iswrite);
	/* Release per-region resources when the device is torn down. */
	void (*release)(struct vfio_pci_core_device *vdev,
			struct vfio_pci_region *region);
	/* Optional: allow userspace to mmap the region. */
	int (*mmap)(struct vfio_pci_core_device *vdev,
		    struct vfio_pci_region *region,
		    struct vm_area_struct *vma);
	/* Optional: append region-info capabilities reported to userspace. */
	int (*add_capability)(struct vfio_pci_core_device *vdev,
			      struct vfio_pci_region *region,
			      struct vfio_info_cap *caps);
};
| 51 | |
/*
 * A device-specific region exposed beyond the fixed BAR/config/VGA
 * indices.  Instances live in the vfio_pci_core_device->region array.
 */
struct vfio_pci_region {
	u32 type;			/* VFIO region type (vendor/class) */
	u32 subtype;			/* type-specific subtype identifier */
	const struct vfio_pci_regops *ops;	/* access callbacks */
	void *data;			/* opaque per-region driver data */
	size_t size;			/* region size in bytes */
	u32 flags;			/* VFIO_REGION_INFO_FLAG_* reported */
};
| 60 | |
/*
 * Hooks a vfio-pci variant driver may override on the core device
 * (see vfio_pci_core_device.pci_ops).
 */
struct vfio_pci_device_ops {
	/*
	 * Produce the physical ranges and P2PDMA provider backing the
	 * given region for dma-buf export; the default implementation
	 * is vfio_pci_core_get_dmabuf_phys().
	 */
	int (*get_dmabuf_phys)(struct vfio_pci_core_device *vdev,
			       struct p2pdma_provider **provider,
			       unsigned int region_index,
			       struct dma_buf_phys_vec *phys_vec,
			       struct vfio_region_dma_range *dma_ranges,
			       size_t nr_ranges);
};
| 69 | |
#if IS_ENABLED(CONFIG_VFIO_PCI_DMABUF)
/*
 * Validate the user-supplied dma_ranges against the window
 * [start, start + len) and fill phys_vec accordingly.
 */
int vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
				struct vfio_region_dma_range *dma_ranges,
				size_t nr_ranges, phys_addr_t start,
				phys_addr_t len);
/* Default get_dmabuf_phys implementation operating on BAR regions. */
int vfio_pci_core_get_dmabuf_phys(struct vfio_pci_core_device *vdev,
				  struct p2pdma_provider **provider,
				  unsigned int region_index,
				  struct dma_buf_phys_vec *phys_vec,
				  struct vfio_region_dma_range *dma_ranges,
				  size_t nr_ranges);
#else
/*
 * Stubs when dma-buf support is compiled out.
 *
 * NOTE(review): the two stubs return different errors (-EINVAL vs
 * -EOPNOTSUPP) — confirm callers depend on this distinction rather
 * than it being an oversight.
 */
static inline int
vfio_pci_core_fill_phys_vec(struct dma_buf_phys_vec *phys_vec,
			    struct vfio_region_dma_range *dma_ranges,
			    size_t nr_ranges, phys_addr_t start,
			    phys_addr_t len)
{
	return -EINVAL;
}
static inline int vfio_pci_core_get_dmabuf_phys(
	struct vfio_pci_core_device *vdev, struct p2pdma_provider **provider,
	unsigned int region_index, struct dma_buf_phys_vec *phys_vec,
	struct vfio_region_dma_range *dma_ranges, size_t nr_ranges)
{
	return -EOPNOTSUPP;
}
#endif
| 98 | |
/*
 * Per-device state for a vfio-pci core device: the embedded generic
 * vfio_device plus everything needed to virtualize config space,
 * interrupts, and region access for userspace.
 */
struct vfio_pci_core_device {
	struct vfio_device vdev;	/* embedded generic vfio device */
	struct pci_dev *pdev;		/* underlying physical PCI device */
	const struct vfio_pci_device_ops *pci_ops; /* variant-driver hooks */
	void __iomem *barmap[PCI_STD_NUM_BARS];	/* ioremapped BARs, lazy */
	bool bar_mmap_supported[PCI_STD_NUM_BARS]; /* BAR eligible for mmap */
	u8 *pci_config_map;		/* per-byte config capability map */
	u8 *vconfig;			/* virtualized config space copy */
	struct perm_bits *msi_perm;	/* write permissions for MSI cap */
	spinlock_t irqlock;		/* protects interrupt dispatch state */
	struct mutex igate;		/* serializes interrupt setup/teardown */
	struct xarray ctx;		/* per-vector interrupt contexts */
	int irq_type;			/* current VFIO_PCI_*_IRQ_INDEX mode */
	int num_regions;		/* count of device-specific regions */
	struct vfio_pci_region *region;	/* device-specific region array */
	u8 msi_qmax;			/* max MSI vectors advertised */
	u8 msix_bar;			/* BAR containing the MSI-X table */
	u16 msix_size;			/* MSI-X table size in bytes */
	u32 msix_offset;		/* MSI-X table offset within BAR */
	/* Saved BAR values; presumably 6 BARs + ROM — verify vs. users. */
	u32 rbar[7];
	bool has_dyn_msix:1;		/* device supports dynamic MSI-X */
	bool pci_2_3:1;			/* PCI 2.3 INTx masking available */
	bool virq_disabled:1;		/* guest disabled INTx virtually */
	bool reset_works:1;		/* function reset known to work */
	bool extended_caps:1;		/* extended config caps present */
	bool bardirty:1;		/* vconfig BARs need re-sync */
	bool has_vga:1;			/* legacy VGA ranges exposed */
	bool needs_reset:1;		/* reset pending before next use */
	bool nointx:1;			/* INTx permanently disabled */
	bool needs_pm_restore:1;	/* restore PM state after D3 */
	bool pm_intx_masked:1;		/* INTx masked across PM transition */
	bool pm_runtime_engaged:1;	/* runtime PM low-power engaged */
	struct pci_saved_state *pci_saved_state; /* state saved at open */
	struct pci_saved_state *pm_save;	/* state saved across D3 */
	int ioeventfds_nr;		/* number of active ioeventfds */
	struct vfio_pci_eventfd __rcu *err_trigger; /* AER notification */
	struct vfio_pci_eventfd __rcu *req_trigger; /* device-release request */
	struct eventfd_ctx *pm_wake_eventfd_ctx; /* wakes user on PM wakeup */
	struct list_head dummy_resources_list;	/* placeholder res claims */
	struct mutex ioeventfds_lock;	/* protects ioeventfds_list */
	struct list_head ioeventfds_list;
	struct vfio_pci_vf_token *vf_token;	/* SR-IOV VF token (PF side) */
	struct list_head sriov_pfs_item;	/* link in global SR-IOV PF list */
	struct vfio_pci_core_device *sriov_pf_core_dev;	/* our PF, if a VF */
	struct notifier_block nb;	/* bus/device event notifier */
	struct rw_semaphore memory_lock; /* guards BAR access vs mem enable */
	struct list_head dmabufs;	/* exported dma-bufs for this device */
};
| 147 | |
/*
 * Maximum access width, in bytes, for the MMIO/PIO read-write helper
 * (see the max_width argument of vfio_pci_core_do_io_rw()).
 */
enum vfio_pci_io_width {
	VFIO_PCI_IO_WIDTH_1 = 1,
	VFIO_PCI_IO_WIDTH_2 = 2,
	VFIO_PCI_IO_WIDTH_4 = 4,
	VFIO_PCI_IO_WIDTH_8 = 8,
};
| 154 | |
/* Will be exported for vfio pci drivers usage */

/* Add a device-specific region with the given type/subtype and ops. */
int vfio_pci_core_register_dev_region(struct vfio_pci_core_device *vdev,
				      unsigned int type, unsigned int subtype,
				      const struct vfio_pci_regops *ops,
				      size_t size, u32 flags, void *data);
/* Module-parameter plumbing shared by vfio-pci drivers. */
void vfio_pci_core_set_params(bool nointxmask, bool is_disable_vga,
			      bool is_disable_idle_d3);

/* Device lifecycle: init/release pair and register/unregister pair. */
void vfio_pci_core_close_device(struct vfio_device *core_vdev);
int vfio_pci_core_init_dev(struct vfio_device *core_vdev);
void vfio_pci_core_release_dev(struct vfio_device *core_vdev);
int vfio_pci_core_register_device(struct vfio_pci_core_device *vdev);
void vfio_pci_core_unregister_device(struct vfio_pci_core_device *vdev);
extern const struct pci_error_handlers vfio_pci_core_err_handlers;
/* Enable/disable nr_virtfn SR-IOV VFs on the PF (0 disables). */
int vfio_pci_core_sriov_configure(struct vfio_pci_core_device *vdev,
				  int nr_virtfn);

/* Default vfio_device_ops implementations drivers can reuse directly. */
long vfio_pci_core_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
			 unsigned long arg);
int vfio_pci_core_ioctl_feature(struct vfio_device *device, u32 flags,
				void __user *arg, size_t argsz);
int vfio_pci_ioctl_get_region_info(struct vfio_device *core_vdev,
				   struct vfio_region_info *info,
				   struct vfio_info_cap *caps);
ssize_t vfio_pci_core_read(struct vfio_device *core_vdev, char __user *buf,
			   size_t count, loff_t *ppos);
ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *buf,
			    size_t count, loff_t *ppos);
/* Insert a pfn mapping of the given order at a fault, for mmap'd BARs. */
vm_fault_t vfio_pci_vmf_insert_pfn(struct vfio_pci_core_device *vdev,
				   struct vm_fault *vmf, unsigned long pfn,
				   unsigned int order);
int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma);
void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count);
int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf);
int vfio_pci_core_match_token_uuid(struct vfio_device *core_vdev,
				   const uuid_t *uuid);

/* Bring-up/teardown helpers called from a driver's open/close paths. */
int vfio_pci_core_enable(struct vfio_pci_core_device *vdev);
void vfio_pci_core_disable(struct vfio_pci_core_device *vdev);
void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev);
/* Ensure barmap[bar] is ioremapped before use. */
int vfio_pci_core_setup_barmap(struct vfio_pci_core_device *vdev, int bar);
pci_ers_result_t vfio_pci_core_aer_err_detected(struct pci_dev *pdev,
						pci_channel_state_t state);
/*
 * Copy between user buffer and device I/O memory, honoring the
 * excluded window [x_start, x_end) and capping accesses at max_width.
 */
ssize_t vfio_pci_core_do_io_rw(struct vfio_pci_core_device *vdev, bool test_mem,
			       void __iomem *io, char __user *buf,
			       loff_t off, size_t count, size_t x_start,
			       size_t x_end, bool iswrite,
			       enum vfio_pci_io_width max_width);
bool __vfio_pci_memory_enabled(struct vfio_pci_core_device *vdev);
/* Intersect a user buffer span with a register span; false if disjoint. */
bool vfio_pci_core_range_intersect_range(loff_t buf_start, size_t buf_cnt,
					 loff_t reg_start, size_t reg_cnt,
					 loff_t *buf_offset,
					 size_t *intersect_count,
					 size_t *register_offset);
/*
 * Declare the memory-enable-checked iowrite helpers for each access
 * width (the 64-bit variant only exists where the arch provides
 * iowrite64).
 */
#define VFIO_IOWRITE_DECLARATION(size) \
int vfio_pci_core_iowrite##size(struct vfio_pci_core_device *vdev,	\
			bool test_mem, u##size val, void __iomem *io);

VFIO_IOWRITE_DECLARATION(8)
VFIO_IOWRITE_DECLARATION(16)
VFIO_IOWRITE_DECLARATION(32)
#ifdef iowrite64
VFIO_IOWRITE_DECLARATION(64)
#endif
| 216 | |
/*
 * Declare the matching ioread helpers; *val receives the value read.
 * As above, the 64-bit variant is arch-conditional on ioread64.
 */
#define VFIO_IOREAD_DECLARATION(size) \
int vfio_pci_core_ioread##size(struct vfio_pci_core_device *vdev,	\
			bool test_mem, u##size *val, void __iomem *io);

VFIO_IOREAD_DECLARATION(8)
VFIO_IOREAD_DECLARATION(16)
VFIO_IOREAD_DECLARATION(32)
#ifdef ioread64
VFIO_IOREAD_DECLARATION(64)
#endif
| 227 | |
| 228 | static inline bool is_aligned_for_order(struct vm_area_struct *vma, |
| 229 | unsigned long addr, |
| 230 | unsigned long pfn, |
| 231 | unsigned int order) |
| 232 | { |
| 233 | return !(order && (addr < vma->vm_start || |
| 234 | addr + (PAGE_SIZE << order) > vma->vm_end || |
| 235 | !IS_ALIGNED(pfn, 1 << order))); |
| 236 | } |
| 237 | |
/*
 * Fill @phys with the physical ranges backing @attachment; used by the
 * IOMMUFD dma-buf import path (inferred from naming — see the
 * implementation for the exact contract).
 */
int vfio_pci_dma_buf_iommufd_map(struct dma_buf_attachment *attachment,
				 struct dma_buf_phys_vec *phys);
| 240 | |
| 241 | #endif /* VFIO_PCI_CORE_H */ |
| 242 | |