// SPDX-License-Identifier: GPL-2.0-only
/*
 * Topcliff PCH DMA controller driver
 * Copyright (c) 2010 Intel Corporation
 * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
 */

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pch_dma.h>

#include "dmaengine.h"

#define DRV_NAME "pch-dma"

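/*
 * Each channel owns a 4-bit field in DMA CTL0 (channels 0-7) or CTL3
 * (channels 8-11): two mode bits (disable / scatter-gather / one-shot)
 * plus a direction bit at DMA_CTL0_DIR_SHIFT_BITS.
 */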
#define DMA_CTL0_DISABLE 0x0
#define DMA_CTL0_SG 0x1
#define DMA_CTL0_ONESHOT 0x2
#define DMA_CTL0_MODE_MASK_BITS 0x3
#define DMA_CTL0_DIR_SHIFT_BITS 2
#define DMA_CTL0_BITS_PER_CH 4

#define DMA_CTL2_START_SHIFT_BITS 8
#define DMA_CTL2_IRQ_ENABLE_MASK ((1UL << DMA_CTL2_START_SHIFT_BITS) - 1)

#define DMA_STATUS_IDLE 0x0
#define DMA_STATUS_DESC_READ 0x1
#define DMA_STATUS_WAIT 0x2
#define DMA_STATUS_ACCESS 0x3
#define DMA_STATUS_BITS_PER_CH 2
#define DMA_STATUS_MASK_BITS 0x3
#define DMA_STATUS_SHIFT_BITS 16
#define DMA_STATUS_IRQ(x) (0x1 << (x))
#define DMA_STATUS0_ERR(x) (0x1 << ((x) + 8))
#define DMA_STATUS2_ERR(x) (0x1 << (x))

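/*
 * A descriptor's size register encodes both the transfer count and the
 * access width: bits 12-13 select the width, and the width bounds the
 * maximum count (0x3FF for 1- and 2-byte accesses, 0x7FF for 4-byte).
 */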
#define DMA_DESC_WIDTH_SHIFT_BITS 12
#define DMA_DESC_WIDTH_1_BYTE (0x3 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_2_BYTES (0x2 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_WIDTH_4_BYTES (0x0 << DMA_DESC_WIDTH_SHIFT_BITS)
#define DMA_DESC_MAX_COUNT_1_BYTE 0x3FF
#define DMA_DESC_MAX_COUNT_2_BYTES 0x3FF
#define DMA_DESC_MAX_COUNT_4_BYTES 0x7FF
#define DMA_DESC_END_WITHOUT_IRQ 0x0
#define DMA_DESC_END_WITH_IRQ 0x1
#define DMA_DESC_FOLLOW_WITHOUT_IRQ 0x2
#define DMA_DESC_FOLLOW_WITH_IRQ 0x3

#define MAX_CHAN_NR 12

#define DMA_MASK_CTL0_MODE 0x33333333
#define DMA_MASK_CTL2_MODE 0x00003333

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

struct pch_dma_desc_regs {
	u32	dev_addr;
	u32	mem_addr;
	u32	size;
	u32	next;
};

struct pch_dma_regs {
	u32	dma_ctl0;
	u32	dma_ctl1;
	u32	dma_ctl2;
	u32	dma_ctl3;
	u32	dma_sts0;
	u32	dma_sts1;
	u32	dma_sts2;
	u32	reserved3;
	struct pch_dma_desc_regs desc[MAX_CHAN_NR];
};

struct pch_dma_desc {
	struct pch_dma_desc_regs regs;
	struct dma_async_tx_descriptor txd;
	struct list_head	desc_node;
	struct list_head	tx_list;
};

struct pch_dma_chan {
	struct dma_chan		chan;
	void __iomem *membase;
	enum dma_transfer_direction dir;
	struct tasklet_struct	tasklet;
	unsigned long		err_status;

	spinlock_t		lock;

	struct list_head	active_list;
	struct list_head	queue;
	struct list_head	free_list;
	unsigned int		descs_allocated;
};

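/* Per-channel descriptor register offsets, mirroring struct pch_dma_desc_regs */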
#define PDC_DEV_ADDR 0x00
#define PDC_MEM_ADDR 0x04
#define PDC_SIZE 0x08
#define PDC_NEXT 0x0C

#define channel_readl(pdc, name) \
	readl((pdc)->membase + PDC_##name)
#define channel_writel(pdc, name, val) \
	writel((val), (pdc)->membase + PDC_##name)

struct pch_dma {
	struct dma_device	dma;
	void __iomem *membase;
	struct dma_pool		*pool;
	struct pch_dma_regs	regs;
	struct pch_dma_desc_regs ch_regs[MAX_CHAN_NR];
	struct pch_dma_chan	channels[MAX_CHAN_NR];
};

#define PCH_DMA_CTL0 0x00
#define PCH_DMA_CTL1 0x04
#define PCH_DMA_CTL2 0x08
#define PCH_DMA_CTL3 0x0C
#define PCH_DMA_STS0 0x10
#define PCH_DMA_STS1 0x14
#define PCH_DMA_STS2 0x18

#define dma_readl(pd, name) \
	readl((pd)->membase + PCH_DMA_##name)
#define dma_writel(pd, name, val) \
	writel((val), (pd)->membase + PCH_DMA_##name)

static inline
struct pch_dma_desc *to_pd_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct pch_dma_desc, txd);
}

static inline struct pch_dma_chan *to_pd_chan(struct dma_chan *chan)
{
	return container_of(chan, struct pch_dma_chan, chan);
}

static inline struct pch_dma *to_pd(struct dma_device *ddev)
{
	return container_of(ddev, struct pch_dma, dma);
}

static inline struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->active_list,
				struct pch_dma_desc, desc_node);
}

static inline
struct pch_dma_desc *pdc_first_queued(struct pch_dma_chan *pd_chan)
{
	return list_first_entry(&pd_chan->queue,
				struct pch_dma_desc, desc_node);
}

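/*
 * Interrupt enables for all channels live in DMA CTL2: channels 0-7 use
 * bits 0-7, channels 8-11 use bits 16-19 (hence the "+ 8" below).
 */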
static void pdc_enable_irq(struct dma_chan *chan, int enable)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	int pos;

	if (chan->chan_id < 8)
		pos = chan->chan_id;
	else
		pos = chan->chan_id + 8;

	val = dma_readl(pd, CTL2);

	if (enable)
		val |= 0x1 << pos;
	else
		val &= ~(0x1 << pos);

	dma_writel(pd, CTL2, val);

	dev_dbg(chan2dev(chan), "pdc_enable_irq: chan %d -> %x\n",
		chan->chan_id, val);
}

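/*
 * Program the transfer direction bit for this channel in CTL0 (channels
 * 0-7) or CTL3 (channels 8-11), masking the write so the fields of the
 * other channels are left untouched.
 */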
static void pdc_set_dir(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_mode;
	u32 mask_ctl;

	if (chan->chan_id < 8) {
		val = dma_readl(pd, CTL0);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
			    (DMA_CTL0_BITS_PER_CH * chan->chan_id);
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
			   (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
					 DMA_CTL0_DIR_SHIFT_BITS));

		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
		val = dma_readl(pd, CTL3);

		mask_mode = DMA_CTL0_MODE_MASK_BITS <<
			    (DMA_CTL0_BITS_PER_CH * ch);
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
			   (DMA_CTL0_BITS_PER_CH * ch));
		val &= mask_mode;
		if (pd_chan->dir == DMA_MEM_TO_DEV)
			val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
				       DMA_CTL0_DIR_SHIFT_BITS);
		else
			val &= ~(0x1 << (DMA_CTL0_BITS_PER_CH * ch +
					 DMA_CTL0_DIR_SHIFT_BITS));
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_dir: chan %d -> %x\n",
		chan->chan_id, val);
}

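/*
 * Program the 2-bit mode field (disable, scatter-gather or one-shot) for
 * this channel, again preserving the other channels' fields and this
 * channel's direction bit.
 */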
static void pdc_set_mode(struct dma_chan *chan, u32 mode)
{
	struct pch_dma *pd = to_pd(chan->device);
	u32 val;
	u32 mask_ctl;
	u32 mask_dir;

	if (chan->chan_id < 8) {
		mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
			   (DMA_CTL0_BITS_PER_CH * chan->chan_id));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL0);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * chan->chan_id);
		val |= mask_ctl;
		dma_writel(pd, CTL0, val);
	} else {
		int ch = chan->chan_id - 8; /* ch8-->0 ch9-->1 ... ch11->3 */
		mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
			   (DMA_CTL0_BITS_PER_CH * ch));
		mask_dir = 1 << (DMA_CTL0_BITS_PER_CH * ch +
				 DMA_CTL0_DIR_SHIFT_BITS);
		val = dma_readl(pd, CTL3);
		val &= mask_dir;
		val |= mode << (DMA_CTL0_BITS_PER_CH * ch);
		val |= mask_ctl;
		dma_writel(pd, CTL3, val);
	}

	dev_dbg(chan2dev(chan), "pdc_set_mode: chan %d -> %x\n",
		chan->chan_id, val);
}

static u32 pdc_get_status0(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS0);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * pd_chan->chan.chan_id));
}

static u32 pdc_get_status2(struct pch_dma_chan *pd_chan)
{
	struct pch_dma *pd = to_pd(pd_chan->chan.device);
	u32 val;

	val = dma_readl(pd, STS2);
	return DMA_STATUS_MASK_BITS & (val >> (DMA_STATUS_SHIFT_BITS +
			DMA_STATUS_BITS_PER_CH * (pd_chan->chan.chan_id - 8)));
}

static bool pdc_is_idle(struct pch_dma_chan *pd_chan)
{
	u32 sts;

	if (pd_chan->chan.chan_id < 8)
		sts = pdc_get_status0(pd_chan);
	else
		sts = pdc_get_status2(pd_chan);

	return sts == DMA_STATUS_IDLE;
}

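/*
 * Kick off a transfer on an idle channel: a single descriptor is written
 * straight into the channel registers and run in one-shot mode, while a
 * chained descriptor list is started in scatter-gather mode with NEXT
 * pointing at the first descriptor in memory.
 */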
static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc)
{
	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> dev_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.dev_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> mem_addr: %x\n",
		pd_chan->chan.chan_id, desc->regs.mem_addr);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> size: %x\n",
		pd_chan->chan.chan_id, desc->regs.size);
	dev_dbg(chan2dev(&pd_chan->chan), "chan %d -> next: %x\n",
		pd_chan->chan.chan_id, desc->regs.next);

	if (list_empty(&desc->tx_list)) {
		channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr);
		channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr);
		channel_writel(pd_chan, SIZE, desc->regs.size);
		channel_writel(pd_chan, NEXT, desc->regs.next);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_ONESHOT);
	} else {
		channel_writel(pd_chan, NEXT, desc->txd.phys);
		pdc_set_mode(&pd_chan->chan, DMA_CTL0_SG);
	}
}

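/*
 * Retire a completed descriptor (and any chained ones) back onto the free
 * list, then invoke the client callback saved beforehand so the callback
 * runs after the lists are consistent.
 */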
static void pdc_chain_complete(struct pch_dma_chan *pd_chan,
			       struct pch_dma_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct dmaengine_desc_callback cb;

	dmaengine_desc_get_callback(txd, &cb);
	list_splice_init(&desc->tx_list, &pd_chan->free_list);
	list_move(&desc->desc_node, &pd_chan->free_list);

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static void pdc_complete_all(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	BUG_ON(!pdc_is_idle(pd_chan));

	if (!list_empty(&pd_chan->queue))
		pdc_dostart(pd_chan, pdc_first_queued(pd_chan));

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &pd_chan->active_list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);
}

static void pdc_handle_error(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *bad_desc;

	bad_desc = pdc_first_active(pd_chan);
	list_del(&bad_desc->desc_node);

	list_splice_init(&pd_chan->queue, pd_chan->active_list.prev);

	if (!list_empty(&pd_chan->active_list))
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));

	dev_crit(chan2dev(&pd_chan->chan), "Bad descriptor submitted\n");
	dev_crit(chan2dev(&pd_chan->chan), "descriptor cookie: %d\n",
		 bad_desc->txd.cookie);

	pdc_chain_complete(pd_chan, bad_desc);
}

static void pdc_advance_work(struct pch_dma_chan *pd_chan)
{
	if (list_empty(&pd_chan->active_list) ||
	    list_is_singular(&pd_chan->active_list)) {
		pdc_complete_all(pd_chan);
	} else {
		pdc_chain_complete(pd_chan, pdc_first_active(pd_chan));
		pdc_dostart(pd_chan, pdc_first_active(pd_chan));
	}
}

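/*
 * dmaengine tx_submit hook: start the descriptor immediately if the
 * channel has no active work, otherwise append it to the pending queue.
 */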
static dma_cookie_t pd_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct pch_dma_desc *desc = to_pd_desc(txd);
	struct pch_dma_chan *pd_chan = to_pd_chan(txd->chan);

	spin_lock(&pd_chan->lock);

	if (list_empty(&pd_chan->active_list)) {
		list_add_tail(&desc->desc_node, &pd_chan->active_list);
		pdc_dostart(pd_chan, desc);
	} else {
		list_add_tail(&desc->desc_node, &pd_chan->queue);
	}

	spin_unlock(&pd_chan->lock);
	return 0;
}

static struct pch_dma_desc *pdc_alloc_desc(struct dma_chan *chan, gfp_t flags)
{
	struct pch_dma_desc *desc = NULL;
	struct pch_dma *pd = to_pd(chan->device);
	dma_addr_t addr;

	desc = dma_pool_zalloc(pd->pool, flags, &addr);
	if (desc) {
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.tx_submit = pd_tx_submit;
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.phys = addr;
	}

	return desc;
}

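/*
 * Fetch a descriptor for a new transfer: reuse the first ACKed entry on
 * the free list, falling back to an atomic pool allocation when none is
 * available.
 */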
static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan)
{
	struct pch_dma_desc *desc, *_d;
	struct pch_dma_desc *ret = NULL;
	int i = 0;

	spin_lock(&pd_chan->lock);
	list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc);
	}
	spin_unlock(&pd_chan->lock);
	dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i);

	if (!ret) {
		ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC);
		if (ret) {
			spin_lock(&pd_chan->lock);
			pd_chan->descs_allocated++;
			spin_unlock(&pd_chan->lock);
		} else {
			dev_err(chan2dev(&pd_chan->chan),
				"failed to alloc desc\n");
		}
	}

	return ret;
}

static void pdc_desc_put(struct pch_dma_chan *pd_chan,
			 struct pch_dma_desc *desc)
{
	if (desc) {
		spin_lock(&pd_chan->lock);
		list_splice_init(&desc->tx_list, &pd_chan->free_list);
		list_add(&desc->desc_node, &pd_chan->free_list);
		spin_unlock(&pd_chan->lock);
	}
}

static int pd_alloc_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc;
	LIST_HEAD(tmp_list);
	int i;

	if (!pdc_is_idle(pd_chan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	if (!list_empty(&pd_chan->free_list))
		return pd_chan->descs_allocated;

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = pdc_alloc_desc(chan, GFP_KERNEL);

		if (!desc) {
			dev_warn(chan2dev(chan),
				 "Only allocated %d initial descriptors\n", i);
			break;
		}

		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irq(&pd_chan->lock);
	list_splice(&tmp_list, &pd_chan->free_list);
	pd_chan->descs_allocated = i;
	dma_cookie_init(chan);
	spin_unlock_irq(&pd_chan->lock);

	pdc_enable_irq(chan, 1);

	return pd_chan->descs_allocated;
}

static void pd_free_chan_resources(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma *pd = to_pd(chan->device);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(tmp_list);

	BUG_ON(!pdc_is_idle(pd_chan));
	BUG_ON(!list_empty(&pd_chan->active_list));
	BUG_ON(!list_empty(&pd_chan->queue));

	spin_lock_irq(&pd_chan->lock);
	list_splice_init(&pd_chan->free_list, &tmp_list);
	pd_chan->descs_allocated = 0;
	spin_unlock_irq(&pd_chan->lock);

	list_for_each_entry_safe(desc, _d, &tmp_list, desc_node)
		dma_pool_free(pd->pool, desc, desc->txd.phys);

	pdc_enable_irq(chan, 0);
}

static enum dma_status pd_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				    struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}

static void pd_issue_pending(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);

	if (pdc_is_idle(pd_chan)) {
		spin_lock(&pd_chan->lock);
		pdc_advance_work(pd_chan);
		spin_unlock(&pd_chan->lock);
	}
}

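/*
 * Build a hardware descriptor chain from the scatterlist: each entry's
 * NEXT field carries the physical address of its successor ORed with the
 * "follow" control bits, and the final entry is terminated with or
 * without an IRQ depending on DMA_PREP_INTERRUPT.
 */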
static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction direction, unsigned long flags,
			void *context)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_slave *pd_slave = chan->private;
	struct pch_dma_desc *first = NULL;
	struct pch_dma_desc *prev = NULL;
	struct pch_dma_desc *desc = NULL;
	struct scatterlist *sg;
	dma_addr_t reg;
	int i;

	if (unlikely(!sg_len)) {
		dev_info(chan2dev(chan), "prep_slave_sg: length is zero!\n");
		return NULL;
	}

	if (direction == DMA_DEV_TO_MEM)
		reg = pd_slave->rx_reg;
	else if (direction == DMA_MEM_TO_DEV)
		reg = pd_slave->tx_reg;
	else
		return NULL;

	pd_chan->dir = direction;
	pdc_set_dir(chan);

	for_each_sg(sgl, sg, sg_len, i) {
		desc = pdc_desc_get(pd_chan);

		if (!desc)
			goto err_desc_get;

		desc->regs.dev_addr = reg;
		desc->regs.mem_addr = sg_dma_address(sg);
		desc->regs.size = sg_dma_len(sg);
		desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;

		switch (pd_slave->width) {
		case PCH_DMA_WIDTH_1_BYTE:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_1_BYTE;
			break;
		case PCH_DMA_WIDTH_2_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_2_BYTES;
			break;
		case PCH_DMA_WIDTH_4_BYTES:
			if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES)
				goto err_desc_get;
			desc->regs.size |= DMA_DESC_WIDTH_4_BYTES;
			break;
		default:
			goto err_desc_get;
		}

		if (!first) {
			first = desc;
		} else {
			prev->regs.next |= desc->txd.phys;
			list_add_tail(&desc->desc_node, &first->tx_list);
		}

		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		desc->regs.next = DMA_DESC_END_WITH_IRQ;
	else
		desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;

	first->txd.cookie = -EBUSY;
	desc->txd.flags = flags;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
	pdc_desc_put(pd_chan, first);
	return NULL;
}

static int pd_device_terminate_all(struct dma_chan *chan)
{
	struct pch_dma_chan *pd_chan = to_pd_chan(chan);
	struct pch_dma_desc *desc, *_d;
	LIST_HEAD(list);

	spin_lock_irq(&pd_chan->lock);

	pdc_set_mode(&pd_chan->chan, DMA_CTL0_DISABLE);

	list_splice_init(&pd_chan->active_list, &list);
	list_splice_init(&pd_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		pdc_chain_complete(pd_chan, desc);

	spin_unlock_irq(&pd_chan->lock);

	return 0;
}

static void pdc_tasklet(struct tasklet_struct *t)
{
	struct pch_dma_chan *pd_chan = from_tasklet(pd_chan, t, tasklet);
	unsigned long flags;

	if (!pdc_is_idle(pd_chan)) {
		dev_err(chan2dev(&pd_chan->chan),
			"BUG: handle non-idle channel in tasklet\n");
		return;
	}

	spin_lock_irqsave(&pd_chan->lock, flags);
	if (test_and_clear_bit(0, &pd_chan->err_status))
		pdc_handle_error(pd_chan);
	else
		pdc_advance_work(pd_chan);
	spin_unlock_irqrestore(&pd_chan->lock, flags);
}

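/*
 * Shared interrupt handler: STS0 reports IRQ and error bits for channels
 * 0-7, STS2 for channels 8-11. Handling is deferred to the per-channel
 * tasklet; writing the status values back clears the serviced bits.
 */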
static irqreturn_t pd_irq(int irq, void *devid)
{
	struct pch_dma *pd = (struct pch_dma *)devid;
	struct pch_dma_chan *pd_chan;
	u32 sts0;
	u32 sts2;
	int i;
	int ret0 = IRQ_NONE;
	int ret2 = IRQ_NONE;

	sts0 = dma_readl(pd, STS0);
	sts2 = dma_readl(pd, STS2);

	dev_dbg(pd->dma.dev, "pd_irq sts0: %x\n", sts0);

	for (i = 0; i < pd->dma.chancnt; i++) {
		pd_chan = &pd->channels[i];

		if (i < 8) {
			if (sts0 & DMA_STATUS_IRQ(i)) {
				if (sts0 & DMA_STATUS0_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret0 = IRQ_HANDLED;
			}
		} else {
			if (sts2 & DMA_STATUS_IRQ(i - 8)) {
				if (sts2 & DMA_STATUS2_ERR(i))
					set_bit(0, &pd_chan->err_status);

				tasklet_schedule(&pd_chan->tasklet);
				ret2 = IRQ_HANDLED;
			}
		}
	}

	/* clear interrupt bits in status register */
	if (ret0)
		dma_writel(pd, STS0, sts0);
	if (ret2)
		dma_writel(pd, STS2, sts2);

	return ret0 | ret2;
}

static void __maybe_unused pch_dma_save_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	pd->regs.dma_ctl0 = dma_readl(pd, CTL0);
	pd->regs.dma_ctl1 = dma_readl(pd, CTL1);
	pd->regs.dma_ctl2 = dma_readl(pd, CTL2);
	pd->regs.dma_ctl3 = dma_readl(pd, CTL3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		pd->ch_regs[i].dev_addr = channel_readl(pd_chan, DEV_ADDR);
		pd->ch_regs[i].mem_addr = channel_readl(pd_chan, MEM_ADDR);
		pd->ch_regs[i].size = channel_readl(pd_chan, SIZE);
		pd->ch_regs[i].next = channel_readl(pd_chan, NEXT);

		i++;
	}
}

static void __maybe_unused pch_dma_restore_regs(struct pch_dma *pd)
{
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;
	int i = 0;

	dma_writel(pd, CTL0, pd->regs.dma_ctl0);
	dma_writel(pd, CTL1, pd->regs.dma_ctl1);
	dma_writel(pd, CTL2, pd->regs.dma_ctl2);
	dma_writel(pd, CTL3, pd->regs.dma_ctl3);

	list_for_each_entry_safe(chan, _c, &pd->dma.channels, device_node) {
		pd_chan = to_pd_chan(chan);

		channel_writel(pd_chan, DEV_ADDR, pd->ch_regs[i].dev_addr);
		channel_writel(pd_chan, MEM_ADDR, pd->ch_regs[i].mem_addr);
		channel_writel(pd_chan, SIZE, pd->ch_regs[i].size);
		channel_writel(pd_chan, NEXT, pd->ch_regs[i].next);

		i++;
	}
}

static int __maybe_unused pch_dma_suspend(struct device *dev)
{
	struct pch_dma *pd = dev_get_drvdata(dev);

	if (pd)
		pch_dma_save_regs(pd);

	return 0;
}

static int __maybe_unused pch_dma_resume(struct device *dev)
{
	struct pch_dma *pd = dev_get_drvdata(dev);

	if (pd)
		pch_dma_restore_regs(pd);

	return 0;
}

static int pch_dma_probe(struct pci_dev *pdev,
			 const struct pci_device_id *id)
{
	struct pch_dma *pd;
	struct pch_dma_regs *regs;
	unsigned int nr_channels;
	int err;
	int i;

	nr_channels = id->driver_data;
	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pci_set_drvdata(pdev, pd);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device\n");
		goto err_free_mem;
	}

	if (!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Cannot find proper base address\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "Cannot set proper DMA config\n");
		goto err_free_res;
	}

	regs = pd->membase = pci_iomap(pdev, 1, 0);
	if (!pd->membase) {
		dev_err(&pdev->dev, "Cannot map MMIO registers\n");
		err = -ENOMEM;
		goto err_free_res;
	}

	pci_set_master(pdev);
	pd->dma.dev = &pdev->dev;

	err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
	if (err) {
		dev_err(&pdev->dev, "Failed to request IRQ\n");
		goto err_iounmap;
	}

	pd->pool = dma_pool_create("pch_dma_desc_pool", &pdev->dev,
				   sizeof(struct pch_dma_desc), 4, 0);
	if (!pd->pool) {
		dev_err(&pdev->dev, "Failed to alloc DMA descriptors\n");
		err = -ENOMEM;
		goto err_free_irq;
	}

	INIT_LIST_HEAD(&pd->dma.channels);

	for (i = 0; i < nr_channels; i++) {
		struct pch_dma_chan *pd_chan = &pd->channels[i];

		pd_chan->chan.device = &pd->dma;
		dma_cookie_init(&pd_chan->chan);

		pd_chan->membase = &regs->desc[i];

		spin_lock_init(&pd_chan->lock);

		INIT_LIST_HEAD(&pd_chan->active_list);
		INIT_LIST_HEAD(&pd_chan->queue);
		INIT_LIST_HEAD(&pd_chan->free_list);

		tasklet_setup(&pd_chan->tasklet, pdc_tasklet);
		list_add_tail(&pd_chan->chan.device_node, &pd->dma.channels);
	}

	dma_cap_zero(pd->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, pd->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, pd->dma.cap_mask);

	pd->dma.device_alloc_chan_resources = pd_alloc_chan_resources;
	pd->dma.device_free_chan_resources = pd_free_chan_resources;
	pd->dma.device_tx_status = pd_tx_status;
	pd->dma.device_issue_pending = pd_issue_pending;
	pd->dma.device_prep_slave_sg = pd_prep_slave_sg;
	pd->dma.device_terminate_all = pd_device_terminate_all;

	err = dma_async_device_register(&pd->dma);
	if (err) {
		dev_err(&pdev->dev, "Failed to register DMA device\n");
		goto err_free_pool;
	}

	return 0;

err_free_pool:
	dma_pool_destroy(pd->pool);
err_free_irq:
	free_irq(pdev->irq, pd);
err_iounmap:
	pci_iounmap(pdev, pd->membase);
err_free_res:
	pci_release_regions(pdev);
err_disable_pdev:
	pci_disable_device(pdev);
err_free_mem:
	kfree(pd);
	return err;
}

static void pch_dma_remove(struct pci_dev *pdev)
{
	struct pch_dma *pd = pci_get_drvdata(pdev);
	struct pch_dma_chan *pd_chan;
	struct dma_chan *chan, *_c;

	if (pd) {
		dma_async_device_unregister(&pd->dma);

		free_irq(pdev->irq, pd);

		list_for_each_entry_safe(chan, _c, &pd->dma.channels,
					 device_node) {
			pd_chan = to_pd_chan(chan);

			tasklet_kill(&pd_chan->tasklet);
		}

		dma_pool_destroy(pd->pool);
		pci_iounmap(pdev, pd->membase);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		kfree(pd);
	}
}

/* PCI Device ID of DMA device */
#define PCI_DEVICE_ID_EG20T_PCH_DMA_8CH 0x8810
#define PCI_DEVICE_ID_EG20T_PCH_DMA_4CH 0x8815
#define PCI_DEVICE_ID_ML7213_DMA1_8CH 0x8026
#define PCI_DEVICE_ID_ML7213_DMA2_8CH 0x802B
#define PCI_DEVICE_ID_ML7213_DMA3_4CH 0x8034
#define PCI_DEVICE_ID_ML7213_DMA4_12CH 0x8032
#define PCI_DEVICE_ID_ML7223_DMA1_4CH 0x800B
#define PCI_DEVICE_ID_ML7223_DMA2_4CH 0x800E
#define PCI_DEVICE_ID_ML7223_DMA3_4CH 0x8017
#define PCI_DEVICE_ID_ML7223_DMA4_4CH 0x803B
#define PCI_DEVICE_ID_ML7831_DMA1_8CH 0x8810
#define PCI_DEVICE_ID_ML7831_DMA2_4CH 0x8815

static const struct pci_device_id pch_dma_id_table[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8 }, /* UART Video */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA2_8CH), 8 }, /* PCMIF SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA3_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA4_12CH), 12 }, /* I2S */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA1_4CH), 4 }, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4 }, /* Video SPI */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4 }, /* Security */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4 }, /* FPGA */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8 }, /* UART */
	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4 }, /* SPI */
	{ 0, },
};

static SIMPLE_DEV_PM_OPS(pch_dma_pm_ops, pch_dma_suspend, pch_dma_resume);

static struct pci_driver pch_dma_driver = {
	.name		= DRV_NAME,
	.id_table	= pch_dma_id_table,
	.probe		= pch_dma_probe,
	.remove		= pch_dma_remove,
	.driver.pm	= &pch_dma_pm_ops,
};

module_pci_driver(pch_dma_driver);

MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
		   "DMA controller driver");
MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, pch_dma_id_table);