// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016, The Linux Foundation. All rights reserved.
 * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dma/qcom_adm.h>
#include <linux/dma/qcom_bam_dma.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mtd/nand-qpic-common.h>

/**
 * qcom_free_bam_transaction() - Frees the BAM transaction memory
 * @nandc: qpic nand controller
 *
 * This function frees the BAM transaction memory
 */
void qcom_free_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	kfree(bam_txn);
}
EXPORT_SYMBOL(qcom_free_bam_transaction);

/**
 * qcom_alloc_bam_transaction() - allocate BAM transaction
 * @nandc: qpic nand controller
 *
 * This function will allocate and initialize the BAM transaction structure
 */
struct bam_transaction *
qcom_alloc_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn;
	size_t bam_txn_size;
	unsigned int num_cw = nandc->max_cwperpage;
	void *bam_txn_buf;

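	/*
	 * The whole transaction lives in a single allocation: the struct
	 * itself, followed by the command-element array and the two
	 * scatterlist arrays, each sized per codeword (a descriptive note
	 * matching the pointer arithmetic below):
	 *
	 *	bam_txn | bam_ce | cmd_sgl | data_sgl
	 */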
	bam_txn_size =
		sizeof(*bam_txn) + num_cw *
		((sizeof(*bam_txn->bam_ce) * QPIC_PER_CW_CMD_ELEMENTS) +
		 (sizeof(*bam_txn->cmd_sgl) * QPIC_PER_CW_CMD_SGL) +
		 (sizeof(*bam_txn->data_sgl) * QPIC_PER_CW_DATA_SGL));

	bam_txn_buf = kzalloc(bam_txn_size, GFP_KERNEL);
	if (!bam_txn_buf)
		return NULL;

	bam_txn = bam_txn_buf;
	bam_txn_buf += sizeof(*bam_txn);

	bam_txn->bam_ce = bam_txn_buf;
	bam_txn->bam_ce_nitems = QPIC_PER_CW_CMD_ELEMENTS * num_cw;
	bam_txn_buf += sizeof(*bam_txn->bam_ce) * bam_txn->bam_ce_nitems;

	bam_txn->cmd_sgl = bam_txn_buf;
	bam_txn->cmd_sgl_nitems = QPIC_PER_CW_CMD_SGL * num_cw;
	bam_txn_buf += sizeof(*bam_txn->cmd_sgl) * bam_txn->cmd_sgl_nitems;

	bam_txn->data_sgl = bam_txn_buf;
	bam_txn->data_sgl_nitems = QPIC_PER_CW_DATA_SGL * num_cw;

	init_completion(&bam_txn->txn_done);

	return bam_txn;
}
EXPORT_SYMBOL(qcom_alloc_bam_transaction);

/**
 * qcom_clear_bam_transaction() - Clears the BAM transaction
 * @nandc: qpic nand controller
 *
 * This function will clear the BAM transaction indexes.
 */
void qcom_clear_bam_transaction(struct qcom_nand_controller *nandc)
{
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (!nandc->props->supports_bam)
		return;

	memset(&bam_txn->bam_positions, 0, sizeof(bam_txn->bam_positions));
	bam_txn->last_data_desc = NULL;

	sg_init_table(bam_txn->cmd_sgl, bam_txn->cmd_sgl_nitems);
	sg_init_table(bam_txn->data_sgl, bam_txn->data_sgl_nitems);

	reinit_completion(&bam_txn->txn_done);
}
EXPORT_SYMBOL(qcom_clear_bam_transaction);

/**
 * qcom_qpic_bam_dma_done() - Callback for DMA descriptor completion
 * @data: data pointer
 *
 * This function is a callback for DMA descriptor completion
 */
void qcom_qpic_bam_dma_done(void *data)
{
	struct bam_transaction *bam_txn = data;

	complete(&bam_txn->txn_done);
}
EXPORT_SYMBOL(qcom_qpic_bam_dma_done);

/**
 * qcom_nandc_dev_to_mem() - Sync the register read buffer for CPU or device
 * @nandc: qpic nand controller
 * @is_cpu: true to sync for CPU access, false to sync for the device
 *
 * This function syncs the DMA-mapped register read buffer for either CPU
 * or device access.
 */
inline void qcom_nandc_dev_to_mem(struct qcom_nand_controller *nandc, bool is_cpu)
{
	if (!nandc->props->supports_bam)
		return;

	if (is_cpu)
		dma_sync_single_for_cpu(nandc->dev, nandc->reg_read_dma,
					MAX_REG_RD *
					sizeof(*nandc->reg_read_buf),
					DMA_FROM_DEVICE);
	else
		dma_sync_single_for_device(nandc->dev, nandc->reg_read_dma,
					   MAX_REG_RD *
					   sizeof(*nandc->reg_read_buf),
					   DMA_FROM_DEVICE);
}
EXPORT_SYMBOL(qcom_nandc_dev_to_mem);

/**
 * qcom_prepare_bam_async_desc() - Prepare DMA descriptor
 * @nandc: qpic nand controller
 * @chan: dma channel
 * @flags: flags to control DMA descriptor preparation
 *
 * This function maps the scatter gather list for DMA transfer and forms the
 * DMA descriptor for BAM. This descriptor will be added to the NAND DMA
 * descriptor queue which will be submitted to the DMA engine.
 */
int qcom_prepare_bam_async_desc(struct qcom_nand_controller *nandc,
				struct dma_chan *chan, unsigned long flags)
{
	struct desc_info *desc;
	struct scatterlist *sgl;
	unsigned int sgl_cnt;
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	enum dma_transfer_direction dir_eng;
	struct dma_async_tx_descriptor *dma_desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

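	/*
	 * Each channel consumes the scatterlist entries queued since its
	 * last descriptor: the window [*_sgl_start, *_sgl_pos). Advancing
	 * *_sgl_start below opens a fresh window for the next descriptor.
	 */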
	if (chan == nandc->cmd_chan) {
		sgl = &bam_txn->cmd_sgl[bam_txn->cmd_sgl_start];
		sgl_cnt = bam_txn->cmd_sgl_pos - bam_txn->cmd_sgl_start;
		bam_txn->cmd_sgl_start = bam_txn->cmd_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else if (chan == nandc->tx_chan) {
		sgl = &bam_txn->data_sgl[bam_txn->tx_sgl_start];
		sgl_cnt = bam_txn->tx_sgl_pos - bam_txn->tx_sgl_start;
		bam_txn->tx_sgl_start = bam_txn->tx_sgl_pos;
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	} else {
		sgl = &bam_txn->data_sgl[bam_txn->rx_sgl_start];
		sgl_cnt = bam_txn->rx_sgl_pos - bam_txn->rx_sgl_start;
		bam_txn->rx_sgl_start = bam_txn->rx_sgl_pos;
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	}

	sg_mark_end(sgl + sgl_cnt - 1);
	ret = dma_map_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
	if (ret == 0) {
		dev_err(nandc->dev, "failure in mapping desc\n");
		kfree(desc);
		return -ENOMEM;
	}

	desc->sgl_cnt = sgl_cnt;
	desc->bam_sgl = sgl;

	dma_desc = dmaengine_prep_slave_sg(chan, sgl, sgl_cnt, dir_eng,
					   flags);

	if (!dma_desc) {
		dev_err(nandc->dev, "failure in prep desc\n");
		dma_unmap_sg(nandc->dev, sgl, sgl_cnt, desc->dir);
		kfree(desc);
		return -EINVAL;
	}

	desc->dma_desc = dma_desc;

	/* update last data/command descriptor */
	if (chan == nandc->cmd_chan)
		bam_txn->last_cmd_desc = dma_desc;
	else
		bam_txn->last_data_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
}
EXPORT_SYMBOL(qcom_prepare_bam_async_desc);

/**
 * qcom_prep_bam_dma_desc_cmd() - Prepares the command descriptor for BAM DMA
 * @nandc: qpic nand controller
 * @read: read or write type
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to write to
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares the command descriptor for BAM DMA
 * which will be used for NAND register reads and writes.
 */
int qcom_prep_bam_dma_desc_cmd(struct qcom_nand_controller *nandc, bool read,
			       int reg_off, const void *vaddr,
			       int size, unsigned int flags)
{
	int bam_ce_size;
	int i, ret;
	struct bam_cmd_element *bam_ce_buffer;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	u32 offset;

	if (bam_txn->bam_ce_pos + size > bam_txn->bam_ce_nitems) {
		dev_err(nandc->dev, "BAM %s array is full\n", "CE");
		return -EINVAL;
	}

	bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_pos];

	/* fill the command desc */
	for (i = 0; i < size; i++) {
		offset = nandc->props->bam_offset + reg_off + 4 * i;
		if (read)
			bam_prep_ce(&bam_ce_buffer[i],
				    offset, BAM_READ_COMMAND,
				    reg_buf_dma_addr(nandc,
						     (__le32 *)vaddr + i));
		else
			bam_prep_ce_le32(&bam_ce_buffer[i],
					 offset, BAM_WRITE_COMMAND,
					 *((__le32 *)vaddr + i));
	}

	bam_txn->bam_ce_pos += size;

	/* use the separate sgl after this command */
	if (flags & NAND_BAM_NEXT_SGL) {
		if (bam_txn->cmd_sgl_pos >= bam_txn->cmd_sgl_nitems) {
			dev_err(nandc->dev, "BAM %s array is full\n",
				"CMD sgl");
			return -EINVAL;
		}

		bam_ce_buffer = &bam_txn->bam_ce[bam_txn->bam_ce_start];
		bam_ce_size = (bam_txn->bam_ce_pos -
			       bam_txn->bam_ce_start) *
			      sizeof(struct bam_cmd_element);
		sg_set_buf(&bam_txn->cmd_sgl[bam_txn->cmd_sgl_pos],
			   bam_ce_buffer, bam_ce_size);
		bam_txn->cmd_sgl_pos++;
		bam_txn->bam_ce_start = bam_txn->bam_ce_pos;

		if (flags & NAND_BAM_NWD) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
							  DMA_PREP_FENCE | DMA_PREP_CMD);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(qcom_prep_bam_dma_desc_cmd);
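
/*
 * Illustrative note: each BAM command element (CE) encodes one register
 * access to be executed by the BAM in command mode, conceptually
 *
 *	bam_prep_ce_le32(ce, reg_addr, BAM_WRITE_COMMAND, value);
 *
 * for a write, or BAM_READ_COMMAND with a DMA address for the read-back
 * buffer. The CE array is then wrapped in a scatterlist entry and sent
 * down the command channel like any other descriptor.
 */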

/**
 * qcom_prep_bam_dma_desc_data() - Prepares the data descriptor for BAM DMA
 * @nandc: qpic nand controller
 * @read: read or write type
 * @vaddr: virtual address of the buffer we want to write to
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares the data descriptor for BAM DMA which
 * will be used for NAND data reads and writes.
 */
int qcom_prep_bam_dma_desc_data(struct qcom_nand_controller *nandc, bool read,
				const void *vaddr, int size, unsigned int flags)
{
	int ret;
	struct bam_transaction *bam_txn = nandc->bam_txn;

	if (read) {
		if (bam_txn->rx_sgl_pos >= bam_txn->data_sgl_nitems) {
			dev_err(nandc->dev, "BAM %s array is full\n", "RX sgl");
			return -EINVAL;
		}

		sg_set_buf(&bam_txn->data_sgl[bam_txn->rx_sgl_pos],
			   vaddr, size);
		bam_txn->rx_sgl_pos++;
	} else {
		if (bam_txn->tx_sgl_pos >= bam_txn->data_sgl_nitems) {
			dev_err(nandc->dev, "BAM %s array is full\n", "TX sgl");
			return -EINVAL;
		}

		sg_set_buf(&bam_txn->data_sgl[bam_txn->tx_sgl_pos],
			   vaddr, size);
		bam_txn->tx_sgl_pos++;

		/*
		 * BAM will only set EOT for DMA_PREP_INTERRUPT so if this flag
		 * is not set, form the DMA descriptor
		 */
		if (!(flags & NAND_BAM_NO_EOT)) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
							  DMA_PREP_INTERRUPT);
			if (ret)
				return ret;
		}
	}

	return 0;
}
EXPORT_SYMBOL(qcom_prep_bam_dma_desc_data);

/**
 * qcom_prep_adm_dma_desc() - Prepare descriptor for ADM DMA
 * @nandc: qpic nand controller
 * @read: read or write type
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to write to
 * @size: ADM DMA transaction size in bytes
 * @flow_control: use flow control
 *
 * This function prepares a descriptor for the ADM DMA engine.
 */
int qcom_prep_adm_dma_desc(struct qcom_nand_controller *nandc, bool read,
			   int reg_off, const void *vaddr, int size,
			   bool flow_control)
{
	struct qcom_adm_peripheral_config periph_conf = {};
	struct dma_async_tx_descriptor *dma_desc;
	struct dma_slave_config slave_conf = {0};
	enum dma_transfer_direction dir_eng;
	struct desc_info *desc;
	struct scatterlist *sgl;
	int ret;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	sgl = &desc->adm_sgl;

	sg_init_one(sgl, vaddr, size);

	if (read) {
		dir_eng = DMA_DEV_TO_MEM;
		desc->dir = DMA_FROM_DEVICE;
	} else {
		dir_eng = DMA_MEM_TO_DEV;
		desc->dir = DMA_TO_DEVICE;
	}

	ret = dma_map_sg(nandc->dev, sgl, 1, desc->dir);
	if (!ret) {
		ret = -ENOMEM;
		goto err;
	}

	slave_conf.device_fc = flow_control;
	if (read) {
		slave_conf.src_maxburst = 16;
		slave_conf.src_addr = nandc->base_dma + reg_off;
		if (nandc->data_crci) {
			periph_conf.crci = nandc->data_crci;
			slave_conf.peripheral_config = &periph_conf;
			slave_conf.peripheral_size = sizeof(periph_conf);
		}
	} else {
		slave_conf.dst_maxburst = 16;
		slave_conf.dst_addr = nandc->base_dma + reg_off;
		if (nandc->cmd_crci) {
			periph_conf.crci = nandc->cmd_crci;
			slave_conf.peripheral_config = &periph_conf;
			slave_conf.peripheral_size = sizeof(periph_conf);
		}
	}

	ret = dmaengine_slave_config(nandc->chan, &slave_conf);
	if (ret) {
		dev_err(nandc->dev, "failed to configure dma channel\n");
		goto err;
	}

	dma_desc = dmaengine_prep_slave_sg(nandc->chan, sgl, 1, dir_eng, 0);
	if (!dma_desc) {
		dev_err(nandc->dev, "failed to prepare desc\n");
		ret = -EINVAL;
		goto err;
	}

	desc->dma_desc = dma_desc;

	list_add_tail(&desc->node, &nandc->desc_list);

	return 0;
err:
	kfree(desc);

	return ret;
}
EXPORT_SYMBOL(qcom_prep_adm_dma_desc);

/**
 * qcom_read_reg_dma() - read a given number of registers to the reg_read_buf pointer
 * @nandc: qpic nand controller
 * @first: offset of the first register in the contiguous block
 * @num_regs: number of registers to read
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares a descriptor to read a given number of
 * contiguous registers to the reg_read_buf pointer.
 */
int qcom_read_reg_dma(struct qcom_nand_controller *nandc, int first,
		      int num_regs, unsigned int flags)
{
	bool flow_control = false;
	void *vaddr;

	vaddr = nandc->reg_read_buf + nandc->reg_read_pos;
	nandc->reg_read_pos += num_regs;

	if (first == NAND_DEV_CMD_VLD || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, first);

	if (nandc->props->supports_bam)
		return qcom_prep_bam_dma_desc_cmd(nandc, true, first, vaddr,
						  num_regs, flags);

	if (first == NAND_READ_ID || first == NAND_FLASH_STATUS)
		flow_control = true;

	return qcom_prep_adm_dma_desc(nandc, true, first, vaddr,
				      num_regs * sizeof(u32), flow_control);
}
EXPORT_SYMBOL(qcom_read_reg_dma);
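
/*
 * Results land sequentially in nandc->reg_read_buf; nandc->reg_read_pos is
 * the cursor and is reset by qcom_clear_read_regs(). A minimal sketch of
 * reading back the flash status (assuming reg_read_buf holds __le32 values,
 * as the prep helpers above do):
 *
 *	qcom_clear_read_regs(nandc);
 *	qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, NAND_BAM_NEXT_SGL);
 *	qcom_submit_descs(nandc);
 *	qcom_nandc_dev_to_mem(nandc, true);
 *	status = le32_to_cpu(nandc->reg_read_buf[0]);
 */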

/**
 * qcom_write_reg_dma() - write a given number of registers
 * @nandc: qpic nand controller
 * @vaddr: contiguous memory from where register value will
 *	   be written
 * @first: offset of the first register in the contiguous block
 * @num_regs: number of registers to write
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares a descriptor to write a given number of
 * contiguous registers
 */
int qcom_write_reg_dma(struct qcom_nand_controller *nandc, __le32 *vaddr,
		       int first, int num_regs, unsigned int flags)
{
	bool flow_control = false;

	if (first == NAND_EXEC_CMD)
		flags |= NAND_BAM_NWD;

	if (first == NAND_DEV_CMD1_RESTORE || first == NAND_DEV_CMD1)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD1);

	if (first == NAND_DEV_CMD_VLD_RESTORE || first == NAND_DEV_CMD_VLD)
		first = dev_cmd_reg_addr(nandc, NAND_DEV_CMD_VLD);

	if (nandc->props->supports_bam)
		return qcom_prep_bam_dma_desc_cmd(nandc, false, first, vaddr,
						  num_regs, flags);

	if (first == NAND_FLASH_CMD)
		flow_control = true;

	return qcom_prep_adm_dma_desc(nandc, false, first, vaddr,
				      num_regs * sizeof(u32), flow_control);
}
EXPORT_SYMBOL(qcom_write_reg_dma);

/**
 * qcom_read_data_dma() - transfer data
 * @nandc: qpic nand controller
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to write to
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares a DMA descriptor to transfer data from the
 * controller's internal buffer to the buffer 'vaddr'
 */
int qcom_read_data_dma(struct qcom_nand_controller *nandc, int reg_off,
		       const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->supports_bam)
		return qcom_prep_bam_dma_desc_data(nandc, true, vaddr, size, flags);

	return qcom_prep_adm_dma_desc(nandc, true, reg_off, vaddr, size, false);
}
EXPORT_SYMBOL(qcom_read_data_dma);

/**
 * qcom_write_data_dma() - transfer data
 * @nandc: qpic nand controller
 * @reg_off: offset within the controller's data buffer
 * @vaddr: virtual address of the buffer we want to read from
 * @size: DMA transaction size in bytes
 * @flags: flags to control DMA descriptor preparation
 *
 * This function prepares a DMA descriptor to transfer data from
 * 'vaddr' to the controller's internal buffer
 */
int qcom_write_data_dma(struct qcom_nand_controller *nandc, int reg_off,
			const u8 *vaddr, int size, unsigned int flags)
{
	if (nandc->props->supports_bam)
		return qcom_prep_bam_dma_desc_data(nandc, false, vaddr, size, flags);

	return qcom_prep_adm_dma_desc(nandc, false, reg_off, vaddr, size, false);
}
EXPORT_SYMBOL(qcom_write_data_dma);

/**
 * qcom_submit_descs() - submit dma descriptors
 * @nandc: qpic nand controller
 *
 * This function submits all the prepared DMA descriptors,
 * command or data
 */
int qcom_submit_descs(struct qcom_nand_controller *nandc)
{
	struct desc_info *desc, *n;
	dma_cookie_t cookie = 0;
	struct bam_transaction *bam_txn = nandc->bam_txn;
	int ret = 0;

	if (nandc->props->supports_bam) {
		if (bam_txn->rx_sgl_pos > bam_txn->rx_sgl_start) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->rx_chan, 0);
			if (ret)
				goto err_unmap_free_desc;
		}

		if (bam_txn->tx_sgl_pos > bam_txn->tx_sgl_start) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->tx_chan,
							  DMA_PREP_INTERRUPT);
			if (ret)
				goto err_unmap_free_desc;
		}

		if (bam_txn->cmd_sgl_pos > bam_txn->cmd_sgl_start) {
			ret = qcom_prepare_bam_async_desc(nandc, nandc->cmd_chan,
							  DMA_PREP_CMD);
			if (ret)
				goto err_unmap_free_desc;
		}
	}

	list_for_each_entry(desc, &nandc->desc_list, node)
		cookie = dmaengine_submit(desc->dma_desc);

	if (nandc->props->supports_bam) {
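		/*
		 * The completion callback is attached to the last command
		 * descriptor only; the transaction is treated as done once
		 * that descriptor completes.
		 */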
		bam_txn->last_cmd_desc->callback = qcom_qpic_bam_dma_done;
		bam_txn->last_cmd_desc->callback_param = bam_txn;

		dma_async_issue_pending(nandc->tx_chan);
		dma_async_issue_pending(nandc->rx_chan);
		dma_async_issue_pending(nandc->cmd_chan);

		if (!wait_for_completion_timeout(&bam_txn->txn_done,
						 QPIC_NAND_COMPLETION_TIMEOUT))
			ret = -ETIMEDOUT;
	} else {
		if (dma_sync_wait(nandc->chan, cookie) != DMA_COMPLETE)
			ret = -ETIMEDOUT;
	}

err_unmap_free_desc:
	/*
	 * Unmap the dma sg_list and free the desc allocated by both
	 * qcom_prepare_bam_async_desc() and qcom_prep_adm_dma_desc() functions.
	 */
	list_for_each_entry_safe(desc, n, &nandc->desc_list, node) {
		list_del(&desc->node);

		if (nandc->props->supports_bam)
			dma_unmap_sg(nandc->dev, desc->bam_sgl,
				     desc->sgl_cnt, desc->dir);
		else
			dma_unmap_sg(nandc->dev, &desc->adm_sgl, 1,
				     desc->dir);

		kfree(desc);
	}

	return ret;
}
EXPORT_SYMBOL(qcom_submit_descs);
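
/*
 * Typical shape of one NAND operation built from these helpers (an
 * illustrative sketch, not lifted from a specific driver; the regs->...
 * fields are placeholders for the DMA-able register copies a caller keeps):
 *
 *	qcom_clear_read_regs(nandc);
 *	qcom_clear_bam_transaction(nandc);
 *	qcom_write_reg_dma(nandc, &regs->cmd, NAND_FLASH_CMD, 1, 0);
 *	qcom_write_reg_dma(nandc, &regs->exec, NAND_EXEC_CMD, 1,
 *			   NAND_BAM_NEXT_SGL);
 *	qcom_read_reg_dma(nandc, NAND_FLASH_STATUS, 1, 0);
 *	ret = qcom_submit_descs(nandc);
 */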

/**
 * qcom_clear_read_regs() - reset the read register buffer
 * @nandc: qpic nand controller
 *
 * This function resets the register read buffer for the next NAND operation
 */
void qcom_clear_read_regs(struct qcom_nand_controller *nandc)
{
	nandc->reg_read_pos = 0;
	qcom_nandc_dev_to_mem(nandc, false);
}
EXPORT_SYMBOL(qcom_clear_read_regs);

/**
 * qcom_nandc_unalloc() - free the qpic nand controller resources
 * @nandc: qpic nand controller
 *
 * This function frees the resources allocated for the qpic nand controller
 */
void qcom_nandc_unalloc(struct qcom_nand_controller *nandc)
{
	if (nandc->props->supports_bam) {
		if (!dma_mapping_error(nandc->dev, nandc->reg_read_dma))
			dma_unmap_single(nandc->dev, nandc->reg_read_dma,
					 MAX_REG_RD *
					 sizeof(*nandc->reg_read_buf),
					 DMA_FROM_DEVICE);

		if (nandc->tx_chan)
			dma_release_channel(nandc->tx_chan);

		if (nandc->rx_chan)
			dma_release_channel(nandc->rx_chan);

		if (nandc->cmd_chan)
			dma_release_channel(nandc->cmd_chan);
	} else {
		if (nandc->chan)
			dma_release_channel(nandc->chan);
	}
}
EXPORT_SYMBOL(qcom_nandc_unalloc);

/**
 * qcom_nandc_alloc() - Allocate qpic nand controller resources
 * @nandc: qpic nand controller
 *
 * This function allocates the resources needed by the qpic nand controller
 */
int qcom_nandc_alloc(struct qcom_nand_controller *nandc)
{
	int ret;

	ret = dma_set_coherent_mask(nandc->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(nandc->dev, "failed to set DMA mask\n");
		return ret;
	}

	/*
	 * we use the internal buffer for reading ONFI params, reading small
	 * data like ID and status, and performing read-copy-write operations
	 * when writing to a codeword partially. 532 is the maximum possible
	 * size of a codeword for our nand controller
	 */
	nandc->buf_size = 532;

	nandc->data_buffer = devm_kzalloc(nandc->dev, nandc->buf_size, GFP_KERNEL);
	if (!nandc->data_buffer)
		return -ENOMEM;

	nandc->regs = devm_kzalloc(nandc->dev, sizeof(*nandc->regs), GFP_KERNEL);
	if (!nandc->regs)
		return -ENOMEM;

	nandc->reg_read_buf = devm_kcalloc(nandc->dev, MAX_REG_RD,
					   sizeof(*nandc->reg_read_buf),
					   GFP_KERNEL);
	if (!nandc->reg_read_buf)
		return -ENOMEM;

	if (nandc->props->supports_bam) {
		nandc->reg_read_dma =
			dma_map_single(nandc->dev, nandc->reg_read_buf,
				       MAX_REG_RD *
				       sizeof(*nandc->reg_read_buf),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(nandc->dev, nandc->reg_read_dma)) {
			dev_err(nandc->dev, "failed to DMA MAP reg buffer\n");
			return -EIO;
		}

		nandc->tx_chan = dma_request_chan(nandc->dev, "tx");
		if (IS_ERR(nandc->tx_chan)) {
			ret = PTR_ERR(nandc->tx_chan);
			nandc->tx_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "tx DMA channel request failed\n");
			goto unalloc;
		}

		nandc->rx_chan = dma_request_chan(nandc->dev, "rx");
		if (IS_ERR(nandc->rx_chan)) {
			ret = PTR_ERR(nandc->rx_chan);
			nandc->rx_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "rx DMA channel request failed\n");
			goto unalloc;
		}

		nandc->cmd_chan = dma_request_chan(nandc->dev, "cmd");
		if (IS_ERR(nandc->cmd_chan)) {
			ret = PTR_ERR(nandc->cmd_chan);
			nandc->cmd_chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "cmd DMA channel request failed\n");
			goto unalloc;
		}

		/*
		 * Initially allocate BAM transaction to read ONFI param page.
		 * After detecting all the devices, this BAM transaction will
		 * be freed and the next BAM transaction will be allocated with
		 * maximum codeword size
		 */
		nandc->max_cwperpage = 1;
		nandc->bam_txn = qcom_alloc_bam_transaction(nandc);
		if (!nandc->bam_txn) {
			dev_err(nandc->dev,
				"failed to allocate bam transaction\n");
			ret = -ENOMEM;
			goto unalloc;
		}
	} else {
		nandc->chan = dma_request_chan(nandc->dev, "rxtx");
		if (IS_ERR(nandc->chan)) {
			ret = PTR_ERR(nandc->chan);
			nandc->chan = NULL;
			dev_err_probe(nandc->dev, ret,
				      "rxtx DMA channel request failed\n");
			return ret;
		}
	}

	INIT_LIST_HEAD(&nandc->desc_list);
	INIT_LIST_HEAD(&nandc->host_list);

	return 0;
unalloc:
	qcom_nandc_unalloc(nandc);
	return ret;
}
EXPORT_SYMBOL(qcom_nandc_alloc);

MODULE_DESCRIPTION("QPIC controller common api");
MODULE_LICENSE("GPL");