// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Isochronous I/O functionality:
 * - Isochronous DMA context management
 * - Isochronous bus resource management (channels, bandwidth), client side
 *
 * Copyright (C) 2006 Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include <asm/byteorder.h>

#include "core.h"

#include <trace/events/firewire.h>

/*
 * Isochronous DMA context management
 */

int fw_iso_buffer_alloc(struct fw_iso_buffer *buffer, int page_count)
{
	int i;

	buffer->page_count = 0;
	buffer->page_count_mapped = 0;
	buffer->pages = kmalloc_array(page_count, sizeof(buffer->pages[0]),
				      GFP_KERNEL);
	if (buffer->pages == NULL)
		return -ENOMEM;

	for (i = 0; i < page_count; i++) {
		buffer->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
		if (buffer->pages[i] == NULL)
			break;
	}
	buffer->page_count = i;
	if (i < page_count) {
		fw_iso_buffer_destroy(buffer, NULL);
		return -ENOMEM;
	}

	return 0;
}

int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card,
			  enum dma_data_direction direction)
{
	dma_addr_t address;
	int i;

	buffer->direction = direction;

	for (i = 0; i < buffer->page_count; i++) {
		address = dma_map_page(card->device, buffer->pages[i],
				       0, PAGE_SIZE, direction);
		if (dma_mapping_error(card->device, address))
			break;

		set_page_private(buffer->pages[i], address);
	}
	buffer->page_count_mapped = i;
	if (i < buffer->page_count)
		return -ENOMEM;

	return 0;
}

int fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
		       int page_count, enum dma_data_direction direction)
{
	int ret;

	ret = fw_iso_buffer_alloc(buffer, page_count);
	if (ret < 0)
		return ret;

	ret = fw_iso_buffer_map_dma(buffer, card, direction);
	if (ret < 0)
		fw_iso_buffer_destroy(buffer, card);

	return ret;
}
EXPORT_SYMBOL(fw_iso_buffer_init);
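
/*
 * Minimal usage sketch (hypothetical driver code; the card pointer, page
 * count, and direction are illustrative only):
 *
 *	struct fw_iso_buffer buffer;
 *	int err;
 *
 *	err = fw_iso_buffer_init(&buffer, card, 16, DMA_FROM_DEVICE);
 *	if (err < 0)
 *		return err;
 *	// ... queue packets referencing the buffer ...
 *	fw_iso_buffer_destroy(&buffer, card);
 */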

void fw_iso_buffer_destroy(struct fw_iso_buffer *buffer,
			   struct fw_card *card)
{
	int i;
	dma_addr_t address;

	for (i = 0; i < buffer->page_count_mapped; i++) {
		address = page_private(buffer->pages[i]);
		dma_unmap_page(card->device, address,
			       PAGE_SIZE, buffer->direction);
	}
	for (i = 0; i < buffer->page_count; i++)
		__free_page(buffer->pages[i]);

	kfree(buffer->pages);
	buffer->pages = NULL;
	buffer->page_count = 0;
	buffer->page_count_mapped = 0;
}
EXPORT_SYMBOL(fw_iso_buffer_destroy);

/* Convert DMA address to offset into virtually contiguous buffer. */
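/*
 * Worked example (illustrative numbers): if buffer->pages[2] is mapped at bus
 * address A and completed == A + 100, the result is 2 * PAGE_SIZE + 100.
 */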
size_t fw_iso_buffer_lookup(struct fw_iso_buffer *buffer, dma_addr_t completed)
{
	size_t i;
	dma_addr_t address;
	ssize_t offset;

	for (i = 0; i < buffer->page_count; i++) {
		address = page_private(buffer->pages[i]);
		offset = (ssize_t)completed - (ssize_t)address;
		if (offset > 0 && offset <= PAGE_SIZE)
			return (i << PAGE_SHIFT) + offset;
	}

	return 0;
}

struct fw_iso_context *fw_iso_context_create(struct fw_card *card,
		int type, int channel, int speed, size_t header_size,
		fw_iso_callback_t callback, void *callback_data)
{
	struct fw_iso_context *ctx;

	ctx = card->driver->allocate_iso_context(card,
						 type, channel, header_size);
	if (IS_ERR(ctx))
		return ctx;

	ctx->card = card;
	ctx->type = type;
	ctx->channel = channel;
	ctx->speed = speed;
	ctx->header_size = header_size;
	ctx->callback.sc = callback;
	ctx->callback_data = callback_data;

	trace_isoc_outbound_allocate(ctx, channel, speed);
	trace_isoc_inbound_single_allocate(ctx, channel, header_size);
	trace_isoc_inbound_multiple_allocate(ctx);

	return ctx;
}
EXPORT_SYMBOL(fw_iso_context_create);
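
/*
 * Creation sketch for an IT context (hypothetical; the channel, speed,
 * header size, and callback are illustrative only):
 *
 *	ctx = fw_iso_context_create(card, FW_ISO_CONTEXT_TRANSMIT, 5,
 *				    SCODE_400, 8, my_callback, my_data);
 *	if (IS_ERR(ctx))
 *		return PTR_ERR(ctx);
 */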

void fw_iso_context_destroy(struct fw_iso_context *ctx)
{
	trace_isoc_outbound_destroy(ctx);
	trace_isoc_inbound_single_destroy(ctx);
	trace_isoc_inbound_multiple_destroy(ctx);

	ctx->card->driver->free_iso_context(ctx);
}
EXPORT_SYMBOL(fw_iso_context_destroy);

int fw_iso_context_start(struct fw_iso_context *ctx,
			 int cycle, int sync, int tags)
{
	trace_isoc_outbound_start(ctx, cycle);
	trace_isoc_inbound_single_start(ctx, cycle, sync, tags);
	trace_isoc_inbound_multiple_start(ctx, cycle, sync, tags);

	return ctx->card->driver->start_iso(ctx, cycle, sync, tags);
}
EXPORT_SYMBOL(fw_iso_context_start);
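
/*
 * Start sketch for an IR context (hypothetical): a negative cycle requests an
 * immediate start instead of a cycle match, and FW_ISO_CONTEXT_MATCH_ALL_TAGS
 * accepts packets with any tag value:
 *
 *	err = fw_iso_context_start(ctx, -1, 0, FW_ISO_CONTEXT_MATCH_ALL_TAGS);
 */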

int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels)
{
	trace_isoc_inbound_multiple_channels(ctx, *channels);

	return ctx->card->driver->set_iso_channels(ctx, channels);
}

int fw_iso_context_queue(struct fw_iso_context *ctx,
			 struct fw_iso_packet *packet,
			 struct fw_iso_buffer *buffer,
			 unsigned long payload)
{
	trace_isoc_outbound_queue(ctx, payload, packet);
	trace_isoc_inbound_single_queue(ctx, payload, packet);
	trace_isoc_inbound_multiple_queue(ctx, payload, packet);

	return ctx->card->driver->queue_iso(ctx, packet, buffer, payload);
}
EXPORT_SYMBOL(fw_iso_context_queue);
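
/*
 * Queueing sketch (hypothetical values; payload is the byte offset of this
 * packet's data within the mapped buffer, and .interrupt requests a
 * completion callback once the packet completes):
 *
 *	struct fw_iso_packet packet = {
 *		.payload_length = 256,
 *		.interrupt = 1,
 *	};
 *
 *	err = fw_iso_context_queue(ctx, &packet, &buffer, payload_offset);
 */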

void fw_iso_context_queue_flush(struct fw_iso_context *ctx)
{
	trace_isoc_outbound_flush(ctx);
	trace_isoc_inbound_single_flush(ctx);
	trace_isoc_inbound_multiple_flush(ctx);

	ctx->card->driver->flush_queue_iso(ctx);
}
EXPORT_SYMBOL(fw_iso_context_queue_flush);

/**
 * fw_iso_context_flush_completions() - process isochronous context in current process context.
 * @ctx: the isochronous context
 *
 * Process the isochronous context in the current process context. The registered callback function
 * is called when a queued packet buffer with the interrupt flag is completed, either after
 * transmission in the IT context or after being filled in the IR context. Additionally, the
 * callback function is also called for the packet buffer completed at last. Furthermore, the
 * callback function is called as well when the header buffer in the context becomes full. If it is
 * required to process the context asynchronously, fw_iso_context_schedule_flush_completions() is
 * available instead.
 *
 * Context: Process context. May sleep due to disable_work_sync().
 */
int fw_iso_context_flush_completions(struct fw_iso_context *ctx)
{
	int err;

	trace_isoc_outbound_flush_completions(ctx);
	trace_isoc_inbound_single_flush_completions(ctx);
	trace_isoc_inbound_multiple_flush_completions(ctx);

	might_sleep();

	// Avoid deadlock due to programming mistake.
	if (WARN_ON_ONCE(current_work() == &ctx->work))
		return 0;

	disable_work_sync(&ctx->work);

	err = ctx->card->driver->flush_iso_completions(ctx);

	enable_work(&ctx->work);

	return err;
}
EXPORT_SYMBOL(fw_iso_context_flush_completions);

int fw_iso_context_stop(struct fw_iso_context *ctx)
{
	int err;

	trace_isoc_outbound_stop(ctx);
	trace_isoc_inbound_single_stop(ctx);
	trace_isoc_inbound_multiple_stop(ctx);

	might_sleep();

	// Avoid deadlock due to programming mistake.
	if (WARN_ON_ONCE(current_work() == &ctx->work))
		return 0;

	err = ctx->card->driver->stop_iso(ctx);

	cancel_work_sync(&ctx->work);

	return err;
}
EXPORT_SYMBOL(fw_iso_context_stop);

/*
 * Isochronous bus resource management (channels, bandwidth), client side
 */
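
/*
 * Background note: BANDWIDTH_AVAILABLE counts bandwidth allocation units as
 * defined by IEEE 1394; one unit is roughly the time to send one data quadlet
 * at S1600 (about 20 ns), and the register starts each bus generation at
 * BANDWIDTH_AVAILABLE_INITIAL, which manage_bandwidth() below takes as the
 * assumed starting point for an allocation.
 */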

static int manage_bandwidth(struct fw_card *card, int irm_id, int generation,
			    int bandwidth, bool allocate)
{
	int try, new, old = allocate ? BANDWIDTH_AVAILABLE_INITIAL : 0;
	__be32 data[2];

	/*
	 * On a 1394a IRM with low contention, try < 1 is enough.
	 * On a 1394-1995 IRM, we need at least try < 2.
	 * Let's just do try < 5.
	 */
	for (try = 0; try < 5; try++) {
		new = allocate ? old - bandwidth : old + bandwidth;
		if (new < 0 || new > BANDWIDTH_AVAILABLE_INITIAL)
			return -EBUSY;

		data[0] = cpu_to_be32(old);
		data[1] = cpu_to_be32(new);
		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
					   irm_id, generation, SCODE_100,
					   CSR_REGISTER_BASE + CSR_BANDWIDTH_AVAILABLE,
					   data, 8)) {
		case RCODE_GENERATION:
			/* A generation change frees all bandwidth. */
			return allocate ? -EAGAIN : bandwidth;

		case RCODE_COMPLETE:
			if (be32_to_cpup(data) == old)
				return bandwidth;

			old = be32_to_cpup(data);
			/* Fall through. */
		}
	}

	return -EIO;
}

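/*
 * Try to (de)allocate one channel from channels_mask via compare-and-swap on
 * the IRM's CHANNELS_AVAILABLE register at the given CSR offset. Note the bit
 * order: within the register, the MSB stands for channel 0, hence the
 * 1 << (31 - channel) below.
 */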
static int manage_channel(struct fw_card *card, int irm_id, int generation,
			  u32 channels_mask, u64 offset, bool allocate)
{
	__be32 bit, all, old;
	__be32 data[2];
	int channel, ret = -EIO, retry = 5;

	old = all = allocate ? cpu_to_be32(~0) : 0;

	for (channel = 0; channel < 32; channel++) {
		if (!(channels_mask & 1 << channel))
			continue;

		ret = -EBUSY;

		bit = cpu_to_be32(1 << (31 - channel));
		if ((old & bit) != (all & bit))
			continue;

		data[0] = old;
		data[1] = old ^ bit;
		switch (fw_run_transaction(card, TCODE_LOCK_COMPARE_SWAP,
					   irm_id, generation, SCODE_100,
					   offset, data, 8)) {
		case RCODE_GENERATION:
			/* A generation change frees all channels. */
			return allocate ? -EAGAIN : channel;

		case RCODE_COMPLETE:
			if (data[0] == old)
				return channel;

			old = data[0];

			/* Is the IRM 1394a-2000 compliant? */
			if ((data[0] & bit) == (data[1] & bit))
				continue;

			fallthrough;	/* It's a 1394-1995 IRM, retry. */
		default:
			if (retry) {
				retry--;
				channel--;
			} else {
				ret = -EIO;
			}
		}
	}

	return ret;
}

static void deallocate_channel(struct fw_card *card, int irm_id,
			       int generation, int channel)
{
	u32 mask;
	u64 offset;

	mask = channel < 32 ? 1 << channel : 1 << (channel - 32);
	offset = channel < 32 ? CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI :
				CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO;

	manage_channel(card, irm_id, generation, mask, offset, false);
}

/**
 * fw_iso_resource_manage() - Allocate or deallocate a channel and/or bandwidth
 * @card: card interface for this action
 * @generation: bus generation
 * @channels_mask: bitmask for channel allocation
 * @channel: pointer for returning channel allocation result
 * @bandwidth: pointer for returning bandwidth allocation result
 * @allocate: whether to allocate (true) or deallocate (false)
 *
 * In parameters: card, generation, channels_mask, bandwidth, allocate
 * Out parameters: channel, bandwidth
 *
 * This function blocks (sleeps) during communication with the IRM.
 *
 * Allocates or deallocates at most one channel out of channels_mask.
 * channels_mask is a bitfield with MSB for channel 63 and LSB for channel 0.
 * (Note, the IRM's CHANNELS_AVAILABLE is a big-endian bitfield with MSB for
 * channel 0 and LSB for channel 63.)
 * Allocates or deallocates as many bandwidth allocation units as specified.
 *
 * Returns channel < 0 if no channel was allocated or deallocated.
 * Returns bandwidth = 0 if no bandwidth was allocated or deallocated.
 *
 * If generation is stale, deallocations succeed but allocations fail with
 * channel = -EAGAIN.
 *
 * If channel allocation fails, no bandwidth will be allocated either.
 * If bandwidth allocation fails, no channel will be allocated either.
 * But deallocations of channel and bandwidth are tried independently
 * of each other's success.
 */
void fw_iso_resource_manage(struct fw_card *card, int generation,
			    u64 channels_mask, int *channel, int *bandwidth,
			    bool allocate)
{
	u32 channels_hi = channels_mask;	/* channels 31...0 */
	u32 channels_lo = channels_mask >> 32;	/* channels 63...32 */
	int irm_id, ret, c = -EINVAL;

	scoped_guard(spinlock_irq, &card->lock)
		irm_id = card->irm_node->node_id;

	if (channels_hi)
		c = manage_channel(card, irm_id, generation, channels_hi,
				   CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_HI,
				   allocate);
	if (channels_lo && c < 0) {
		c = manage_channel(card, irm_id, generation, channels_lo,
				   CSR_REGISTER_BASE + CSR_CHANNELS_AVAILABLE_LO,
				   allocate);
		if (c >= 0)
			c += 32;
	}
	*channel = c;

	if (allocate && channels_mask != 0 && c < 0)
		*bandwidth = 0;

	if (*bandwidth == 0)
		return;

	ret = manage_bandwidth(card, irm_id, generation, *bandwidth, allocate);
	if (ret < 0)
		*bandwidth = 0;

	if (allocate && ret < 0) {
		if (c >= 0)
			deallocate_channel(card, irm_id, generation, c);
		*channel = ret;
	}
}
EXPORT_SYMBOL(fw_iso_resource_manage);
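
/*
 * Allocation sketch (hypothetical; requests any of channels 0..31 together
 * with 2400 bandwidth allocation units):
 *
 *	int channel, bandwidth = 2400;
 *
 *	fw_iso_resource_manage(card, generation, 0xffffffffULL,
 *			       &channel, &bandwidth, true);
 *	if (channel < 0)
 *		return channel;	// may be -EAGAIN on a stale generation
 */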