// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 */

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/slab.h>
#include <linux/bitmap.h>

#include "dm-core.h"

#define DM_MSG_PREFIX "zone"

/*
 * For internal zone reports bypassing the top BIO submission path.
 */
static int dm_blk_do_report_zones(struct mapped_device *md, struct dm_table *t,
				  unsigned int nr_zones,
				  struct dm_report_zones_args *args)
{
	do {
		struct dm_target *tgt;
		int ret;

		tgt = dm_table_find_target(t, args->next_sector);
		if (WARN_ON_ONCE(!tgt->type->report_zones))
			return -EIO;

		args->tgt = tgt;
		ret = tgt->type->report_zones(tgt, args,
					      nr_zones - args->zone_idx);
		if (ret < 0)
			return ret;
	} while (args->zone_idx < nr_zones &&
		 args->next_sector < get_capacity(md->disk));

	return args->zone_idx;
}

/*
 * User-facing block device report zones operation of a mapped device. This
 * calls the report_zones operation of each target of the device table, an
 * operation generally implemented by targets using dm_report_zones().
 */
int dm_blk_report_zones(struct gendisk *disk, sector_t sector,
			unsigned int nr_zones,
			struct blk_report_zones_args *args)
{
	struct mapped_device *md = disk->private_data;
	struct dm_table *map;
	struct dm_table *zone_revalidate_map = md->zone_revalidate_map;
	int srcu_idx, ret = -EIO;
	bool put_table = false;

	if (!zone_revalidate_map || md->revalidate_map_task != current) {
		/*
		 * Regular user context, or zone revalidation during __bind()
		 * is in progress but this call comes from a different process.
		 */
		if (dm_suspended_md(md))
			return -EAGAIN;

		map = dm_get_live_table(md, &srcu_idx);
		put_table = true;
	} else {
		/* Zone revalidation during __bind() */
		map = zone_revalidate_map;
	}

	if (map) {
		struct dm_report_zones_args dm_args = {
			.disk = md->disk,
			.next_sector = sector,
			.rep_args = args,
		};
		ret = dm_blk_do_report_zones(md, map, nr_zones, &dm_args);
	}

	if (put_table)
		dm_put_live_table(md, srcu_idx);

	return ret;
}

static int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx,
			      void *data)
{
	struct dm_report_zones_args *args = data;
	sector_t sector_diff = args->tgt->begin - args->start;

	/*
	 * Ignore zones beyond the target range.
	 */
	if (zone->start >= args->start + args->tgt->len)
		return 0;

	/*
	 * Remap the start sector and write pointer position of the zone
	 * to match its position in the target range.
	 */
	zone->start += sector_diff;
	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
		if (zone->cond == BLK_ZONE_COND_FULL)
			zone->wp = zone->start + zone->len;
		else if (zone->cond == BLK_ZONE_COND_EMPTY)
			zone->wp = zone->start;
		else
			zone->wp += sector_diff;
	}

	args->next_sector = zone->start + zone->len;

	/* If we have an internal callback, call it first. */
	if (args->cb) {
		int ret;

		ret = args->cb(zone, args->zone_idx, args->data);
		if (ret)
			return ret;
	}

	return disk_report_zone(args->disk, zone, args->zone_idx++,
				args->rep_args);
}
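
/*
 * Illustrative example of the remapping above (assumed numbers, not taken
 * from any particular target): for a target placed at begin = 524288 in the
 * mapped device and mapping its underlying device from start = 0,
 * sector_diff is 524288. A device zone starting at sector 131072 is thus
 * reported to the upper layers as starting at sector 655360, and an in-use
 * write pointer at sector 131200 is reported at sector 655488.
 */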

/*
 * Helper for drivers of zoned targets to implement struct target_type
 * report_zones operation.
 */
int dm_report_zones(struct block_device *bdev, sector_t start, sector_t sector,
		    struct dm_report_zones_args *args, unsigned int nr_zones)
{
	/*
	 * Set the target mapping start sector first so that
	 * dm_report_zones_cb() can correctly remap zone information.
	 */
	args->start = start;

	return blkdev_report_zones(bdev, sector, nr_zones,
				   dm_report_zones_cb, args);
}
EXPORT_SYMBOL_GPL(dm_report_zones);
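
/*
 * Example of a target using this helper, modeled on the dm-linear target
 * (a sketch for illustration only; see drivers/md/dm-linear.c for the
 * actual code):
 *
 *	static int linear_report_zones(struct dm_target *ti,
 *			struct dm_report_zones_args *args,
 *			unsigned int nr_zones)
 *	{
 *		struct linear_c *lc = ti->private;
 *
 *		return dm_report_zones(lc->dev->bdev, lc->start,
 *				       linear_map_sector(ti, args->next_sector),
 *				       args, nr_zones);
 *	}
 */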

bool dm_is_zone_write(struct mapped_device *md, struct bio *bio)
{
	struct request_queue *q = md->queue;

	if (!blk_queue_is_zoned(q))
		return false;

	switch (bio_op(bio)) {
	case REQ_OP_WRITE_ZEROES:
	case REQ_OP_WRITE:
		return !op_is_flush(bio->bi_opf) && bio_sectors(bio);
	default:
		return false;
	}
}
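
/*
 * dm_is_zone_write() lets the core bio submission path single out bios that
 * may move a zone write pointer. A caller would typically use it as in this
 * illustrative sketch:
 *
 *	if (dm_is_zone_write(md, bio)) {
 *		// bio may change a zone write pointer: order it with
 *		// respect to other writes to the same zone
 *		...
 *	}
 */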

/*
 * Revalidate the zones of a mapped device to initialize the resources
 * necessary for zone append emulation. Note that we cannot simply use the
 * block layer blk_revalidate_disk_zones() function here as the mapped device
 * is suspended (this is called from __bind() context).
 */
int dm_revalidate_zones(struct dm_table *t, struct request_queue *q)
{
	struct mapped_device *md = t->md;
	struct gendisk *disk = md->disk;
	unsigned int nr_zones = disk->nr_zones;
	int ret;

	if (!get_capacity(disk))
		return 0;

	/*
	 * Do not revalidate if zone write plug resources have already
	 * been allocated.
	 */
	if (dm_has_zone_plugs(md))
		return 0;

	DMINFO("%s using %s zone append", disk->disk_name,
	       queue_emulates_zone_append(q) ? "emulated" : "native");

	/*
	 * Our table is not live yet. So the call to dm_get_live_table()
	 * in dm_blk_report_zones() will fail. Set a temporary pointer to
	 * our table for dm_blk_report_zones() to use directly.
	 */
	md->zone_revalidate_map = t;
	md->revalidate_map_task = current;
	ret = blk_revalidate_disk_zones(disk);
	md->revalidate_map_task = NULL;
	md->zone_revalidate_map = NULL;

	if (ret) {
		DMERR("Revalidate zones failed %d", ret);
		disk->nr_zones = nr_zones;
		return ret;
	}

	return 0;
}

static int device_not_zone_append_capable(struct dm_target *ti,
					  struct dm_dev *dev, sector_t start,
					  sector_t len, void *data)
{
	return !bdev_is_zoned(dev->bdev);
}

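/*
 * Check if all the targets of a table, and all the devices they use, can
 * handle REQ_OP_ZONE_APPEND natively, that is, without DM zone append
 * emulation.
 */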
static bool dm_table_supports_zone_append(struct dm_table *t)
{
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (ti->emulate_zone_append)
			return false;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti, device_not_zone_append_capable, NULL))
			return false;
	}

	return true;
}

struct dm_device_zone_count {
	sector_t start;
	sector_t len;
	unsigned int total_nr_seq_zones;
	unsigned int target_nr_seq_zones;
};

/*
 * Count the total number of and the number of mapped sequential zones of a
 * target zoned device.
 */
static int dm_device_count_zones_cb(struct blk_zone *zone,
				    unsigned int idx, void *data)
{
	struct dm_device_zone_count *zc = data;

	if (zone->type != BLK_ZONE_TYPE_CONVENTIONAL) {
		zc->total_nr_seq_zones++;
		if (zone->start >= zc->start &&
		    zone->start < zc->start + zc->len)
			zc->target_nr_seq_zones++;
	}

	return 0;
}

static int dm_device_count_zones(struct dm_dev *dev,
				 struct dm_device_zone_count *zc)
{
	int ret;

	ret = blkdev_report_zones(dev->bdev, 0, BLK_ALL_ZONES,
				  dm_device_count_zones_cb, zc);
	if (ret < 0)
		return ret;
	if (!ret)
		return -EIO;
	return 0;
}

struct dm_zone_resource_limits {
	unsigned int mapped_nr_seq_zones;
	struct queue_limits *lim;
	bool reliable_limits;
};

static int device_get_zone_resource_limits(struct dm_target *ti,
					   struct dm_dev *dev, sector_t start,
					   sector_t len, void *data)
{
	struct dm_zone_resource_limits *zlim = data;
	struct gendisk *disk = dev->bdev->bd_disk;
	unsigned int max_open_zones, max_active_zones;
	int ret;
	struct dm_device_zone_count zc = {
		.start = start,
		.len = len,
	};

	/*
	 * If the target is not the whole device, the device zone resources may
	 * be shared between different targets. Check this by counting the
	 * number of mapped sequential zones: if this number is smaller than the
	 * total number of sequential zones of the target device, then resource
	 * sharing may happen and the zone limits will not be reliable.
	 */
	ret = dm_device_count_zones(dev, &zc);
	if (ret) {
		DMERR("Count %s zones failed %d", disk->disk_name, ret);
		return ret;
	}

	/*
	 * If the target does not map any sequential zones, then we do not need
	 * any zone resource limits.
	 */
	if (!zc.target_nr_seq_zones)
		return 0;

	/*
	 * If the target does not map all sequential zones, the limits
	 * will not be reliable and we cannot use REQ_OP_ZONE_RESET_ALL.
	 */
	if (zc.target_nr_seq_zones < zc.total_nr_seq_zones) {
		zlim->reliable_limits = false;
		ti->zone_reset_all_supported = false;
	}

	/*
	 * If the target maps fewer sequential zones than the limit values,
	 * then we do not have limits for this target.
	 */
	max_active_zones = disk->queue->limits.max_active_zones;
	if (max_active_zones >= zc.target_nr_seq_zones)
		max_active_zones = 0;
	zlim->lim->max_active_zones =
		min_not_zero(max_active_zones, zlim->lim->max_active_zones);

	max_open_zones = disk->queue->limits.max_open_zones;
	if (max_open_zones >= zc.target_nr_seq_zones)
		max_open_zones = 0;
	zlim->lim->max_open_zones =
		min_not_zero(max_open_zones, zlim->lim->max_open_zones);

	/*
	 * Also count the total number of sequential zones for the mapped
	 * device so that when we are done inspecting all its targets, we are
	 * able to check if the mapped device actually has any sequential zones.
	 */
	zlim->mapped_nr_seq_zones += zc.target_nr_seq_zones;

	return 0;
}
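
/*
 * Worked example of the limit handling above (assumed numbers): a target
 * mapping 8 of the 16 sequential zones of a device with max_open_zones = 4
 * keeps that limit (4 < 8) but marks the limits unreliable, since the other
 * 8 zones may be used by someone else. If the device instead advertised
 * max_open_zones = 32, the limit would be cleared (32 >= 8) because the
 * target alone can never exceed it. Per-device limits are then combined
 * across targets with min_not_zero().
 */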

int dm_set_zones_restrictions(struct dm_table *t, struct request_queue *q,
			      struct queue_limits *lim)
{
	struct mapped_device *md = t->md;
	struct gendisk *disk = md->disk;
	struct dm_zone_resource_limits zlim = {
		.reliable_limits = true,
		.lim = lim,
	};

	/*
	 * Check if zone append is natively supported, and if not, set the
	 * mapped device queue as needing zone append emulation. If zone
	 * append is natively supported, make sure that
	 * max_hw_zone_append_sectors is not set to 0.
	 */
	WARN_ON_ONCE(queue_is_mq(q));
	if (!dm_table_supports_zone_append(t))
		lim->max_hw_zone_append_sectors = 0;
	else if (lim->max_hw_zone_append_sectors == 0)
		lim->max_hw_zone_append_sectors = lim->max_zone_append_sectors;

	/*
	 * Determine the max open and max active zone limits for the mapped
	 * device by inspecting the zone resource limits and the zones mapped
	 * by each target.
	 */
	for (unsigned int i = 0; i < t->num_targets; i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		/*
		 * Assume that the target can accept REQ_OP_ZONE_RESET_ALL.
		 * device_get_zone_resource_limits() may adjust this if one of
		 * the devices used by the target does not have all its
		 * sequential write required zones mapped.
		 */
		ti->zone_reset_all_supported = true;

		if (!ti->type->iterate_devices ||
		    ti->type->iterate_devices(ti,
				device_get_zone_resource_limits, &zlim)) {
			DMERR("Could not determine %s zone resource limits",
			      disk->disk_name);
			return -ENODEV;
		}
	}

	/*
	 * If we only have conventional zones mapped, expose the mapped device
	 * as a regular device.
	 */
	if (!zlim.mapped_nr_seq_zones) {
		lim->max_open_zones = 0;
		lim->max_active_zones = 0;
		lim->max_hw_zone_append_sectors = 0;
		lim->max_zone_append_sectors = 0;
		lim->zone_write_granularity = 0;
		lim->chunk_sectors = 0;
		lim->features &= ~BLK_FEAT_ZONED;
		return 0;
	}

	if (get_capacity(disk) && dm_has_zone_plugs(t->md)) {
		if (q->limits.chunk_sectors != lim->chunk_sectors) {
			DMWARN("%s: device has zone write plug resources. "
			       "Cannot change zone size",
			       disk->disk_name);
			return -EINVAL;
		}
		if (lim->max_hw_zone_append_sectors != 0 &&
		    !dm_table_is_wildcard(t)) {
			DMWARN("%s: device has zone write plug resources. "
			       "New table must emulate zone append",
			       disk->disk_name);
			return -EINVAL;
		}
	}

	/*
	 * Warn once (when the capacity is not yet set) if the mapped device is
	 * partially using zone resources of the target devices, as that leads
	 * to unreliable limits, i.e. if another mapped device uses the same
	 * underlying devices, we cannot enforce zone limits to guarantee that
	 * writing will not lead to errors. Note that we really should return
	 * an error in such a case, but there is no easy way to find out if
	 * another mapped device uses the same underlying zoned devices.
	 */
	if (!get_capacity(disk) && !zlim.reliable_limits)
		DMWARN("%s zone resource limits may be unreliable",
		       disk->disk_name);

	if (lim->features & BLK_FEAT_ZONED &&
	    !static_key_enabled(&zoned_enabled.key))
		static_branch_enable(&zoned_enabled);
	return 0;
}

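/*
 * Finalize the zone settings of a mapped device once the queue limits of a
 * new table are known: record whether zone append emulation is needed, or
 * clear all zone state if the device is not zoned.
 */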
void dm_finalize_zone_settings(struct dm_table *t, struct queue_limits *lim)
{
	struct mapped_device *md = t->md;

	if (lim->features & BLK_FEAT_ZONED) {
		if (dm_table_supports_zone_append(t))
			clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
		else
			set_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
	} else {
		clear_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
		md->disk->nr_zones = 0;
	}
}

/*
 * IO completion callback called from clone_endio().
 */
void dm_zone_endio(struct dm_io *io, struct bio *clone)
{
	struct mapped_device *md = io->md;
	struct gendisk *disk = md->disk;
	struct bio *orig_bio = io->orig_bio;

	/*
	 * Get the offset within the zone of the written sector
	 * and add that to the original bio sector position.
	 */
	if (clone->bi_status == BLK_STS_OK &&
	    bio_op(clone) == REQ_OP_ZONE_APPEND) {
		orig_bio->bi_iter.bi_sector +=
			bdev_offset_from_zone_start(disk->part0,
						    clone->bi_iter.bi_sector);
	}
}
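
/*
 * Example of the zone append completion fixup above (assumed numbers): with
 * a zone size of 524288 sectors (256 MiB), a clone bio completing at sector
 * 524416 sits at offset 128 from its zone start, so 128 is added to the
 * original bio's sector to report the actual append location in the mapped
 * device address space. This works because DM requires all zones to have
 * the same size, so the in-zone offset is identical in both address spaces.
 */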

static int dm_zone_need_reset_cb(struct blk_zone *zone, unsigned int idx,
				 void *data)
{
	/*
	 * For an all-zones reset, ignore conventional, empty, read-only
	 * and offline zones.
	 */
	switch (zone->cond) {
	case BLK_ZONE_COND_NOT_WP:
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_READONLY:
	case BLK_ZONE_COND_OFFLINE:
		return 0;
	default:
		set_bit(idx, (unsigned long *)data);
		return 0;
	}
}

int dm_zone_get_reset_bitmap(struct mapped_device *md, struct dm_table *t,
			     sector_t sector, unsigned int nr_zones,
			     unsigned long *need_reset)
{
	struct dm_report_zones_args args = {
		.disk = md->disk,
		.next_sector = sector,
		.cb = dm_zone_need_reset_cb,
		.data = need_reset,
	};
	int ret;

	ret = dm_blk_do_report_zones(md, t, nr_zones, &args);
	if (ret != nr_zones) {
		DMERR("Get %s zone reset bitmap failed\n",
		      md->disk->disk_name);
		return -EIO;
	}

	return 0;
}