// SPDX-License-Identifier: GPL-2.0+

#include <drm/drm_atomic_helper.h>
#include <drm/drm_edid.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_vblank.h>

#include "amdgpu.h"
#ifdef CONFIG_DRM_AMDGPU_SI
#include "dce_v6_0.h"
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
#include "dce_v8_0.h"
#endif
#include "dce_v10_0.h"
#include "ivsrcid/ivsrcid_vislands30.h"
#include "amdgpu_vkms.h"
#include "amdgpu_display.h"
#include "atom.h"
#include "amdgpu_irq.h"

/**
 * DOC: amdgpu_vkms
 *
 * The amdgpu vkms interface provides a virtual KMS interface for several use
 * cases: devices without display hardware, platforms where the actual display
 * hardware is not useful (e.g., servers), SR-IOV virtual functions, device
 * emulation/simulation, and device bring-up prior to the display hardware
 * being usable. We previously emulated a legacy KMS interface, but there was a
 * desire to move to the atomic KMS interface. The vkms driver did everything we
 * needed, but we wanted KMS support natively in the driver without buffer
 * sharing and the ability to support an instance of VKMS per device. We first
 * looked at splitting vkms into a stub driver and a helper module that other
 * drivers could use to implement a virtual display, but this strategy ended up
 * being messy due to driver specific callbacks needed for buffer management.
 * Ultimately, it proved easier to import the vkms code as it mostly used core
 * drm helpers anyway.
 */

static const u32 amdgpu_vkms_formats[] = {
	DRM_FORMAT_XRGB8888,
};

static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer)
{
	struct amdgpu_crtc *amdgpu_crtc = container_of(timer, struct amdgpu_crtc, vblank_timer);
	struct drm_crtc *crtc = &amdgpu_crtc->base;
	struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
	u64 ret_overrun;
	bool ret;

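	/* Re-arm the timer one frame period ahead, then signal the simulated vblank. */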
	ret_overrun = hrtimer_forward_now(&amdgpu_crtc->vblank_timer,
					  output->period_ns);
	if (ret_overrun != 1)
		DRM_WARN("%s: vblank timer overrun\n", __func__);

	ret = drm_crtc_handle_vblank(crtc);
	/* Don't queue timer again when vblank is disabled. */
	if (!ret)
		return HRTIMER_NORESTART;

	return HRTIMER_RESTART;
}

static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
	struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	drm_calc_timestamping_constants(crtc, &crtc->mode);

	out->period_ns = ktime_set(0, vblank->framedur_ns);
	hrtimer_start(&amdgpu_crtc->vblank_timer, out->period_ns, HRTIMER_MODE_REL);

	return 0;
}

static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc)
{
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

	hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer);
}

static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc,
					     int *max_error,
					     ktime_t *vblank_time,
					     bool in_vblank_irq)
{
	struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc);
	struct drm_vblank_crtc *vblank = drm_crtc_vblank_crtc(crtc);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);

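	/* Vblank interrupts are disabled, so report the current time as the timestamp. */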
	if (!READ_ONCE(vblank->enabled)) {
		*vblank_time = ktime_get();
		return true;
	}

	*vblank_time = READ_ONCE(amdgpu_crtc->vblank_timer.node.expires);

	if (WARN_ON(*vblank_time == vblank->time))
		return true;

	/*
	 * To prevent races we roll the hrtimer forward before we do any
	 * interrupt processing - this is how real hw works (the interrupt is
	 * only generated after all the vblank registers are updated) and what
	 * the vblank core expects. Therefore we need to always correct the
	 * timestamp by one frame.
	 */
	*vblank_time -= output->period_ns;

	return true;
}

static const struct drm_crtc_funcs amdgpu_vkms_crtc_funcs = {
	.set_config = drm_atomic_helper_set_config,
	.destroy = drm_crtc_cleanup,
	.page_flip = drm_atomic_helper_page_flip,
	.reset = drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
	.enable_vblank = amdgpu_vkms_enable_vblank,
	.disable_vblank = amdgpu_vkms_disable_vblank,
	.get_vblank_timestamp = amdgpu_vkms_get_vblank_timestamp,
};

static void amdgpu_vkms_crtc_atomic_enable(struct drm_crtc *crtc,
					   struct drm_atomic_state *state)
{
	drm_crtc_vblank_on(crtc);
}

static void amdgpu_vkms_crtc_atomic_disable(struct drm_crtc *crtc,
					    struct drm_atomic_state *state)
{
	drm_crtc_vblank_off(crtc);
}

static void amdgpu_vkms_crtc_atomic_flush(struct drm_crtc *crtc,
					  struct drm_atomic_state *state)
{
	unsigned long flags;
	if (crtc->state->event) {
		spin_lock_irqsave(&crtc->dev->event_lock, flags);

		if (drm_crtc_vblank_get(crtc) != 0)
			drm_crtc_send_vblank_event(crtc, crtc->state->event);
		else
			drm_crtc_arm_vblank_event(crtc, crtc->state->event);

		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);

		crtc->state->event = NULL;
	}
}

static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = {
	.atomic_flush = amdgpu_vkms_crtc_atomic_flush,
	.atomic_enable = amdgpu_vkms_crtc_atomic_enable,
	.atomic_disable = amdgpu_vkms_crtc_atomic_disable,
};

static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
				 struct drm_plane *primary, struct drm_plane *cursor)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
	int ret;

	ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
					&amdgpu_vkms_crtc_funcs, NULL);
	if (ret) {
		DRM_ERROR("Failed to init CRTC\n");
		return ret;
	}

	drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs);

	amdgpu_crtc->crtc_id = drm_crtc_index(crtc);
	adev->mode_info.crtcs[drm_crtc_index(crtc)] = amdgpu_crtc;

	amdgpu_crtc->pll_id = ATOM_PPLL_INVALID;
	amdgpu_crtc->encoder = NULL;
	amdgpu_crtc->connector = NULL;
	amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE;

	hrtimer_setup(&amdgpu_crtc->vblank_timer, &amdgpu_vkms_vblank_simulate, CLOCK_MONOTONIC,
		      HRTIMER_MODE_REL);

	return ret;
}

static const struct drm_connector_funcs amdgpu_vkms_connector_funcs = {
	.fill_modes = drm_helper_probe_single_connector_modes,
	.destroy = drm_connector_cleanup,
	.reset = drm_atomic_helper_connector_reset,
	.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};

static int amdgpu_vkms_conn_get_modes(struct drm_connector *connector)
{
	struct drm_device *dev = connector->dev;
	struct drm_display_mode *mode = NULL;
	unsigned i;
	static const struct mode_size {
		int w;
		int h;
	} common_modes[] = {
		{ 640,  480},
		{ 720,  480},
		{ 800,  600},
		{ 848,  480},
		{1024,  768},
		{1152,  768},
		{1280,  720},
		{1280,  800},
		{1280,  854},
		{1280,  960},
		{1280, 1024},
		{1440,  900},
		{1400, 1050},
		{1680, 1050},
		{1600, 1200},
		{1920, 1080},
		{1920, 1200},
		{2560, 1440},
		{4096, 3112},
		{3656, 2664},
		{3840, 2160},
		{4096, 2160},
	};

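	/* Register a 60 Hz CVT timing for each of the common modes above. */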
	for (i = 0; i < ARRAY_SIZE(common_modes); i++) {
		mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
		if (!mode)
			continue;
		drm_mode_probed_add(connector, mode);
	}

	drm_set_preferred_mode(connector, XRES_DEF, YRES_DEF);

	return ARRAY_SIZE(common_modes);
}

static const struct drm_connector_helper_funcs amdgpu_vkms_conn_helper_funcs = {
	.get_modes = amdgpu_vkms_conn_get_modes,
};

static const struct drm_plane_funcs amdgpu_vkms_plane_funcs = {
	.update_plane = drm_atomic_helper_update_plane,
	.disable_plane = drm_atomic_helper_disable_plane,
	.destroy = drm_plane_cleanup,
	.reset = drm_atomic_helper_plane_reset,
	.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
};

static void amdgpu_vkms_plane_atomic_update(struct drm_plane *plane,
					    struct drm_atomic_state *old_state)
{
	return;
}

static int amdgpu_vkms_plane_atomic_check(struct drm_plane *plane,
					  struct drm_atomic_state *state)
{
	struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state,
										 plane);
	struct drm_crtc_state *crtc_state;
	int ret;

	if (!new_plane_state->fb || WARN_ON(!new_plane_state->crtc))
		return 0;

	crtc_state = drm_atomic_get_crtc_state(state,
					       new_plane_state->crtc);
	if (IS_ERR(crtc_state))
		return PTR_ERR(crtc_state);

	ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
						  DRM_PLANE_NO_SCALING,
						  DRM_PLANE_NO_SCALING,
						  false, true);
	if (ret != 0)
		return ret;

	/* for now primary plane must be visible and full screen */
	if (!new_plane_state->visible)
		return -EINVAL;

	return 0;
}

static int amdgpu_vkms_prepare_fb(struct drm_plane *plane,
				  struct drm_plane_state *new_state)
{
	struct amdgpu_framebuffer *afb;
	struct drm_gem_object *obj;
	struct amdgpu_device *adev;
	struct amdgpu_bo *rbo;
	uint32_t domain;
	int r;

	if (!new_state->fb) {
		DRM_DEBUG_KMS("No FB bound\n");
		return 0;
	}
	afb = to_amdgpu_framebuffer(new_state->fb);

	obj = drm_gem_fb_get_obj(new_state->fb, 0);
	if (!obj) {
		DRM_ERROR("Failed to get obj from framebuffer\n");
		return -EINVAL;
	}

	rbo = gem_to_amdgpu_bo(obj);
	adev = amdgpu_ttm_adev(rbo->tbo.bdev);

	r = amdgpu_bo_reserve(rbo, true);
	if (r) {
		dev_err(adev->dev, "fail to reserve bo (%d)\n", r);
		return r;
	}

	r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1);
	if (r) {
		dev_err(adev->dev, "allocating fence slot failed (%d)\n", r);
		goto error_unlock;
	}

	if (plane->type != DRM_PLANE_TYPE_CURSOR)
		domain = amdgpu_display_supported_domains(adev, rbo->flags);
	else
		domain = AMDGPU_GEM_DOMAIN_VRAM;

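	/* Buffers pinned to VRAM for scanout must be physically contiguous. */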
	rbo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
	r = amdgpu_bo_pin(rbo, domain);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to pin framebuffer with error %d\n", r);
		goto error_unlock;
	}

	r = amdgpu_ttm_alloc_gart(&rbo->tbo);
	if (unlikely(r != 0)) {
		DRM_ERROR("%p bind failed\n", rbo);
		goto error_unpin;
	}

	amdgpu_bo_unreserve(rbo);

	afb->address = amdgpu_bo_gpu_offset(rbo);

	amdgpu_bo_ref(rbo);

	return 0;

error_unpin:
	amdgpu_bo_unpin(rbo);

error_unlock:
	amdgpu_bo_unreserve(rbo);
	return r;
}

static void amdgpu_vkms_cleanup_fb(struct drm_plane *plane,
				   struct drm_plane_state *old_state)
{
	struct amdgpu_bo *rbo;
	struct drm_gem_object *obj;
	int r;

	if (!old_state->fb)
		return;

	obj = drm_gem_fb_get_obj(old_state->fb, 0);
	if (!obj) {
		DRM_ERROR("Failed to get obj from framebuffer\n");
		return;
	}

	rbo = gem_to_amdgpu_bo(obj);
	r = amdgpu_bo_reserve(rbo, false);
	if (unlikely(r)) {
		DRM_ERROR("failed to reserve rbo before unpin\n");
		return;
	}

	amdgpu_bo_unpin(rbo);
	amdgpu_bo_unreserve(rbo);
	amdgpu_bo_unref(&rbo);
}

static const struct drm_plane_helper_funcs amdgpu_vkms_primary_helper_funcs = {
	.atomic_update = amdgpu_vkms_plane_atomic_update,
	.atomic_check = amdgpu_vkms_plane_atomic_check,
	.prepare_fb = amdgpu_vkms_prepare_fb,
	.cleanup_fb = amdgpu_vkms_cleanup_fb,
};

static struct drm_plane *amdgpu_vkms_plane_init(struct drm_device *dev,
						enum drm_plane_type type,
						int index)
{
	struct drm_plane *plane;
	int ret;

	plane = kzalloc(sizeof(*plane), GFP_KERNEL);
	if (!plane)
		return ERR_PTR(-ENOMEM);

	ret = drm_universal_plane_init(dev, plane, 1 << index,
				       &amdgpu_vkms_plane_funcs,
				       amdgpu_vkms_formats,
				       ARRAY_SIZE(amdgpu_vkms_formats),
				       NULL, type, NULL);
	if (ret) {
		kfree(plane);
		return ERR_PTR(ret);
	}

	drm_plane_helper_add(plane, &amdgpu_vkms_primary_helper_funcs);

	return plane;
}

static int amdgpu_vkms_output_init(struct drm_device *dev, struct
				   amdgpu_vkms_output *output, int index)
{
	struct drm_connector *connector = &output->connector;
	struct drm_encoder *encoder = &output->encoder;
	struct drm_crtc *crtc = &output->crtc.base;
	struct drm_plane *primary, *cursor = NULL;
	int ret;

	primary = amdgpu_vkms_plane_init(dev, DRM_PLANE_TYPE_PRIMARY, index);
	if (IS_ERR(primary))
		return PTR_ERR(primary);

	ret = amdgpu_vkms_crtc_init(dev, crtc, primary, cursor);
	if (ret)
		goto err_crtc;

	ret = drm_connector_init(dev, connector, &amdgpu_vkms_connector_funcs,
				 DRM_MODE_CONNECTOR_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to init connector\n");
		goto err_connector;
	}

	drm_connector_helper_add(connector, &amdgpu_vkms_conn_helper_funcs);

	ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_VIRTUAL);
	if (ret) {
		DRM_ERROR("Failed to init encoder\n");
		goto err_encoder;
	}
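	/* Each virtual encoder drives only the CRTC with the matching index. */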
	encoder->possible_crtcs = 1 << index;

	ret = drm_connector_attach_encoder(connector, encoder);
	if (ret) {
		DRM_ERROR("Failed to attach connector to encoder\n");
		goto err_attach;
	}

	drm_mode_config_reset(dev);

	return 0;

err_attach:
	drm_encoder_cleanup(encoder);

err_encoder:
	drm_connector_cleanup(connector);

err_connector:
	drm_crtc_cleanup(crtc);

err_crtc:
	drm_plane_cleanup(primary);

	return ret;
}

const struct drm_mode_config_funcs amdgpu_vkms_mode_funcs = {
	.fb_create = amdgpu_display_user_framebuffer_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static int amdgpu_vkms_sw_init(struct amdgpu_ip_block *ip_block)
{
	int r, i;
	struct amdgpu_device *adev = ip_block->adev;

	adev->amdgpu_vkms_output = kcalloc(adev->mode_info.num_crtc,
		sizeof(struct amdgpu_vkms_output), GFP_KERNEL);
	if (!adev->amdgpu_vkms_output)
		return -ENOMEM;

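	/* No hardware vblank counter; the DRM core falls back to timestamp-based counting. */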
	adev_to_drm(adev)->max_vblank_count = 0;

	adev_to_drm(adev)->mode_config.funcs = &amdgpu_vkms_mode_funcs;

	adev_to_drm(adev)->mode_config.max_width = XRES_MAX;
	adev_to_drm(adev)->mode_config.max_height = YRES_MAX;

	adev_to_drm(adev)->mode_config.preferred_depth = 24;
	adev_to_drm(adev)->mode_config.prefer_shadow = 1;

	adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true;

	r = amdgpu_display_modeset_create_props(adev);
	if (r)
		return r;

	/* allocate crtcs, encoders, connectors */
	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		r = amdgpu_vkms_output_init(adev_to_drm(adev), &adev->amdgpu_vkms_output[i], i);
		if (r)
			return r;
	}

	r = drm_vblank_init(adev_to_drm(adev), adev->mode_info.num_crtc);
	if (r)
		return r;

	drm_kms_helper_poll_init(adev_to_drm(adev));

	adev->mode_info.mode_config_initialized = true;
	return 0;
}

static int amdgpu_vkms_sw_fini(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int i = 0;

	for (i = 0; i < adev->mode_info.num_crtc; i++)
		if (adev->mode_info.crtcs[i])
			hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer);

	drm_kms_helper_poll_fini(adev_to_drm(adev));
	drm_mode_config_cleanup(adev_to_drm(adev));

	adev->mode_info.mode_config_initialized = false;

	drm_edid_free(adev->mode_info.bios_hardcoded_edid);
	kfree(adev->amdgpu_vkms_output);
	return 0;
}

static int amdgpu_vkms_hw_init(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;

	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
		dce_v6_0_disable_dce(adev);
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		dce_v8_0_disable_dce(adev);
		break;
#endif
	case CHIP_FIJI:
	case CHIP_TONGA:
		dce_v10_0_disable_dce(adev);
		break;
	case CHIP_TOPAZ:
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_HAINAN:
#endif
		/* no DCE */
		break;
	default:
		break;
	}
	return 0;
}

static int amdgpu_vkms_hw_fini(struct amdgpu_ip_block *ip_block)
{
	return 0;
}

static int amdgpu_vkms_suspend(struct amdgpu_ip_block *ip_block)
{
	struct amdgpu_device *adev = ip_block->adev;
	int r;

	r = drm_mode_config_helper_suspend(adev_to_drm(adev));
	if (r)
		return r;

	return 0;
}

static int amdgpu_vkms_resume(struct amdgpu_ip_block *ip_block)
{
	int r;

	r = amdgpu_vkms_hw_init(ip_block);
	if (r)
		return r;
	return drm_mode_config_helper_resume(adev_to_drm(ip_block->adev));
}

static bool amdgpu_vkms_is_idle(struct amdgpu_ip_block *ip_block)
{
	return true;
}

static int amdgpu_vkms_set_clockgating_state(struct amdgpu_ip_block *ip_block,
					     enum amd_clockgating_state state)
{
	return 0;
}

static int amdgpu_vkms_set_powergating_state(struct amdgpu_ip_block *ip_block,
					     enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs amdgpu_vkms_ip_funcs = {
	.name = "amdgpu_vkms",
	.sw_init = amdgpu_vkms_sw_init,
	.sw_fini = amdgpu_vkms_sw_fini,
	.hw_init = amdgpu_vkms_hw_init,
	.hw_fini = amdgpu_vkms_hw_fini,
	.suspend = amdgpu_vkms_suspend,
	.resume = amdgpu_vkms_resume,
	.is_idle = amdgpu_vkms_is_idle,
	.set_clockgating_state = amdgpu_vkms_set_clockgating_state,
	.set_powergating_state = amdgpu_vkms_set_powergating_state,
};

const struct amdgpu_ip_block_version amdgpu_vkms_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_DCE,
	.major = 1,
	.minor = 0,
	.rev = 0,
	.funcs = &amdgpu_vkms_ip_funcs,
};