1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2023 Intel Corporation
4 */
5
6#include <linux/hwmon-sysfs.h>
7#include <linux/hwmon.h>
8#include <linux/jiffies.h>
9#include <linux/types.h>
10#include <linux/units.h>
11
12#include <drm/drm_managed.h>
13#include "regs/xe_gt_regs.h"
14#include "regs/xe_mchbar_regs.h"
15#include "regs/xe_pcode_regs.h"
16#include "xe_device.h"
17#include "xe_hwmon.h"
18#include "xe_mmio.h"
19#include "xe_pcode.h"
20#include "xe_pcode_api.h"
21#include "xe_sriov.h"
22#include "xe_pm.h"
23#include "xe_vsec.h"
24#include "regs/xe_pmt.h"
25
/* Abstract register ids, resolved to platform MMIO registers in xe_hwmon_get_reg() */
enum xe_hwmon_reg {
	REG_TEMP,
	REG_PKG_RAPL_LIMIT,
	REG_PKG_POWER_SKU,
	REG_PKG_POWER_SKU_UNIT,
	REG_GT_PERF_STATUS,
	REG_PKG_ENERGY_STATUS,
	REG_FAN_SPEED,
};

/* Access operations performed on the registers above */
enum xe_hwmon_reg_operation {
	REG_READ32,
	REG_RMW32,
	REG_READ64,
};

/* Sensor channels: whole-card, package and VRAM readings */
enum xe_hwmon_channel {
	CHANNEL_CARD,
	CHANNEL_PKG,
	CHANNEL_VRAM,
	CHANNEL_MAX,
};

/* Fan channels; up to three fans are exposed as fanN_input */
enum xe_fan_channel {
	FAN_1,
	FAN_2,
	FAN_3,
	FAN_MAX,
};

/* Attribute index for powerX_xxx_interval sysfs entries */
enum sensor_attr_power {
	SENSOR_INDEX_PSYS_PL1,
	SENSOR_INDEX_PKG_PL1,
	SENSOR_INDEX_PSYS_PL2,
	SENSOR_INDEX_PKG_PL2,
};
63
/*
 * For platforms that support mailbox commands for power limits, REG_PKG_POWER_SKU_UNIT is
 * not supported and below are SKU units to be used.
 */
#define PWR_UNIT	0x3
#define ENERGY_UNIT	0xe
#define TIME_UNIT	0xa

/*
 * SF_* - scale factors for particular quantities according to hwmon spec.
 */
#define SF_POWER	1000000		/* microwatts */
#define SF_CURR		1000		/* milliamperes */
#define SF_VOLTAGE	1000		/* millivolts */
#define SF_ENERGY	1000000		/* microjoules */
#define SF_TIME		1000		/* milliseconds */

/*
 * PL*_HWMON_ATTR - mapping of hardware power limits to corresponding hwmon power attribute.
 */
#define PL1_HWMON_ATTR	hwmon_power_max
#define PL2_HWMON_ATTR	hwmon_power_cap

#define PWR_ATTR_TO_STR(attr)	(((attr) == hwmon_power_max) ? "PL1" : "PL2")

/*
 * Timeout for power limit write mailbox command.
 */
#define PL_WRITE_MBX_TIMEOUT_MS	(1)
93
/**
 * struct xe_hwmon_energy_info - to accumulate energy
 */
struct xe_hwmon_energy_info {
	/** @reg_val_prev: previous energy reg val */
	u32 reg_val_prev;
	/** @accum_energy: accumulated energy */
	long accum_energy;
};

/**
 * struct xe_hwmon_fan_info - to cache previous fan reading
 */
struct xe_hwmon_fan_info {
	/** @reg_val_prev: previous fan reg val */
	u32 reg_val_prev;
	/** @time_prev: previous timestamp */
	u64 time_prev;
};

/**
 * struct xe_hwmon - xe hwmon data structure
 */
struct xe_hwmon {
	/** @hwmon_dev: hwmon device for xe */
	struct device *hwmon_dev;
	/** @xe: Xe device */
	struct xe_device *xe;
	/** @hwmon_lock: lock for rw attributes*/
	struct mutex hwmon_lock;
	/** @scl_shift_power: pkg power unit */
	int scl_shift_power;
	/** @scl_shift_energy: pkg energy unit */
	int scl_shift_energy;
	/** @scl_shift_time: pkg time unit */
	int scl_shift_time;
	/** @ei: Energy info for energyN_input */
	struct xe_hwmon_energy_info ei[CHANNEL_MAX];
	/** @fi: Fan info for fanN_input */
	struct xe_hwmon_fan_info fi[FAN_MAX];
	/** @boot_power_limit_read: is boot power limits read */
	bool boot_power_limit_read;
	/** @pl1_on_boot: power limit PL1 on boot */
	u32 pl1_on_boot[CHANNEL_MAX];
	/** @pl2_on_boot: power limit PL2 on boot */
	u32 pl2_on_boot[CHANNEL_MAX];

};
142
143static int xe_hwmon_pcode_read_power_limit(const struct xe_hwmon *hwmon, u32 attr, int channel,
144 u32 *uval)
145{
146 struct xe_tile *root_tile = xe_device_get_root_tile(xe: hwmon->xe);
147 u32 val0 = 0, val1 = 0;
148 int ret = 0;
149
150 ret = xe_pcode_read(tile: root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
151 (channel == CHANNEL_CARD) ?
152 READ_PSYSGPU_POWER_LIMIT :
153 READ_PACKAGE_POWER_LIMIT,
154 hwmon->boot_power_limit_read ?
155 READ_PL_FROM_PCODE : READ_PL_FROM_FW),
156 val: &val0, val1: &val1);
157
158 if (ret) {
159 drm_dbg(&hwmon->xe->drm, "read failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
160 channel, val0, val1, ret);
161 *uval = 0;
162 return ret;
163 }
164
165 /* return the value only if limit is enabled */
166 if (attr == PL1_HWMON_ATTR)
167 *uval = (val0 & PWR_LIM_EN) ? val0 : 0;
168 else if (attr == PL2_HWMON_ATTR)
169 *uval = (val1 & PWR_LIM_EN) ? val1 : 0;
170 else if (attr == hwmon_power_label)
171 *uval = (val0 & PWR_LIM_EN) ? 1 : (val1 & PWR_LIM_EN) ? 1 : 0;
172 else
173 *uval = 0;
174
175 return ret;
176}
177
178static int xe_hwmon_pcode_rmw_power_limit(const struct xe_hwmon *hwmon, u32 attr, u8 channel,
179 u32 clr, u32 set)
180{
181 struct xe_tile *root_tile = xe_device_get_root_tile(xe: hwmon->xe);
182 u32 val0 = 0, val1 = 0;
183 int ret = 0;
184
185 ret = xe_pcode_read(tile: root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
186 (channel == CHANNEL_CARD) ?
187 READ_PSYSGPU_POWER_LIMIT :
188 READ_PACKAGE_POWER_LIMIT,
189 hwmon->boot_power_limit_read ?
190 READ_PL_FROM_PCODE : READ_PL_FROM_FW),
191 val: &val0, val1: &val1);
192
193 if (ret)
194 drm_dbg(&hwmon->xe->drm, "read failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
195 channel, val0, val1, ret);
196
197 if (attr == PL1_HWMON_ATTR)
198 val0 = (val0 & ~clr) | set;
199 else if (attr == PL2_HWMON_ATTR)
200 val1 = (val1 & ~clr) | set;
201 else
202 return -EIO;
203
204 ret = xe_pcode_write64_timeout(tile: root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
205 (channel == CHANNEL_CARD) ?
206 WRITE_PSYSGPU_POWER_LIMIT :
207 WRITE_PACKAGE_POWER_LIMIT, 0),
208 data0: val0, data1: val1, PL_WRITE_MBX_TIMEOUT_MS);
209 if (ret)
210 drm_dbg(&hwmon->xe->drm, "write failed ch %d val0 0x%08x, val1 0x%08x, ret %d\n",
211 channel, val0, val1, ret);
212 return ret;
213}
214
215static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
216 int channel)
217{
218 struct xe_device *xe = hwmon->xe;
219
220 switch (hwmon_reg) {
221 case REG_TEMP:
222 if (xe->info.platform == XE_BATTLEMAGE) {
223 if (channel == CHANNEL_PKG)
224 return BMG_PACKAGE_TEMPERATURE;
225 else if (channel == CHANNEL_VRAM)
226 return BMG_VRAM_TEMPERATURE;
227 } else if (xe->info.platform == XE_DG2) {
228 if (channel == CHANNEL_PKG)
229 return PCU_CR_PACKAGE_TEMPERATURE;
230 else if (channel == CHANNEL_VRAM)
231 return BMG_VRAM_TEMPERATURE;
232 }
233 break;
234 case REG_PKG_RAPL_LIMIT:
235 if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
236 return PVC_GT0_PACKAGE_RAPL_LIMIT;
237 else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
238 return PCU_CR_PACKAGE_RAPL_LIMIT;
239 break;
240 case REG_PKG_POWER_SKU:
241 if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG)
242 return PVC_GT0_PACKAGE_POWER_SKU;
243 else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG))
244 return PCU_CR_PACKAGE_POWER_SKU;
245 break;
246 case REG_PKG_POWER_SKU_UNIT:
247 if (xe->info.platform == XE_PVC)
248 return PVC_GT0_PACKAGE_POWER_SKU_UNIT;
249 else if (xe->info.platform == XE_DG2)
250 return PCU_CR_PACKAGE_POWER_SKU_UNIT;
251 break;
252 case REG_GT_PERF_STATUS:
253 if (xe->info.platform == XE_DG2 && channel == CHANNEL_PKG)
254 return GT_PERF_STATUS;
255 break;
256 case REG_PKG_ENERGY_STATUS:
257 if (xe->info.platform == XE_PVC && channel == CHANNEL_PKG) {
258 return PVC_GT0_PLATFORM_ENERGY_STATUS;
259 } else if ((xe->info.platform == XE_DG2) && (channel == CHANNEL_PKG)) {
260 return PCU_CR_PACKAGE_ENERGY_STATUS;
261 }
262 break;
263 case REG_FAN_SPEED:
264 if (channel == FAN_1)
265 return BMG_FAN_1_SPEED;
266 else if (channel == FAN_2)
267 return BMG_FAN_2_SPEED;
268 else if (channel == FAN_3)
269 return BMG_FAN_3_SPEED;
270 break;
271 default:
272 drm_warn(&xe->drm, "Unknown xe hwmon reg id: %d\n", hwmon_reg);
273 break;
274 }
275
276 return XE_REG(0);
277}
278
279#define PL_DISABLE 0
280
281/*
282 * HW allows arbitrary PL1 limits to be set but silently clamps these values to
283 * "typical but not guaranteed" min/max values in REG_PKG_POWER_SKU. Follow the
284 * same pattern for sysfs, allow arbitrary PL1 limits to be set but display
285 * clamped values when read.
286 */
287static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *value)
288{
289 u32 reg_val = 0;
290 struct xe_device *xe = hwmon->xe;
291 struct xe_reg rapl_limit, pkg_power_sku;
292 struct xe_mmio *mmio = xe_root_tile_mmio(xe);
293
294 mutex_lock(&hwmon->hwmon_lock);
295
296 if (hwmon->xe->info.has_mbx_power_limits) {
297 xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, uval: &reg_val);
298 } else {
299 rapl_limit = xe_hwmon_get_reg(hwmon, hwmon_reg: REG_PKG_RAPL_LIMIT, channel);
300 pkg_power_sku = xe_hwmon_get_reg(hwmon, hwmon_reg: REG_PKG_POWER_SKU, channel);
301 reg_val = xe_mmio_read32(mmio, reg: rapl_limit);
302 }
303
304 /* Check if PL limits are disabled. */
305 if (!(reg_val & PWR_LIM_EN)) {
306 *value = PL_DISABLE;
307 drm_info(&hwmon->xe->drm, "%s disabled for channel %d, val 0x%08x\n",
308 PWR_ATTR_TO_STR(attr), channel, reg_val);
309 goto unlock;
310 }
311
312 reg_val = REG_FIELD_GET(PWR_LIM_VAL, reg_val);
313 *value = mul_u32_u32(a: reg_val, SF_POWER) >> hwmon->scl_shift_power;
314
315 /* For platforms with mailbox power limit support clamping would be done by pcode. */
316 if (!hwmon->xe->info.has_mbx_power_limits) {
317 u64 pkg_pwr, min, max;
318
319 pkg_pwr = xe_mmio_read64_2x32(mmio, reg: pkg_power_sku);
320 min = REG_FIELD_GET(PKG_MIN_PWR, pkg_pwr);
321 max = REG_FIELD_GET(PKG_MAX_PWR, pkg_pwr);
322 min = mul_u64_u32_shr(a: min, SF_POWER, shift: hwmon->scl_shift_power);
323 max = mul_u64_u32_shr(a: max, SF_POWER, shift: hwmon->scl_shift_power);
324 if (min && max)
325 *value = clamp_t(u64, *value, min, max);
326 }
327unlock:
328 mutex_unlock(lock: &hwmon->hwmon_lock);
329}
330
331static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, u32 attr, int channel, long value)
332{
333 struct xe_mmio *mmio = xe_root_tile_mmio(xe: hwmon->xe);
334 int ret = 0;
335 u32 reg_val, max;
336 struct xe_reg rapl_limit;
337 u64 max_supp_power_limit = 0;
338
339 mutex_lock(&hwmon->hwmon_lock);
340
341 rapl_limit = xe_hwmon_get_reg(hwmon, hwmon_reg: REG_PKG_RAPL_LIMIT, channel);
342
343 /* Disable Power Limit and verify, as limit cannot be disabled on all platforms. */
344 if (value == PL_DISABLE) {
345 if (hwmon->xe->info.has_mbx_power_limits) {
346 drm_dbg(&hwmon->xe->drm, "disabling %s on channel %d\n",
347 PWR_ATTR_TO_STR(attr), channel);
348 xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM_EN, set: 0);
349 xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, uval: &reg_val);
350 } else {
351 reg_val = xe_mmio_rmw32(mmio, reg: rapl_limit, PWR_LIM_EN, set: 0);
352 reg_val = xe_mmio_read32(mmio, reg: rapl_limit);
353 }
354
355 if (reg_val & PWR_LIM_EN) {
356 drm_warn(&hwmon->xe->drm, "Power limit disable is not supported!\n");
357 ret = -EOPNOTSUPP;
358 }
359 goto unlock;
360 }
361
362 /*
363 * If the sysfs value exceeds the maximum pcode supported power limit value, clamp it to
364 * the supported maximum (U12.3 format).
365 * This is to avoid truncation during reg_val calculation below and ensure the valid
366 * power limit is sent for pcode which would clamp it to card-supported value.
367 */
368 max_supp_power_limit = ((PWR_LIM_VAL) >> hwmon->scl_shift_power) * SF_POWER;
369 if (value > max_supp_power_limit) {
370 value = max_supp_power_limit;
371 drm_info(&hwmon->xe->drm,
372 "Power limit clamped as selected %s exceeds channel %d limit\n",
373 PWR_ATTR_TO_STR(attr), channel);
374 }
375
376 /* Computation in 64-bits to avoid overflow. Round to nearest. */
377 reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
378
379 /*
380 * Clamp power limit to GPU firmware default as maximum, as an additional protection to
381 * pcode clamp.
382 */
383 if (hwmon->xe->info.has_mbx_power_limits) {
384 max = (attr == PL1_HWMON_ATTR) ?
385 hwmon->pl1_on_boot[channel] : hwmon->pl2_on_boot[channel];
386 max = REG_FIELD_PREP(PWR_LIM_VAL, max);
387 if (reg_val > max) {
388 reg_val = max;
389 drm_dbg(&hwmon->xe->drm,
390 "Clamping power limit to GPU firmware default 0x%x\n",
391 reg_val);
392 }
393 }
394
395 reg_val = PWR_LIM_EN | REG_FIELD_PREP(PWR_LIM_VAL, reg_val);
396
397 if (hwmon->xe->info.has_mbx_power_limits)
398 ret = xe_hwmon_pcode_rmw_power_limit(hwmon, attr, channel, PWR_LIM, set: reg_val);
399 else
400 reg_val = xe_mmio_rmw32(mmio, reg: rapl_limit, PWR_LIM, set: reg_val);
401unlock:
402 mutex_unlock(lock: &hwmon->hwmon_lock);
403 return ret;
404}
405
406static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, u32 attr, int channel,
407 long *value)
408{
409 struct xe_mmio *mmio = xe_root_tile_mmio(xe: hwmon->xe);
410 u32 reg_val;
411
412 if (hwmon->xe->info.has_mbx_power_limits) {
413 /* PL1 is rated max if supported. */
414 xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, channel, uval: &reg_val);
415 } else {
416 /*
417 * This sysfs file won't be visible if REG_PKG_POWER_SKU is invalid, so valid check
418 * for this register can be skipped.
419 * See xe_hwmon_power_is_visible.
420 */
421 struct xe_reg reg = xe_hwmon_get_reg(hwmon, hwmon_reg: REG_PKG_POWER_SKU, channel);
422
423 reg_val = xe_mmio_read32(mmio, reg);
424 }
425
426 reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
427 *value = mul_u64_u32_shr(a: reg_val, SF_POWER, shift: hwmon->scl_shift_power);
428}
429
430/*
431 * xe_hwmon_energy_get - Obtain energy value
432 *
433 * The underlying energy hardware register is 32-bits and is subject to
434 * overflow. How long before overflow? For example, with an example
435 * scaling bit shift of 14 bits (see register *PACKAGE_POWER_SKU_UNIT) and
436 * a power draw of 1000 watts, the 32-bit counter will overflow in
437 * approximately 4.36 minutes.
438 *
439 * Examples:
440 * 1 watt: (2^32 >> 14) / 1 W / (60 * 60 * 24) secs/day -> 3 days
441 * 1000 watts: (2^32 >> 14) / 1000 W / 60 secs/min -> 4.36 minutes
442 *
443 * The function significantly increases overflow duration (from 4.36
444 * minutes) by accumulating the energy register into a 'long' as allowed by
445 * the hwmon API. Using x86_64 128 bit arithmetic (see mul_u64_u32_shr()),
446 * a 'long' of 63 bits, SF_ENERGY of 1e6 (~20 bits) and
447 * hwmon->scl_shift_energy of 14 bits we have 57 (63 - 20 + 14) bits before
448 * energyN_input overflows. This at 1000 W is an overflow duration of 278 years.
449 */
450static void
451xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
452{
453 struct xe_mmio *mmio = xe_root_tile_mmio(xe: hwmon->xe);
454 struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
455 u32 reg_val;
456 int ret = 0;
457
458 /* Energy is supported only for card and pkg */
459 if (channel > CHANNEL_PKG) {
460 *energy = 0;
461 return;
462 }
463
464 if (hwmon->xe->info.platform == XE_BATTLEMAGE) {
465 u64 pmt_val;
466
467 ret = xe_pmt_telem_read(to_pci_dev(hwmon->xe->drm.dev),
468 guid: xe_mmio_read32(mmio, PUNIT_TELEMETRY_GUID),
469 data: &pmt_val, BMG_ENERGY_STATUS_PMT_OFFSET, count: sizeof(pmt_val));
470 if (ret != sizeof(pmt_val)) {
471 drm_warn(&hwmon->xe->drm, "energy read from pmt failed, ret %d\n", ret);
472 *energy = 0;
473 return;
474 }
475
476 if (channel == CHANNEL_PKG)
477 reg_val = REG_FIELD_GET64(ENERGY_PKG, pmt_val);
478 else
479 reg_val = REG_FIELD_GET64(ENERGY_CARD, pmt_val);
480 } else {
481 reg_val = xe_mmio_read32(mmio, reg: xe_hwmon_get_reg(hwmon, hwmon_reg: REG_PKG_ENERGY_STATUS,
482 channel));
483 }
484
485 ei->accum_energy += reg_val - ei->reg_val_prev;
486 ei->reg_val_prev = reg_val;
487
488 *energy = mul_u64_u32_shr(a: ei->accum_energy, SF_ENERGY,
489 shift: hwmon->scl_shift_energy);
490}
491
492static ssize_t
493xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *attr,
494 char *buf)
495{
496 struct xe_hwmon *hwmon = dev_get_drvdata(dev);
497 struct xe_mmio *mmio = xe_root_tile_mmio(xe: hwmon->xe);
498 u32 reg_val, x, y, x_w = 2; /* 2 bits */
499 u64 tau4, out;
500 int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
501 u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
502
503 int ret = 0;
504
505 xe_pm_runtime_get(xe: hwmon->xe);
506
507 mutex_lock(&hwmon->hwmon_lock);
508
509 if (hwmon->xe->info.has_mbx_power_limits) {
510 ret = xe_hwmon_pcode_read_power_limit(hwmon, attr: power_attr, channel, uval: &reg_val);
511 if (ret) {
512 drm_err(&hwmon->xe->drm,
513 "power interval read fail, ch %d, attr %d, val 0x%08x, ret %d\n",
514 channel, power_attr, reg_val, ret);
515 reg_val = 0;
516 }
517 } else {
518 reg_val = xe_mmio_read32(mmio, reg: xe_hwmon_get_reg(hwmon, hwmon_reg: REG_PKG_RAPL_LIMIT,
519 channel));
520 }
521
522 mutex_unlock(lock: &hwmon->hwmon_lock);
523
524 xe_pm_runtime_put(xe: hwmon->xe);
525
526 x = REG_FIELD_GET(PWR_LIM_TIME_X, reg_val);
527 y = REG_FIELD_GET(PWR_LIM_TIME_Y, reg_val);
528
529 /*
530 * tau = (1 + (x / 4)) * power(2,y), x = bits(23:22), y = bits(21:17)
531 * = (4 | x) << (y - 2)
532 *
533 * Here (y - 2) ensures a 1.x fixed point representation of 1.x
534 * As x is 2 bits so 1.x can be 1.0, 1.25, 1.50, 1.75
535 *
536 * As y can be < 2, we compute tau4 = (4 | x) << y
537 * and then add 2 when doing the final right shift to account for units
538 */
539 tau4 = (u64)((1 << x_w) | x) << y;
540
541 /* val in hwmon interface units (millisec) */
542 out = mul_u64_u32_shr(a: tau4, SF_TIME, shift: hwmon->scl_shift_time + x_w);
543
544 return sysfs_emit(buf, fmt: "%llu\n", out);
545}
546
547static ssize_t
548xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *attr,
549 const char *buf, size_t count)
550{
551 struct xe_hwmon *hwmon = dev_get_drvdata(dev);
552 struct xe_mmio *mmio = xe_root_tile_mmio(xe: hwmon->xe);
553 u32 x, y, rxy, x_w = 2; /* 2 bits */
554 u64 tau4, r, max_win;
555 unsigned long val;
556 int channel = (to_sensor_dev_attr(attr)->index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
557 u32 power_attr = (to_sensor_dev_attr(attr)->index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
558 int ret;
559
560 ret = kstrtoul(s: buf, base: 0, res: &val);
561 if (ret)
562 return ret;
563
564 /*
565 * Max HW supported tau in '(1 + (x / 4)) * power(2,y)' format, x = 0, y = 0x12.
566 * The hwmon->scl_shift_time default of 0xa results in a max tau of 256 seconds.
567 *
568 * The ideal scenario is for PKG_MAX_WIN to be read from the PKG_PWR_SKU register.
569 * However, it is observed that existing discrete GPUs does not provide correct
570 * PKG_MAX_WIN value, therefore a using default constant value. For future discrete GPUs
571 * this may get resolved, in which case PKG_MAX_WIN should be obtained from PKG_PWR_SKU.
572 */
573#define PKG_MAX_WIN_DEFAULT 0x12ull
574
575 /*
576 * val must be < max in hwmon interface units. The steps below are
577 * explained in xe_hwmon_power_max_interval_show()
578 */
579 r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
580 x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
581 y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
582 tau4 = (u64)((1 << x_w) | x) << y;
583 max_win = mul_u64_u32_shr(a: tau4, SF_TIME, shift: hwmon->scl_shift_time + x_w);
584
585 if (val > max_win)
586 return -EINVAL;
587
588 /* val in hw units */
589 val = DIV_ROUND_CLOSEST_ULL((u64)val << hwmon->scl_shift_time, SF_TIME) + 1;
590
591 /*
592 * Convert val to 1.x * power(2,y)
593 * y = ilog2(val)
594 * x = (val - (1 << y)) >> (y - 2)
595 */
596 if (!val) {
597 y = 0;
598 x = 0;
599 } else {
600 y = ilog2(val);
601 x = (val - (1ul << y)) << x_w >> y;
602 }
603
604 rxy = REG_FIELD_PREP(PWR_LIM_TIME_X, x) |
605 REG_FIELD_PREP(PWR_LIM_TIME_Y, y);
606
607 xe_pm_runtime_get(xe: hwmon->xe);
608
609 mutex_lock(&hwmon->hwmon_lock);
610
611 if (hwmon->xe->info.has_mbx_power_limits)
612 xe_hwmon_pcode_rmw_power_limit(hwmon, attr: power_attr, channel, PWR_LIM_TIME, set: rxy);
613 else
614 r = xe_mmio_rmw32(mmio, reg: xe_hwmon_get_reg(hwmon, hwmon_reg: REG_PKG_RAPL_LIMIT, channel),
615 PWR_LIM_TIME, set: rxy);
616
617 mutex_unlock(lock: &hwmon->hwmon_lock);
618
619 xe_pm_runtime_put(xe: hwmon->xe);
620
621 return count;
622}
623
/* PSYS PL1 */
static SENSOR_DEVICE_ATTR(power1_max_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, SENSOR_INDEX_PSYS_PL1);
/* PKG PL1 */
static SENSOR_DEVICE_ATTR(power2_max_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, SENSOR_INDEX_PKG_PL1);
/* PSYS PL2 */
static SENSOR_DEVICE_ATTR(power1_cap_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, SENSOR_INDEX_PSYS_PL2);
/* PKG PL2 */
static SENSOR_DEVICE_ATTR(power2_cap_interval, 0664,
			  xe_hwmon_power_max_interval_show,
			  xe_hwmon_power_max_interval_store, SENSOR_INDEX_PKG_PL2);

/* Extra (non hwmon-core) sysfs attributes; visibility gated below */
static struct attribute *hwmon_attributes[] = {
	&sensor_dev_attr_power1_max_interval.dev_attr.attr,
	&sensor_dev_attr_power2_max_interval.dev_attr.attr,
	&sensor_dev_attr_power1_cap_interval.dev_attr.attr,
	&sensor_dev_attr_power2_cap_interval.dev_attr.attr,
	NULL
};
648
649static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
650 struct attribute *attr, int index)
651{
652 struct device *dev = kobj_to_dev(kobj);
653 struct xe_hwmon *hwmon = dev_get_drvdata(dev);
654 int ret = 0;
655 int channel = (index % 2) ? CHANNEL_PKG : CHANNEL_CARD;
656 u32 power_attr = (index > 1) ? PL2_HWMON_ATTR : PL1_HWMON_ATTR;
657 u32 uval = 0;
658 struct xe_reg rapl_limit;
659 struct xe_mmio *mmio = xe_root_tile_mmio(xe: hwmon->xe);
660
661 if (hwmon->xe->info.has_mbx_power_limits) {
662 xe_hwmon_pcode_read_power_limit(hwmon, attr: power_attr, channel, uval: &uval);
663 } else if (power_attr != PL2_HWMON_ATTR) {
664 rapl_limit = xe_hwmon_get_reg(hwmon, hwmon_reg: REG_PKG_RAPL_LIMIT, channel);
665 if (xe_reg_is_valid(r: rapl_limit))
666 uval = xe_mmio_read32(mmio, reg: rapl_limit);
667 }
668 ret = (uval & PWR_LIM_EN) ? attr->mode : 0;
669
670 return ret;
671}
672
static const struct attribute_group hwmon_attrgroup = {
	.attrs = hwmon_attributes,
	.is_visible = xe_hwmon_attributes_visible,
};

static const struct attribute_group *hwmon_groups[] = {
	&hwmon_attrgroup,
	NULL
};

/* Channels advertised to the hwmon core; per-attribute visibility decided in callbacks */
static const struct hwmon_channel_info * const hwmon_info[] = {
	HWMON_CHANNEL_INFO(temp, HWMON_T_LABEL, HWMON_T_INPUT | HWMON_T_LABEL,
			   HWMON_T_INPUT | HWMON_T_LABEL),
	HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CRIT |
			   HWMON_P_CAP,
			   HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_LABEL | HWMON_P_CAP),
	HWMON_CHANNEL_INFO(curr, HWMON_C_LABEL, HWMON_C_CRIT | HWMON_C_LABEL),
	HWMON_CHANNEL_INFO(in, HWMON_I_INPUT | HWMON_I_LABEL, HWMON_I_INPUT | HWMON_I_LABEL),
	HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT | HWMON_E_LABEL, HWMON_E_INPUT | HWMON_E_LABEL),
	HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT, HWMON_F_INPUT, HWMON_F_INPUT),
	NULL
};
695
696/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
697static int xe_hwmon_pcode_read_i1(const struct xe_hwmon *hwmon, u32 *uval)
698{
699 struct xe_tile *root_tile = xe_device_get_root_tile(xe: hwmon->xe);
700
701 /* Avoid Illegal Subcommand error */
702 if (hwmon->xe->info.platform == XE_DG2)
703 return -ENXIO;
704
705 return xe_pcode_read(tile: root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
706 POWER_SETUP_SUBCOMMAND_READ_I1, 0),
707 val: uval, NULL);
708}
709
710static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval)
711{
712 struct xe_tile *root_tile = xe_device_get_root_tile(xe: hwmon->xe);
713
714 return xe_pcode_write(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
715 POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
716 (uval & POWER_SETUP_I1_DATA_MASK));
717}
718
719static int xe_hwmon_pcode_read_fan_control(const struct xe_hwmon *hwmon, u32 subcmd, u32 *uval)
720{
721 struct xe_tile *root_tile = xe_device_get_root_tile(xe: hwmon->xe);
722
723 /* Platforms that don't return correct value */
724 if (hwmon->xe->info.platform == XE_DG2 && subcmd == FSC_READ_NUM_FANS) {
725 *uval = 2;
726 return 0;
727 }
728
729 return xe_pcode_read(tile: root_tile, PCODE_MBOX(FAN_SPEED_CONTROL, subcmd, 0), val: uval, NULL);
730}
731
732static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
733 long *value, u32 scale_factor)
734{
735 int ret;
736 u32 uval = 0;
737
738 mutex_lock(&hwmon->hwmon_lock);
739
740 ret = xe_hwmon_pcode_read_i1(hwmon, uval: &uval);
741 if (ret)
742 goto unlock;
743
744 *value = mul_u64_u32_shr(REG_FIELD_GET(POWER_SETUP_I1_DATA_MASK, uval),
745 mul: scale_factor, POWER_SETUP_I1_SHIFT);
746unlock:
747 mutex_unlock(lock: &hwmon->hwmon_lock);
748 return ret;
749}
750
751static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
752 long value, u32 scale_factor)
753{
754 int ret;
755 u32 uval;
756 u64 max_crit_power_curr = 0;
757
758 mutex_lock(&hwmon->hwmon_lock);
759
760 /*
761 * If the sysfs value exceeds the pcode mailbox cmd POWER_SETUP_SUBCOMMAND_WRITE_I1
762 * max supported value, clamp it to the command's max (U10.6 format).
763 * This is to avoid truncation during uval calculation below and ensure the valid power
764 * limit is sent for pcode which would clamp it to card-supported value.
765 */
766 max_crit_power_curr = (POWER_SETUP_I1_DATA_MASK >> POWER_SETUP_I1_SHIFT) * scale_factor;
767 if (value > max_crit_power_curr) {
768 value = max_crit_power_curr;
769 drm_info(&hwmon->xe->drm,
770 "Power limit clamped as selected exceeds channel %d limit\n",
771 channel);
772 }
773 uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
774 ret = xe_hwmon_pcode_write_i1(hwmon, uval);
775
776 mutex_unlock(lock: &hwmon->hwmon_lock);
777 return ret;
778}
779
780static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value)
781{
782 struct xe_mmio *mmio = xe_root_tile_mmio(xe: hwmon->xe);
783 u64 reg_val;
784
785 reg_val = xe_mmio_read32(mmio, reg: xe_hwmon_get_reg(hwmon, hwmon_reg: REG_GT_PERF_STATUS, channel));
786 /* HW register value in units of 2.5 millivolt */
787 *value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
788}
789
790static umode_t
791xe_hwmon_temp_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
792{
793 switch (attr) {
794 case hwmon_temp_input:
795 case hwmon_temp_label:
796 return xe_reg_is_valid(r: xe_hwmon_get_reg(hwmon, hwmon_reg: REG_TEMP, channel)) ? 0444 : 0;
797 default:
798 return 0;
799 }
800}
801
802static int
803xe_hwmon_temp_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
804{
805 struct xe_mmio *mmio = xe_root_tile_mmio(xe: hwmon->xe);
806 u64 reg_val;
807
808 switch (attr) {
809 case hwmon_temp_input:
810 reg_val = xe_mmio_read32(mmio, reg: xe_hwmon_get_reg(hwmon, hwmon_reg: REG_TEMP, channel));
811
812 /* HW register value is in degrees Celsius, convert to millidegrees. */
813 *val = REG_FIELD_GET(TEMP_MASK, reg_val) * MILLIDEGREE_PER_DEGREE;
814 return 0;
815 default:
816 return -EOPNOTSUPP;
817 }
818}
819
820static umode_t
821xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
822{
823 u32 uval = 0;
824 struct xe_reg reg;
825 struct xe_mmio *mmio = xe_root_tile_mmio(xe: hwmon->xe);
826
827 switch (attr) {
828 case hwmon_power_max:
829 case hwmon_power_cap:
830 if (hwmon->xe->info.has_mbx_power_limits) {
831 xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, uval: &uval);
832 } else if (attr != PL2_HWMON_ATTR) {
833 reg = xe_hwmon_get_reg(hwmon, hwmon_reg: REG_PKG_RAPL_LIMIT, channel);
834 if (xe_reg_is_valid(r: reg))
835 uval = xe_mmio_read32(mmio, reg);
836 }
837 if (uval & PWR_LIM_EN) {
838 drm_info(&hwmon->xe->drm, "%s is supported on channel %d\n",
839 PWR_ATTR_TO_STR(attr), channel);
840 return 0664;
841 }
842 drm_dbg(&hwmon->xe->drm, "%s is unsupported on channel %d\n",
843 PWR_ATTR_TO_STR(attr), channel);
844 return 0;
845 case hwmon_power_rated_max:
846 if (hwmon->xe->info.has_mbx_power_limits) {
847 return 0;
848 } else {
849 reg = xe_hwmon_get_reg(hwmon, hwmon_reg: REG_PKG_POWER_SKU, channel);
850 if (xe_reg_is_valid(r: reg))
851 uval = xe_mmio_read32(mmio, reg);
852 return uval ? 0444 : 0;
853 }
854 case hwmon_power_crit:
855 if (channel == CHANNEL_CARD) {
856 xe_hwmon_pcode_read_i1(hwmon, uval: &uval);
857 return (uval & POWER_SETUP_I1_WATTS) ? 0644 : 0;
858 }
859 break;
860 case hwmon_power_label:
861 if (hwmon->xe->info.has_mbx_power_limits) {
862 xe_hwmon_pcode_read_power_limit(hwmon, attr, channel, uval: &uval);
863 } else {
864 reg = xe_hwmon_get_reg(hwmon, hwmon_reg: REG_PKG_POWER_SKU, channel);
865 if (xe_reg_is_valid(r: reg))
866 uval = xe_mmio_read32(mmio, reg);
867
868 if (!uval) {
869 reg = xe_hwmon_get_reg(hwmon, hwmon_reg: REG_PKG_RAPL_LIMIT, channel);
870 if (xe_reg_is_valid(r: reg))
871 uval = xe_mmio_read32(mmio, reg);
872 }
873 }
874 if ((!(uval & PWR_LIM_EN)) && channel == CHANNEL_CARD) {
875 xe_hwmon_pcode_read_i1(hwmon, uval: &uval);
876 return (uval & POWER_SETUP_I1_WATTS) ? 0444 : 0;
877 }
878 return (uval) ? 0444 : 0;
879 default:
880 return 0;
881 }
882 return 0;
883}
884
885static int
886xe_hwmon_power_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
887{
888 switch (attr) {
889 case hwmon_power_max:
890 case hwmon_power_cap:
891 xe_hwmon_power_max_read(hwmon, attr, channel, value: val);
892 return 0;
893 case hwmon_power_rated_max:
894 xe_hwmon_power_rated_max_read(hwmon, attr, channel, value: val);
895 return 0;
896 case hwmon_power_crit:
897 return xe_hwmon_power_curr_crit_read(hwmon, channel, value: val, SF_POWER);
898 default:
899 return -EOPNOTSUPP;
900 }
901}
902
903static int
904xe_hwmon_power_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
905{
906 switch (attr) {
907 case hwmon_power_cap:
908 case hwmon_power_max:
909 return xe_hwmon_power_max_write(hwmon, attr, channel, value: val);
910 case hwmon_power_crit:
911 return xe_hwmon_power_curr_crit_write(hwmon, channel, value: val, SF_POWER);
912 default:
913 return -EOPNOTSUPP;
914 }
915}
916
917static umode_t
918xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
919{
920 u32 uval = 0;
921
922 /* hwmon sysfs attribute of current available only for package */
923 if (channel != CHANNEL_PKG)
924 return 0;
925
926 switch (attr) {
927 case hwmon_curr_crit:
928 return (xe_hwmon_pcode_read_i1(hwmon, uval: &uval) ||
929 (uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
930 case hwmon_curr_label:
931 return (xe_hwmon_pcode_read_i1(hwmon, uval: &uval) ||
932 (uval & POWER_SETUP_I1_WATTS)) ? 0 : 0444;
933 break;
934 default:
935 return 0;
936 }
937 return 0;
938}
939
940static int
941xe_hwmon_curr_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
942{
943 switch (attr) {
944 case hwmon_curr_crit:
945 return xe_hwmon_power_curr_crit_read(hwmon, channel, value: val, SF_CURR);
946 default:
947 return -EOPNOTSUPP;
948 }
949}
950
951static int
952xe_hwmon_curr_write(struct xe_hwmon *hwmon, u32 attr, int channel, long val)
953{
954 switch (attr) {
955 case hwmon_curr_crit:
956 return xe_hwmon_power_curr_crit_write(hwmon, channel, value: val, SF_CURR);
957 default:
958 return -EOPNOTSUPP;
959 }
960}
961
962static umode_t
963xe_hwmon_in_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
964{
965 switch (attr) {
966 case hwmon_in_input:
967 case hwmon_in_label:
968 return xe_reg_is_valid(r: xe_hwmon_get_reg(hwmon, hwmon_reg: REG_GT_PERF_STATUS,
969 channel)) ? 0444 : 0;
970 default:
971 return 0;
972 }
973}
974
975static int
976xe_hwmon_in_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
977{
978 switch (attr) {
979 case hwmon_in_input:
980 xe_hwmon_get_voltage(hwmon, channel, value: val);
981 return 0;
982 default:
983 return -EOPNOTSUPP;
984 }
985}
986
987static umode_t
988xe_hwmon_energy_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
989{
990 long energy = 0;
991
992 switch (attr) {
993 case hwmon_energy_input:
994 case hwmon_energy_label:
995 if (hwmon->xe->info.platform == XE_BATTLEMAGE) {
996 xe_hwmon_energy_get(hwmon, channel, energy: &energy);
997 return energy ? 0444 : 0;
998 } else {
999 return xe_reg_is_valid(r: xe_hwmon_get_reg(hwmon, hwmon_reg: REG_PKG_ENERGY_STATUS,
1000 channel)) ? 0444 : 0;
1001 }
1002 default:
1003 return 0;
1004 }
1005}
1006
1007static int
1008xe_hwmon_energy_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
1009{
1010 switch (attr) {
1011 case hwmon_energy_input:
1012 xe_hwmon_energy_get(hwmon, channel, energy: val);
1013 return 0;
1014 default:
1015 return -EOPNOTSUPP;
1016 }
1017}
1018
1019static umode_t
1020xe_hwmon_fan_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
1021{
1022 u32 uval = 0;
1023
1024 if (!hwmon->xe->info.has_fan_control)
1025 return 0;
1026
1027 switch (attr) {
1028 case hwmon_fan_input:
1029 if (xe_hwmon_pcode_read_fan_control(hwmon, FSC_READ_NUM_FANS, uval: &uval))
1030 return 0;
1031
1032 return channel < uval ? 0444 : 0;
1033 default:
1034 return 0;
1035 }
1036}
1037
/*
 * xe_hwmon_fan_input_read - compute fan speed in RPM for a channel
 *
 * The HW exposes a free-running pulse counter. Speed is derived by sampling
 * counter and timestamp under the lock and dividing the deltas against the
 * previous sample cached in hwmon->fi[channel].
 *
 * Returns 0 on success, -EAGAIN when called again within the same
 * millisecond (no elapsed time to average over).
 */
static int
xe_hwmon_fan_input_read(struct xe_hwmon *hwmon, int channel, long *val)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	struct xe_hwmon_fan_info *fi = &hwmon->fi[channel];
	u64 rotations, time_now, time;
	u32 reg_val;
	int ret = 0;

	/* Lock serializes access to the per-channel prev-sample cache. */
	mutex_lock(&hwmon->hwmon_lock);

	/* Sample counter and timestamp back to back so the pair is consistent. */
	reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_FAN_SPEED, channel));
	time_now = get_jiffies_64();

	/*
	 * HW register value is accumulated count of pulses from PWM fan with the scale
	 * of 2 pulses per rotation.
	 */
	rotations = (reg_val - fi->reg_val_prev) / 2;

	time = jiffies_delta_to_msecs(time_now - fi->time_prev);
	if (unlikely(!time)) {
		/* Zero elapsed ms would divide by zero below; caller should retry. */
		ret = -EAGAIN;
		goto unlock;
	}

	/*
	 * Calculate fan speed in RPM by time averaging two subsequent readings in minutes.
	 * RPM = number of rotations * msecs per minute / time in msecs
	 */
	*val = DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time);

	/* Cache this sample as the baseline for the next read. */
	fi->reg_val_prev = reg_val;
	fi->time_prev = time_now;
unlock:
	mutex_unlock(&hwmon->hwmon_lock);
	return ret;
}
1076
1077static int
1078xe_hwmon_fan_read(struct xe_hwmon *hwmon, u32 attr, int channel, long *val)
1079{
1080 switch (attr) {
1081 case hwmon_fan_input:
1082 return xe_hwmon_fan_input_read(hwmon, channel, val);
1083 default:
1084 return -EOPNOTSUPP;
1085 }
1086}
1087
1088static umode_t
1089xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
1090 u32 attr, int channel)
1091{
1092 struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
1093 int ret;
1094
1095 switch (type) {
1096 case hwmon_temp:
1097 ret = xe_hwmon_temp_is_visible(hwmon, attr, channel);
1098 break;
1099 case hwmon_power:
1100 ret = xe_hwmon_power_is_visible(hwmon, attr, channel);
1101 break;
1102 case hwmon_curr:
1103 ret = xe_hwmon_curr_is_visible(hwmon, attr, channel);
1104 break;
1105 case hwmon_in:
1106 ret = xe_hwmon_in_is_visible(hwmon, attr, channel);
1107 break;
1108 case hwmon_energy:
1109 ret = xe_hwmon_energy_is_visible(hwmon, attr, channel);
1110 break;
1111 case hwmon_fan:
1112 ret = xe_hwmon_fan_is_visible(hwmon, attr, channel);
1113 break;
1114 default:
1115 ret = 0;
1116 break;
1117 }
1118
1119 return ret;
1120}
1121
1122static int
1123xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
1124 int channel, long *val)
1125{
1126 struct xe_hwmon *hwmon = dev_get_drvdata(dev);
1127 int ret;
1128
1129 xe_pm_runtime_get(xe: hwmon->xe);
1130
1131 switch (type) {
1132 case hwmon_temp:
1133 ret = xe_hwmon_temp_read(hwmon, attr, channel, val);
1134 break;
1135 case hwmon_power:
1136 ret = xe_hwmon_power_read(hwmon, attr, channel, val);
1137 break;
1138 case hwmon_curr:
1139 ret = xe_hwmon_curr_read(hwmon, attr, channel, val);
1140 break;
1141 case hwmon_in:
1142 ret = xe_hwmon_in_read(hwmon, attr, channel, val);
1143 break;
1144 case hwmon_energy:
1145 ret = xe_hwmon_energy_read(hwmon, attr, channel, val);
1146 break;
1147 case hwmon_fan:
1148 ret = xe_hwmon_fan_read(hwmon, attr, channel, val);
1149 break;
1150 default:
1151 ret = -EOPNOTSUPP;
1152 break;
1153 }
1154
1155 xe_pm_runtime_put(xe: hwmon->xe);
1156
1157 return ret;
1158}
1159
1160static int
1161xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
1162 int channel, long val)
1163{
1164 struct xe_hwmon *hwmon = dev_get_drvdata(dev);
1165 int ret;
1166
1167 xe_pm_runtime_get(xe: hwmon->xe);
1168
1169 switch (type) {
1170 case hwmon_power:
1171 ret = xe_hwmon_power_write(hwmon, attr, channel, val);
1172 break;
1173 case hwmon_curr:
1174 ret = xe_hwmon_curr_write(hwmon, attr, channel, val);
1175 break;
1176 default:
1177 ret = -EOPNOTSUPP;
1178 break;
1179 }
1180
1181 xe_pm_runtime_put(xe: hwmon->xe);
1182
1183 return ret;
1184}
1185
1186static int xe_hwmon_read_label(struct device *dev,
1187 enum hwmon_sensor_types type,
1188 u32 attr, int channel, const char **str)
1189{
1190 switch (type) {
1191 case hwmon_temp:
1192 if (channel == CHANNEL_PKG)
1193 *str = "pkg";
1194 else if (channel == CHANNEL_VRAM)
1195 *str = "vram";
1196 return 0;
1197 case hwmon_power:
1198 case hwmon_energy:
1199 case hwmon_curr:
1200 case hwmon_in:
1201 if (channel == CHANNEL_CARD)
1202 *str = "card";
1203 else if (channel == CHANNEL_PKG)
1204 *str = "pkg";
1205 return 0;
1206 default:
1207 return -EOPNOTSUPP;
1208 }
1209}
1210
/* Callbacks wired into the hwmon core for this device. */
static const struct hwmon_ops hwmon_ops = {
	.is_visible = xe_hwmon_is_visible,
	.read = xe_hwmon_read,
	.write = xe_hwmon_write,
	.read_string = xe_hwmon_read_label,
};
1217
/* Chip description passed to devm_hwmon_device_register_with_info(). */
static const struct hwmon_chip_info hwmon_chip_info = {
	.ops = &hwmon_ops,
	.info = hwmon_info,
};
1222
/*
 * xe_hwmon_get_preregistration_info - one-time HW probing before registration
 *
 * Caches the boot-time power limits, the power/energy/time scale shifts, and
 * primes the energy and fan sample caches, so that later sysfs reads have a
 * valid baseline.
 */
static void
xe_hwmon_get_preregistration_info(struct xe_hwmon *hwmon)
{
	struct xe_mmio *mmio = xe_root_tile_mmio(hwmon->xe);
	long energy, fan_speed;
	u64 val_sku_unit = 0;
	int channel;
	struct xe_reg pkg_power_sku_unit;

	if (hwmon->xe->info.has_mbx_power_limits) {
		/* Check if GPU firmware support mailbox power limits commands. */
		/*
		 * NOTE(review): bitwise '|' (not '||') appears intentional so all
		 * four limits are read even if an earlier one fails — confirm.
		 */
		if (xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_CARD,
						    &hwmon->pl1_on_boot[CHANNEL_CARD]) |
		    xe_hwmon_pcode_read_power_limit(hwmon, PL1_HWMON_ATTR, CHANNEL_PKG,
						    &hwmon->pl1_on_boot[CHANNEL_PKG]) |
		    xe_hwmon_pcode_read_power_limit(hwmon, PL2_HWMON_ATTR, CHANNEL_CARD,
						    &hwmon->pl2_on_boot[CHANNEL_CARD]) |
		    xe_hwmon_pcode_read_power_limit(hwmon, PL2_HWMON_ATTR, CHANNEL_PKG,
						    &hwmon->pl2_on_boot[CHANNEL_PKG])) {
			drm_warn(&hwmon->xe->drm,
				 "Failed to read power limits, check GPU firmware !\n");
		} else {
			drm_info(&hwmon->xe->drm, "Using mailbox commands for power limits\n");
			/* Write default limits to read from pcode from now on. */
			xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR,
						       CHANNEL_CARD, PWR_LIM | PWR_LIM_TIME,
						       hwmon->pl1_on_boot[CHANNEL_CARD]);
			xe_hwmon_pcode_rmw_power_limit(hwmon, PL1_HWMON_ATTR,
						       CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME,
						       hwmon->pl1_on_boot[CHANNEL_PKG]);
			xe_hwmon_pcode_rmw_power_limit(hwmon, PL2_HWMON_ATTR,
						       CHANNEL_CARD, PWR_LIM | PWR_LIM_TIME,
						       hwmon->pl2_on_boot[CHANNEL_CARD]);
			xe_hwmon_pcode_rmw_power_limit(hwmon, PL2_HWMON_ATTR,
						       CHANNEL_PKG, PWR_LIM | PWR_LIM_TIME,
						       hwmon->pl2_on_boot[CHANNEL_PKG]);
			/* Fixed SKU units for mailbox platforms (see PWR/ENERGY/TIME_UNIT). */
			hwmon->scl_shift_power = PWR_UNIT;
			hwmon->scl_shift_energy = ENERGY_UNIT;
			hwmon->scl_shift_time = TIME_UNIT;
			hwmon->boot_power_limit_read = true;
		}
	} else {
		drm_info(&hwmon->xe->drm, "Using register for power limits\n");
		/*
		 * The contents of register PKG_POWER_SKU_UNIT do not change,
		 * so read it once and store the shift values.
		 */
		pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0);
		if (xe_reg_is_valid(pkg_power_sku_unit)) {
			val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit);
			hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
			hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
			hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
		}
	}
	/*
	 * Initialize 'struct xe_hwmon_energy_info', i.e. set fields to the
	 * first value of the energy register read
	 */
	for (channel = 0; channel < CHANNEL_MAX; channel++)
		if (xe_hwmon_is_visible(hwmon, hwmon_energy, hwmon_energy_input, channel))
			xe_hwmon_energy_get(hwmon, channel, &energy);

	/* Initialize 'struct xe_hwmon_fan_info' with initial fan register reading. */
	for (channel = 0; channel < FAN_MAX; channel++)
		if (xe_hwmon_is_visible(hwmon, hwmon_fan, hwmon_fan_input, channel))
			xe_hwmon_fan_input_read(hwmon, channel, &fan_speed);
}
1291
1292int xe_hwmon_register(struct xe_device *xe)
1293{
1294 struct device *dev = xe->drm.dev;
1295 struct xe_hwmon *hwmon;
1296 int ret;
1297
1298 /* hwmon is available only for dGfx */
1299 if (!IS_DGFX(xe))
1300 return 0;
1301
1302 /* hwmon is not available on VFs */
1303 if (IS_SRIOV_VF(xe))
1304 return 0;
1305
1306 hwmon = devm_kzalloc(dev, size: sizeof(*hwmon), GFP_KERNEL);
1307 if (!hwmon)
1308 return -ENOMEM;
1309
1310 ret = devm_mutex_init(dev, &hwmon->hwmon_lock);
1311 if (ret)
1312 return ret;
1313
1314 /* There's only one instance of hwmon per device */
1315 hwmon->xe = xe;
1316 xe->hwmon = hwmon;
1317
1318 xe_hwmon_get_preregistration_info(hwmon);
1319
1320 drm_dbg(&xe->drm, "Register xe hwmon interface\n");
1321
1322 /* hwmon_dev points to device hwmon<i> */
1323 hwmon->hwmon_dev = devm_hwmon_device_register_with_info(dev, name: "xe", drvdata: hwmon,
1324 info: &hwmon_chip_info,
1325 extra_groups: hwmon_groups);
1326 if (IS_ERR(ptr: hwmon->hwmon_dev)) {
1327 drm_err(&xe->drm, "Failed to register xe hwmon (%pe)\n", hwmon->hwmon_dev);
1328 xe->hwmon = NULL;
1329 return PTR_ERR(ptr: hwmon->hwmon_dev);
1330 }
1331
1332 return 0;
1333}
1334MODULE_IMPORT_NS("INTEL_PMT_TELEMETRY");
1335

source code of linux/drivers/gpu/drm/xe/xe_hwmon.c