| 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Qualcomm ICE (Inline Crypto Engine) support. |
| 4 | * |
| 5 | * Copyright (c) 2013-2019, The Linux Foundation. All rights reserved. |
| 6 | * Copyright (c) 2019, Google LLC |
| 7 | * Copyright (c) 2023, Linaro Limited |
| 8 | */ |
| 9 | |
| 10 | #include <linux/bitfield.h> |
| 11 | #include <linux/cleanup.h> |
| 12 | #include <linux/clk.h> |
| 13 | #include <linux/delay.h> |
| 14 | #include <linux/device.h> |
| 15 | #include <linux/iopoll.h> |
| 16 | #include <linux/of.h> |
| 17 | #include <linux/of_platform.h> |
| 18 | #include <linux/platform_device.h> |
| 19 | |
| 20 | #include <linux/firmware/qcom/qcom_scm.h> |
| 21 | |
| 22 | #include <soc/qcom/ice.h> |
| 23 | |
| 24 | #define AES_256_XTS_KEY_SIZE 64 /* for raw keys only */ |
| 25 | |
| 26 | #define QCOM_ICE_HWKM_V1 1 /* HWKM version 1 */ |
| 27 | #define QCOM_ICE_HWKM_V2 2 /* HWKM version 2 */ |
| 28 | |
| 29 | #define QCOM_ICE_HWKM_MAX_WRAPPED_KEY_SIZE 100 /* Maximum HWKM wrapped key size */ |
| 30 | |
| 31 | /* |
| 32 | * Wrapped key size depends upon HWKM version: |
| 33 | * HWKM version 1 supports 68 bytes |
| 34 | * HWKM version 2 supports 100 bytes |
| 35 | */ |
| 36 | #define QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(v) ((v) == QCOM_ICE_HWKM_V1 ? 68 : 100) |
| 37 | |
| 38 | /* QCOM ICE registers */ |
| 39 | |
| 40 | #define QCOM_ICE_REG_CONTROL 0x0000 |
| 41 | #define QCOM_ICE_LEGACY_MODE_ENABLED BIT(0) |
| 42 | |
| 43 | #define QCOM_ICE_REG_VERSION 0x0008 |
| 44 | |
| 45 | #define QCOM_ICE_REG_FUSE_SETTING 0x0010 |
| 46 | #define QCOM_ICE_FUSE_SETTING_MASK BIT(0) |
| 47 | #define QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK BIT(1) |
| 48 | #define QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK BIT(2) |
| 49 | |
| 50 | #define QCOM_ICE_REG_BIST_STATUS 0x0070 |
| 51 | #define QCOM_ICE_BIST_STATUS_MASK GENMASK(31, 28) |
| 52 | |
| 53 | #define QCOM_ICE_REG_ADVANCED_CONTROL 0x1000 |
| 54 | |
| 55 | #define QCOM_ICE_REG_CRYPTOCFG_BASE 0x4040 |
| 56 | #define QCOM_ICE_REG_CRYPTOCFG_SIZE 0x80 |
| 57 | #define QCOM_ICE_REG_CRYPTOCFG(slot) (QCOM_ICE_REG_CRYPTOCFG_BASE + \ |
| 58 | QCOM_ICE_REG_CRYPTOCFG_SIZE * (slot)) |
/* Layout of a QCOM_ICE_REG_CRYPTOCFG(slot) register value */
union crypto_cfg {
	__le32 regval;
	struct {
		u8 dusize;	/* data unit size, in 512-byte units */
		u8 capidx;	/* crypto capability (cipher) index */
		u8 reserved;
#define QCOM_ICE_HWKM_CFG_ENABLE_VAL		BIT(7)
		u8 cfge;	/* configuration-enable flags */
	};
};
| 69 | |
| 70 | /* QCOM ICE HWKM (Hardware Key Manager) registers */ |
| 71 | |
| 72 | #define HWKM_OFFSET 0x8000 |
| 73 | |
| 74 | #define QCOM_ICE_REG_HWKM_TZ_KM_CTL (HWKM_OFFSET + 0x1000) |
| 75 | #define QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL (BIT(1) | BIT(2)) |
| 76 | /* In HWKM v1 the ICE legacy mode is controlled from HWKM register space */ |
| 77 | #define QCOM_ICE_HWKM_ICE_LEGACY_MODE_ENABLED BIT(5) |
| 78 | |
| 79 | #define QCOM_ICE_REG_HWKM_TZ_KM_STATUS (HWKM_OFFSET + 0x1004) |
| 80 | #define QCOM_ICE_HWKM_KT_CLEAR_DONE BIT(0) |
| 81 | #define QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE BIT(1) |
| 82 | #define QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE BIT(2) |
| 83 | #define QCOM_ICE_HWKM_CRYPTO_BIST_DONE(v) (((v) == QCOM_ICE_HWKM_V1) ? BIT(14) : BIT(7)) |
| 84 | #define QCOM_ICE_HWKM_BIST_DONE(v) (((v) == QCOM_ICE_HWKM_V1) ? BIT(16) : BIT(9)) |
| 85 | |
| 86 | #define QCOM_ICE_REG_HWKM_BANK0_BANKN_IRQ_STATUS (HWKM_OFFSET + 0x2008) |
| 87 | #define QCOM_ICE_HWKM_RSP_FIFO_CLEAR_VAL BIT(3) |
| 88 | |
| 89 | #define QCOM_ICE_REG_HWKM_BANK0_BBAC_0 (HWKM_OFFSET + 0x5000) |
| 90 | #define QCOM_ICE_REG_HWKM_BANK0_BBAC_1 (HWKM_OFFSET + 0x5004) |
| 91 | #define QCOM_ICE_REG_HWKM_BANK0_BBAC_2 (HWKM_OFFSET + 0x5008) |
| 92 | #define QCOM_ICE_REG_HWKM_BANK0_BBAC_3 (HWKM_OFFSET + 0x500C) |
| 93 | #define QCOM_ICE_REG_HWKM_BANK0_BBAC_4 (HWKM_OFFSET + 0x5010) |
| 94 | |
/* MMIO accessors; all ICE/HWKM registers are 32-bit */
#define qcom_ice_writel(engine, val, reg)	\
	writel((val), (engine)->base + (reg))

#define qcom_ice_readl(engine, reg)	\
	readl((engine)->base + (reg))

/*
 * Opt-in for HWKM (wrapped-key) mode; see the decision logic in
 * qcom_ice_check_supported().
 */
static bool qcom_ice_use_wrapped_keys;
module_param_named(use_wrapped_keys, qcom_ice_use_wrapped_keys, bool, 0660);
MODULE_PARM_DESC(use_wrapped_keys,
		 "Support wrapped keys instead of raw keys, if available on the platform" );
| 105 | |
/* Driver state for one ICE hardware instance */
struct qcom_ice {
	struct device *dev;
	void __iomem *base;		/* mapped ICE register space */

	struct clk *core_clk;		/* ICE core clock */
	bool use_hwkm;			/* wrapped-key (HWKM) mode selected */
	bool hwkm_init_complete;	/* HWKM init done since last resume */
	u8 hwkm_version;		/* 0 = none, else QCOM_ICE_HWKM_V1/V2 */
};
| 115 | |
/*
 * Read the ICE version register and determine whether this ICE instance is
 * supported.  Also detects the HWKM (Hardware Key Manager) version, checks
 * the fuse settings, and decides whether to operate in HWKM (wrapped-key)
 * mode or legacy (raw-key) mode.
 *
 * Returns true if this ICE instance is usable, false otherwise.
 */
static bool qcom_ice_check_supported(struct qcom_ice *ice)
{
	u32 regval = qcom_ice_readl(ice, QCOM_ICE_REG_VERSION);
	struct device *dev = ice->dev;
	int major = FIELD_GET(GENMASK(31, 24), regval);
	int minor = FIELD_GET(GENMASK(23, 16), regval);
	int step = FIELD_GET(GENMASK(15, 0), regval);

	/* For now this driver only supports ICE version 3 and 4. */
	if (major != 3 && major != 4) {
		dev_warn(dev, "Unsupported ICE version: v%d.%d.%d\n" ,
			 major, minor, step);
		return false;
	}

	/* HWKM version v2 is present from ICE 3.2.1 onwards while version v1
	 * is present only in ICE 3.2.0. Earlier ICE version don't have HWKM.
	 */
	if (major > 3 ||
	    (major == 3 && (minor >= 3 || (minor == 2 && step >= 1))))
		ice->hwkm_version = QCOM_ICE_HWKM_V2;
	else if ((major == 3) && (minor == 2))
		ice->hwkm_version = QCOM_ICE_HWKM_V1;	/* exactly ICE v3.2.0 */
	else
		ice->hwkm_version = 0;	/* no HWKM before ICE v3.2 */

	dev_info(dev, "Found QC Inline Crypto Engine (ICE) v%d.%d.%d\n" ,
		 major, minor, step);

	if (ice->hwkm_version)
		dev_info(dev, "QC Hardware Key Manager (HWKM) version v%d\n" ,
			 ice->hwkm_version);

	/* If fuses are blown, ICE might not work in the standard way. */
	regval = qcom_ice_readl(ice, QCOM_ICE_REG_FUSE_SETTING);
	if (regval & (QCOM_ICE_FUSE_SETTING_MASK |
		      QCOM_ICE_FORCE_HW_KEY0_SETTING_MASK |
		      QCOM_ICE_FORCE_HW_KEY1_SETTING_MASK)) {
		dev_warn(dev, "Fuses are blown; ICE is unusable!\n" );
		return false;
	}

	/*
	 * Check for HWKM support and decide whether to use it or not. ICE
	 * v3.2.1 and later have HWKM v2. ICE v3.2.0 has HWKM v1. Earlier ICE
	 * versions don't have HWKM at all. However, for HWKM to be fully
	 * usable by Linux, the TrustZone software also needs to support certain
	 * SCM calls including the ones to generate and prepare keys. Support
	 * for these SCM calls is present for SoCs with HWKM v2 and is being
	 * added for SoCs with HWKM v1 as well but not every SoC with HWKM v1
	 * currently supports this. So, this driver checks for the SCM call
	 * support before it decides to use HWKM.
	 *
	 * Also, since HWKM and legacy mode are mutually exclusive, and
	 * ICE-capable storage driver(s) need to know early on whether to
	 * advertise support for raw keys or wrapped keys, HWKM cannot be used
	 * unconditionally. A module parameter is used to opt into using it.
	 */
	if (ice->hwkm_version && qcom_scm_has_wrapped_key_support()) {
		if (qcom_ice_use_wrapped_keys) {
			dev_info(dev, "Using HWKM. Supporting wrapped keys only.\n" );
			ice->use_hwkm = true;
		} else {
			dev_info(dev, "Not using HWKM. Supporting raw keys only.\n" );
		}
	} else if (qcom_ice_use_wrapped_keys) {
		dev_warn(dev, "A supported HWKM is not present. Ignoring qcom_ice.use_wrapped_keys=1.\n" );
	} else {
		dev_info(dev, "A supported HWKM is not present. Supporting raw keys only.\n" );
	}
	return true;
}
| 188 | |
| 189 | static void qcom_ice_low_power_mode_enable(struct qcom_ice *ice) |
| 190 | { |
| 191 | u32 regval; |
| 192 | |
| 193 | regval = qcom_ice_readl(ice, QCOM_ICE_REG_ADVANCED_CONTROL); |
| 194 | |
| 195 | /* Enable low power mode sequence */ |
| 196 | regval |= 0x7000; |
| 197 | qcom_ice_writel(ice, regval, QCOM_ICE_REG_ADVANCED_CONTROL); |
| 198 | } |
| 199 | |
| 200 | static void qcom_ice_optimization_enable(struct qcom_ice *ice) |
| 201 | { |
| 202 | u32 regval; |
| 203 | |
| 204 | /* ICE Optimizations Enable Sequence */ |
| 205 | regval = qcom_ice_readl(ice, QCOM_ICE_REG_ADVANCED_CONTROL); |
| 206 | regval |= 0xd807100; |
| 207 | /* ICE HPG requires delay before writing */ |
| 208 | udelay(usec: 5); |
| 209 | qcom_ice_writel(ice, regval, QCOM_ICE_REG_ADVANCED_CONTROL); |
| 210 | udelay(usec: 5); |
| 211 | } |
| 212 | |
| 213 | /* |
| 214 | * Wait until the ICE BIST (built-in self-test) has completed. |
| 215 | * |
| 216 | * This may be necessary before ICE can be used. |
| 217 | * Note that we don't really care whether the BIST passed or failed; |
| 218 | * we really just want to make sure that it isn't still running. This is |
| 219 | * because (a) the BIST is a FIPS compliance thing that never fails in |
| 220 | * practice, (b) ICE is documented to reject crypto requests if the BIST |
| 221 | * fails, so we needn't do it in software too, and (c) properly testing |
| 222 | * storage encryption requires testing the full storage stack anyway, |
| 223 | * and not relying on hardware-level self-tests. |
| 224 | */ |
| 225 | static int qcom_ice_wait_bist_status(struct qcom_ice *ice) |
| 226 | { |
| 227 | u32 regval; |
| 228 | int err; |
| 229 | |
| 230 | err = readl_poll_timeout(ice->base + QCOM_ICE_REG_BIST_STATUS, |
| 231 | regval, !(regval & QCOM_ICE_BIST_STATUS_MASK), |
| 232 | 50, 5000); |
| 233 | if (err) { |
| 234 | dev_err(ice->dev, "Timed out waiting for ICE self-test to complete\n" ); |
| 235 | return err; |
| 236 | } |
| 237 | |
| 238 | if (ice->use_hwkm && |
| 239 | qcom_ice_readl(ice, QCOM_ICE_REG_HWKM_TZ_KM_STATUS) != |
| 240 | (QCOM_ICE_HWKM_KT_CLEAR_DONE | |
| 241 | QCOM_ICE_HWKM_BOOT_CMD_LIST0_DONE | |
| 242 | QCOM_ICE_HWKM_BOOT_CMD_LIST1_DONE | |
| 243 | QCOM_ICE_HWKM_CRYPTO_BIST_DONE(ice->hwkm_version) | |
| 244 | QCOM_ICE_HWKM_BIST_DONE(ice->hwkm_version))) { |
| 245 | dev_err(ice->dev, "HWKM self-test error!\n" ); |
| 246 | /* |
| 247 | * Too late to revoke use_hwkm here, as it was already |
| 248 | * propagated up the stack into the crypto capabilities. |
| 249 | */ |
| 250 | } |
| 251 | return 0; |
| 252 | } |
| 253 | |
/*
 * Switch ICE from its default legacy (raw-key) mode into HWKM
 * (wrapped-key) mode and prepare HWKM for TrustZone key programming.
 * This is redone after each resume, since qcom_ice_suspend() clears
 * hwkm_init_complete.  No-op when HWKM mode is not in use.
 */
static void qcom_ice_hwkm_init(struct qcom_ice *ice)
{
	u32 regval;

	if (!ice->use_hwkm)
		return;

	/* Any HWKM wrapped key must fit into the blk-crypto key buffer. */
	BUILD_BUG_ON(QCOM_ICE_HWKM_MAX_WRAPPED_KEY_SIZE >
		     BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE);
	/*
	 * When ICE is in HWKM mode, it only supports wrapped keys.
	 * When ICE is in legacy mode, it only supports raw keys.
	 *
	 * Put ICE in HWKM mode. ICE defaults to legacy mode.
	 */
	if (ice->hwkm_version == QCOM_ICE_HWKM_V2) {
		regval = qcom_ice_readl(ice, QCOM_ICE_REG_CONTROL);
		regval &= ~QCOM_ICE_LEGACY_MODE_ENABLED;
		qcom_ice_writel(ice, regval, QCOM_ICE_REG_CONTROL);
	} else if (ice->hwkm_version == QCOM_ICE_HWKM_V1) {
		/* On HWKM v1, the legacy-mode bit lives in HWKM register space. */
		regval = qcom_ice_readl(ice, QCOM_ICE_REG_HWKM_TZ_KM_CTL);
		regval &= ~QCOM_ICE_HWKM_ICE_LEGACY_MODE_ENABLED;
		qcom_ice_writel(ice, regval, QCOM_ICE_REG_HWKM_TZ_KM_CTL);
	}

	/* Disable CRC checks. This HWKM feature is not used. */
	qcom_ice_writel(ice, QCOM_ICE_HWKM_DISABLE_CRC_CHECKS_VAL,
			QCOM_ICE_REG_HWKM_TZ_KM_CTL);

	/*
	 * Allow the HWKM slave to read and write the keyslots in the ICE HWKM
	 * slave. Without this, TrustZone cannot program keys into ICE.
	 */
	qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_0);
	qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_1);
	qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_2);
	qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_3);
	qcom_ice_writel(ice, GENMASK(31, 0), QCOM_ICE_REG_HWKM_BANK0_BBAC_4);

	/* Clear the HWKM response FIFO. */
	qcom_ice_writel(ice, QCOM_ICE_HWKM_RSP_FIFO_CLEAR_VAL,
			QCOM_ICE_REG_HWKM_BANK0_BANKN_IRQ_STATUS);
	ice->hwkm_init_complete = true;
}
| 298 | |
/*
 * Enable ICE: program the low-power and optimization settings, initialize
 * HWKM if it is in use, then wait for the self-test to finish.
 *
 * Return: 0 on success, or a negative errno if the BIST wait timed out.
 */
int qcom_ice_enable(struct qcom_ice *ice)
{
	qcom_ice_low_power_mode_enable(ice);
	qcom_ice_optimization_enable(ice);
	qcom_ice_hwkm_init(ice);
	return qcom_ice_wait_bist_status(ice);
}
EXPORT_SYMBOL_GPL(qcom_ice_enable);
| 307 | |
/*
 * Resume ICE after suspend: re-enable the core clock, redo the HWKM
 * initialization (hwkm_init_complete was cleared by qcom_ice_suspend()),
 * and wait for the self-test to finish.
 *
 * Return: 0 on success, or a negative errno on clock or BIST failure.
 */
int qcom_ice_resume(struct qcom_ice *ice)
{
	struct device *dev = ice->dev;
	int err;

	err = clk_prepare_enable(clk: ice->core_clk);
	if (err) {
		dev_err(dev, "failed to enable core clock (%d)\n" ,
			err);
		return err;
	}
	qcom_ice_hwkm_init(ice);
	return qcom_ice_wait_bist_status(ice);
}
EXPORT_SYMBOL_GPL(qcom_ice_resume);
| 323 | |
/*
 * Suspend ICE: gate the core clock and clear hwkm_init_complete so that
 * qcom_ice_resume() re-runs the HWKM initialization sequence.
 *
 * Return: always 0.
 */
int qcom_ice_suspend(struct qcom_ice *ice)
{
	clk_disable_unprepare(clk: ice->core_clk);
	ice->hwkm_init_complete = false;

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_ice_suspend);
| 332 | |
| 333 | static unsigned int translate_hwkm_slot(struct qcom_ice *ice, unsigned int slot) |
| 334 | { |
| 335 | return ice->hwkm_version == QCOM_ICE_HWKM_V1 ? slot : slot * 2; |
| 336 | } |
| 337 | |
/*
 * Program a hardware-wrapped key into @slot via HWKM.  The keyslot's
 * crypto configuration register has its CFGE bit cleared before the key
 * is programmed by TrustZone, and is rewritten (with CFGE set) afterwards.
 *
 * Return: 0 on success, or a negative errno on failure.
 */
static int qcom_ice_program_wrapped_key(struct qcom_ice *ice, unsigned int slot,
					const struct blk_crypto_key *bkey)
{
	struct device *dev = ice->dev;
	union crypto_cfg cfg = {
		.dusize = bkey->crypto_cfg.data_unit_size / 512,
		.capidx = QCOM_SCM_ICE_CIPHER_AES_256_XTS,
		.cfge = QCOM_ICE_HWKM_CFG_ENABLE_VAL,
	};
	int err;

	if (!ice->use_hwkm) {
		dev_err_ratelimited(dev, "Got wrapped key when not using HWKM\n" );
		return -EINVAL;
	}
	if (!ice->hwkm_init_complete) {
		dev_err_ratelimited(dev, "HWKM not yet initialized\n" );
		return -EINVAL;
	}

	/* Clear CFGE before programming the key. */
	qcom_ice_writel(ice, 0x0, QCOM_ICE_REG_CRYPTOCFG(slot));

	/* Call into TrustZone to program the wrapped key using HWKM. */
	err = qcom_scm_ice_set_key(index: translate_hwkm_slot(ice, slot), key: bkey->bytes,
				   key_size: bkey->size, cipher: cfg.capidx, data_unit_size: cfg.dusize);
	if (err) {
		dev_err_ratelimited(dev,
				    "qcom_scm_ice_set_key failed; err=%d, slot=%u\n" ,
				    err, slot);
		return err;
	}

	/* Set CFGE after programming the key. */
	qcom_ice_writel(ice, le32_to_cpu(cfg.regval),
			QCOM_ICE_REG_CRYPTOCFG(slot));
	return 0;
}
| 376 | |
/*
 * Program a key into an ICE keyslot.  Hardware-wrapped keys are handed to
 * qcom_ice_program_wrapped_key(); raw AES-256-XTS keys are byte-swapped
 * to big endian (as required by the SCM call) and programmed directly.
 *
 * Return: 0 on success, or a negative errno on failure.
 */
int qcom_ice_program_key(struct qcom_ice *ice, unsigned int slot,
			 const struct blk_crypto_key *blk_key)
{
	struct device *dev = ice->dev;
	union {
		u8 bytes[AES_256_XTS_KEY_SIZE];
		u32 words[AES_256_XTS_KEY_SIZE / sizeof(u32)];
	} key;
	int i;
	int err;

	/* Only AES-256-XTS has been tested so far. */
	if (blk_key->crypto_cfg.crypto_mode !=
	    BLK_ENCRYPTION_MODE_AES_256_XTS) {
		dev_err_ratelimited(dev, "Unsupported crypto mode: %d\n" ,
				    blk_key->crypto_cfg.crypto_mode);
		return -EINVAL;
	}

	if (blk_key->crypto_cfg.key_type == BLK_CRYPTO_KEY_TYPE_HW_WRAPPED)
		return qcom_ice_program_wrapped_key(ice, slot, bkey: blk_key);

	/* In HWKM mode only wrapped keys are accepted. */
	if (ice->use_hwkm) {
		dev_err_ratelimited(dev, "Got raw key when using HWKM\n" );
		return -EINVAL;
	}

	if (blk_key->size != AES_256_XTS_KEY_SIZE) {
		dev_err_ratelimited(dev, "Incorrect key size\n" );
		return -EINVAL;
	}
	memcpy(key.bytes, blk_key->bytes, AES_256_XTS_KEY_SIZE);

	/* The SCM call requires that the key words are encoded in big endian */
	for (i = 0; i < ARRAY_SIZE(key.words); i++)
		__cpu_to_be32s(&key.words[i]);

	err = qcom_scm_ice_set_key(index: slot, key: key.bytes, AES_256_XTS_KEY_SIZE,
				   cipher: QCOM_SCM_ICE_CIPHER_AES_256_XTS,
				   data_unit_size: blk_key->crypto_cfg.data_unit_size / 512);

	/* Wipe the on-stack key copy; plain memset could be optimized away. */
	memzero_explicit(s: &key, count: sizeof(key));

	return err;
}
EXPORT_SYMBOL_GPL(qcom_ice_program_key);
| 423 | |
| 424 | int qcom_ice_evict_key(struct qcom_ice *ice, int slot) |
| 425 | { |
| 426 | if (ice->hwkm_init_complete) |
| 427 | slot = translate_hwkm_slot(ice, slot); |
| 428 | return qcom_scm_ice_invalidate_key(index: slot); |
| 429 | } |
| 430 | EXPORT_SYMBOL_GPL(qcom_ice_evict_key); |
| 431 | |
| 432 | /** |
| 433 | * qcom_ice_get_supported_key_type() - Get the supported key type |
| 434 | * @ice: ICE driver data |
| 435 | * |
| 436 | * Return: the blk-crypto key type that the ICE driver is configured to use. |
| 437 | * This is the key type that ICE-capable storage drivers should advertise as |
| 438 | * supported in the crypto capabilities of any disks they register. |
| 439 | */ |
| 440 | enum blk_crypto_key_type qcom_ice_get_supported_key_type(struct qcom_ice *ice) |
| 441 | { |
| 442 | if (ice->use_hwkm) |
| 443 | return BLK_CRYPTO_KEY_TYPE_HW_WRAPPED; |
| 444 | return BLK_CRYPTO_KEY_TYPE_RAW; |
| 445 | } |
| 446 | EXPORT_SYMBOL_GPL(qcom_ice_get_supported_key_type); |
| 447 | |
| 448 | /** |
| 449 | * qcom_ice_derive_sw_secret() - Derive software secret from wrapped key |
| 450 | * @ice: ICE driver data |
| 451 | * @eph_key: an ephemerally-wrapped key |
| 452 | * @eph_key_size: size of @eph_key in bytes |
| 453 | * @sw_secret: output buffer for the software secret |
| 454 | * |
| 455 | * Use HWKM to derive the "software secret" from a hardware-wrapped key that is |
| 456 | * given in ephemerally-wrapped form. |
| 457 | * |
| 458 | * Return: 0 on success; -EBADMSG if the given ephemerally-wrapped key is |
| 459 | * invalid; or another -errno value. |
| 460 | */ |
| 461 | int qcom_ice_derive_sw_secret(struct qcom_ice *ice, |
| 462 | const u8 *eph_key, size_t eph_key_size, |
| 463 | u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]) |
| 464 | { |
| 465 | int err = qcom_scm_derive_sw_secret(eph_key, eph_key_size, |
| 466 | sw_secret, |
| 467 | BLK_CRYPTO_SW_SECRET_SIZE); |
| 468 | if (err == -EIO || err == -EINVAL) |
| 469 | err = -EBADMSG; /* probably invalid key */ |
| 470 | return err; |
| 471 | } |
| 472 | EXPORT_SYMBOL_GPL(qcom_ice_derive_sw_secret); |
| 473 | |
| 474 | /** |
| 475 | * qcom_ice_generate_key() - Generate a wrapped key for inline encryption |
| 476 | * @ice: ICE driver data |
| 477 | * @lt_key: output buffer for the long-term wrapped key |
| 478 | * |
| 479 | * Use HWKM to generate a new key and return it as a long-term wrapped key. |
| 480 | * |
| 481 | * Return: the size of the resulting wrapped key on success; -errno on failure. |
| 482 | */ |
| 483 | int qcom_ice_generate_key(struct qcom_ice *ice, |
| 484 | u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) |
| 485 | { |
| 486 | int err; |
| 487 | |
| 488 | err = qcom_scm_generate_ice_key(lt_key, |
| 489 | QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version)); |
| 490 | if (err) |
| 491 | return err; |
| 492 | |
| 493 | return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version); |
| 494 | } |
| 495 | EXPORT_SYMBOL_GPL(qcom_ice_generate_key); |
| 496 | |
| 497 | /** |
| 498 | * qcom_ice_prepare_key() - Prepare a wrapped key for inline encryption |
| 499 | * @ice: ICE driver data |
| 500 | * @lt_key: a long-term wrapped key |
| 501 | * @lt_key_size: size of @lt_key in bytes |
| 502 | * @eph_key: output buffer for the ephemerally-wrapped key |
| 503 | * |
| 504 | * Use HWKM to re-wrap a long-term wrapped key with the per-boot ephemeral key. |
| 505 | * |
| 506 | * Return: the size of the resulting wrapped key on success; -EBADMSG if the |
| 507 | * given long-term wrapped key is invalid; or another -errno value. |
| 508 | */ |
| 509 | int qcom_ice_prepare_key(struct qcom_ice *ice, |
| 510 | const u8 *lt_key, size_t lt_key_size, |
| 511 | u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) |
| 512 | { |
| 513 | int err; |
| 514 | |
| 515 | err = qcom_scm_prepare_ice_key(lt_key, lt_key_size, |
| 516 | eph_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version)); |
| 517 | if (err == -EIO || err == -EINVAL) |
| 518 | err = -EBADMSG; /* probably invalid key */ |
| 519 | if (err) |
| 520 | return err; |
| 521 | |
| 522 | return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version); |
| 523 | } |
| 524 | EXPORT_SYMBOL_GPL(qcom_ice_prepare_key); |
| 525 | |
| 526 | /** |
| 527 | * qcom_ice_import_key() - Import a raw key for inline encryption |
| 528 | * @ice: ICE driver data |
| 529 | * @raw_key: the raw key to import |
| 530 | * @raw_key_size: size of @raw_key in bytes |
| 531 | * @lt_key: output buffer for the long-term wrapped key |
| 532 | * |
| 533 | * Use HWKM to import a raw key and return it as a long-term wrapped key. |
| 534 | * |
| 535 | * Return: the size of the resulting wrapped key on success; -errno on failure. |
| 536 | */ |
| 537 | int qcom_ice_import_key(struct qcom_ice *ice, |
| 538 | const u8 *raw_key, size_t raw_key_size, |
| 539 | u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]) |
| 540 | { |
| 541 | int err; |
| 542 | |
| 543 | err = qcom_scm_import_ice_key(raw_key, raw_key_size, |
| 544 | lt_key, QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version)); |
| 545 | if (err) |
| 546 | return err; |
| 547 | |
| 548 | return QCOM_ICE_HWKM_WRAPPED_KEY_SIZE(ice->hwkm_version); |
| 549 | } |
| 550 | EXPORT_SYMBOL_GPL(qcom_ice_import_key); |
| 551 | |
/*
 * Allocate and initialize an ICE instance for @dev, whose register range
 * has already been mapped at @base.  Verifies SCM availability, acquires
 * and enables the core clock, and checks that the hardware version is
 * supported.
 *
 * Return: the new instance on success, NULL if the SCM interface has no
 * ICE support, or an ERR_PTR() on error (including -EPROBE_DEFER while
 * SCM is not yet available).
 */
static struct qcom_ice *qcom_ice_create(struct device *dev,
					void __iomem *base)
{
	struct qcom_ice *engine;

	if (!qcom_scm_is_available())
		return ERR_PTR(error: -EPROBE_DEFER);

	if (!qcom_scm_ice_available()) {
		dev_warn(dev, "ICE SCM interface not found\n" );
		return NULL;
	}

	engine = devm_kzalloc(dev, size: sizeof(*engine), GFP_KERNEL);
	if (!engine)
		return ERR_PTR(error: -ENOMEM);

	engine->dev = dev;
	engine->base = base;

	/*
	 * Legacy DT binding uses different clk names for each consumer,
	 * so lets try those first. If none of those are a match, it means
	 * the we only have one clock and it is part of the dedicated DT node.
	 * Also, enable the clock before we check what HW version the driver
	 * supports.
	 */
	engine->core_clk = devm_clk_get_optional_enabled(dev, id: "ice_core_clk" );
	if (!engine->core_clk)
		engine->core_clk = devm_clk_get_optional_enabled(dev, id: "ice" );
	if (!engine->core_clk)
		engine->core_clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(ptr: engine->core_clk))
		return ERR_CAST(ptr: engine->core_clk);

	if (!qcom_ice_check_supported(ice: engine))
		return ERR_PTR(error: -EOPNOTSUPP);

	dev_dbg(dev, "Registered Qualcomm Inline Crypto Engine\n" );

	return engine;
}
| 594 | |
| 595 | /** |
| 596 | * of_qcom_ice_get() - get an ICE instance from a DT node |
| 597 | * @dev: device pointer for the consumer device |
| 598 | * |
| 599 | * This function will provide an ICE instance either by creating one for the |
| 600 | * consumer device if its DT node provides the 'ice' reg range and the 'ice' |
| 601 | * clock (for legacy DT style). On the other hand, if consumer provides a |
| 602 | * phandle via 'qcom,ice' property to an ICE DT, the ICE instance will already |
| 603 | * be created and so this function will return that instead. |
| 604 | * |
| 605 | * Return: ICE pointer on success, NULL if there is no ICE data provided by the |
| 606 | * consumer or ERR_PTR() on error. |
| 607 | */ |
| 608 | static struct qcom_ice *of_qcom_ice_get(struct device *dev) |
| 609 | { |
| 610 | struct platform_device *pdev = to_platform_device(dev); |
| 611 | struct qcom_ice *ice; |
| 612 | struct resource *res; |
| 613 | void __iomem *base; |
| 614 | struct device_link *link; |
| 615 | |
| 616 | if (!dev || !dev->of_node) |
| 617 | return ERR_PTR(error: -ENODEV); |
| 618 | |
| 619 | /* |
| 620 | * In order to support legacy style devicetree bindings, we need |
| 621 | * to create the ICE instance using the consumer device and the reg |
| 622 | * range called 'ice' it provides. |
| 623 | */ |
| 624 | res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice" ); |
| 625 | if (res) { |
| 626 | base = devm_ioremap_resource(dev: &pdev->dev, res); |
| 627 | if (IS_ERR(ptr: base)) |
| 628 | return ERR_CAST(ptr: base); |
| 629 | |
| 630 | /* create ICE instance using consumer dev */ |
| 631 | return qcom_ice_create(dev: &pdev->dev, base); |
| 632 | } |
| 633 | |
| 634 | /* |
| 635 | * If the consumer node does not provider an 'ice' reg range |
| 636 | * (legacy DT binding), then it must at least provide a phandle |
| 637 | * to the ICE devicetree node, otherwise ICE is not supported. |
| 638 | */ |
| 639 | struct device_node *node __free(device_node) = of_parse_phandle(np: dev->of_node, |
| 640 | phandle_name: "qcom,ice" , index: 0); |
| 641 | if (!node) |
| 642 | return NULL; |
| 643 | |
| 644 | pdev = of_find_device_by_node(np: node); |
| 645 | if (!pdev) { |
| 646 | dev_err(dev, "Cannot find device node %s\n" , node->name); |
| 647 | return ERR_PTR(error: -EPROBE_DEFER); |
| 648 | } |
| 649 | |
| 650 | ice = platform_get_drvdata(pdev); |
| 651 | if (!ice) { |
| 652 | dev_err(dev, "Cannot get ice instance from %s\n" , |
| 653 | dev_name(&pdev->dev)); |
| 654 | platform_device_put(pdev); |
| 655 | return ERR_PTR(error: -EPROBE_DEFER); |
| 656 | } |
| 657 | |
| 658 | link = device_link_add(consumer: dev, supplier: &pdev->dev, DL_FLAG_AUTOREMOVE_SUPPLIER); |
| 659 | if (!link) { |
| 660 | dev_err(&pdev->dev, |
| 661 | "Failed to create device link to consumer %s\n" , |
| 662 | dev_name(dev)); |
| 663 | platform_device_put(pdev); |
| 664 | ice = ERR_PTR(error: -EINVAL); |
| 665 | } |
| 666 | |
| 667 | return ice; |
| 668 | } |
| 669 | |
| 670 | static void qcom_ice_put(const struct qcom_ice *ice) |
| 671 | { |
| 672 | struct platform_device *pdev = to_platform_device(ice->dev); |
| 673 | |
| 674 | if (!platform_get_resource_byname(pdev, IORESOURCE_MEM, "ice" )) |
| 675 | platform_device_put(pdev); |
| 676 | } |
| 677 | |
/* devres release callback: drop the reference held by devm_of_qcom_ice_get(). */
static void devm_of_qcom_ice_put(struct device *dev, void *res)
{
	struct qcom_ice **icep = res;

	qcom_ice_put(*icep);
}
| 682 | |
| 683 | /** |
| 684 | * devm_of_qcom_ice_get() - Devres managed helper to get an ICE instance from |
| 685 | * a DT node. |
| 686 | * @dev: device pointer for the consumer device. |
| 687 | * |
| 688 | * This function will provide an ICE instance either by creating one for the |
| 689 | * consumer device if its DT node provides the 'ice' reg range and the 'ice' |
| 690 | * clock (for legacy DT style). On the other hand, if consumer provides a |
| 691 | * phandle via 'qcom,ice' property to an ICE DT, the ICE instance will already |
| 692 | * be created and so this function will return that instead. |
| 693 | * |
| 694 | * Return: ICE pointer on success, NULL if there is no ICE data provided by the |
| 695 | * consumer or ERR_PTR() on error. |
| 696 | */ |
| 697 | struct qcom_ice *devm_of_qcom_ice_get(struct device *dev) |
| 698 | { |
| 699 | struct qcom_ice *ice, **dr; |
| 700 | |
| 701 | dr = devres_alloc(devm_of_qcom_ice_put, sizeof(*dr), GFP_KERNEL); |
| 702 | if (!dr) |
| 703 | return ERR_PTR(error: -ENOMEM); |
| 704 | |
| 705 | ice = of_qcom_ice_get(dev); |
| 706 | if (!IS_ERR_OR_NULL(ptr: ice)) { |
| 707 | *dr = ice; |
| 708 | devres_add(dev, res: dr); |
| 709 | } else { |
| 710 | devres_free(res: dr); |
| 711 | } |
| 712 | |
| 713 | return ice; |
| 714 | } |
| 715 | EXPORT_SYMBOL_GPL(devm_of_qcom_ice_get); |
| 716 | |
| 717 | static int qcom_ice_probe(struct platform_device *pdev) |
| 718 | { |
| 719 | struct qcom_ice *engine; |
| 720 | void __iomem *base; |
| 721 | |
| 722 | base = devm_platform_ioremap_resource(pdev, index: 0); |
| 723 | if (IS_ERR(ptr: base)) { |
| 724 | dev_warn(&pdev->dev, "ICE registers not found\n" ); |
| 725 | return PTR_ERR(ptr: base); |
| 726 | } |
| 727 | |
| 728 | engine = qcom_ice_create(dev: &pdev->dev, base); |
| 729 | if (IS_ERR(ptr: engine)) |
| 730 | return PTR_ERR(ptr: engine); |
| 731 | |
| 732 | platform_set_drvdata(pdev, data: engine); |
| 733 | |
| 734 | return 0; |
| 735 | } |
| 736 | |
/* Match table for dedicated (non-legacy) ICE devicetree nodes. */
static const struct of_device_id qcom_ice_of_match_table[] = {
	{ .compatible = "qcom,inline-crypto-engine" },
	{ },
};
MODULE_DEVICE_TABLE(of, qcom_ice_of_match_table);

static struct platform_driver qcom_ice_driver = {
	.probe = qcom_ice_probe,
	.driver = {
		.name = "qcom-ice" ,
		.of_match_table = qcom_ice_of_match_table,
	},
};

module_platform_driver(qcom_ice_driver);
| 752 | |
| 753 | MODULE_DESCRIPTION("Qualcomm Inline Crypto Engine driver" ); |
| 754 | MODULE_LICENSE("GPL" ); |
| 755 | |