// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2020 Intel Corporation */
#include "adf_common_drv.h"
#include "adf_dc.h"
#include "adf_gen2_hw_data.h"
#include "icp_qat_fw_comp.h"
#include "icp_qat_hw.h"
#include <linux/pci.h>

u32 adf_gen2_get_num_accels(struct adf_hw_device_data *self)
{
	if (!self || !self->accel_mask)
		return 0;

	return hweight16(self->accel_mask);
}
EXPORT_SYMBOL_GPL(adf_gen2_get_num_accels);

u32 adf_gen2_get_num_aes(struct adf_hw_device_data *self)
{
	if (!self || !self->ae_mask)
		return 0;

	return hweight32(self->ae_mask);
}
EXPORT_SYMBOL_GPL(adf_gen2_get_num_aes);

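/*
 * Enable ECC error detection and parity correction in every enabled
 * Acceleration Engine, then turn on error reporting in the uncorrectable
 * and correctable shared memory error registers of each enabled accelerator.
 */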
void adf_gen2_enable_error_correction(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	unsigned long accel_mask = hw_data->accel_mask;
	unsigned long ae_mask = hw_data->ae_mask;
	unsigned int val, i;

	/* Enable Accel Engine error detection & correction */
	for_each_set_bit(i, &ae_mask, hw_data->num_engines) {
		val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i));
		val |= ADF_GEN2_ENABLE_AE_ECC_ERR;
		ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_CTX_ENABLES(i), val);
		val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i));
		val |= ADF_GEN2_ENABLE_AE_ECC_PARITY_CORR;
		ADF_CSR_WR(pmisc_addr, ADF_GEN2_AE_MISC_CONTROL(i), val);
	}

	/* Enable shared memory error detection & correction */
	for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
		val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_UERRSSMSH(i));
		val |= ADF_GEN2_ERRSSMSH_EN;
		ADF_CSR_WR(pmisc_addr, ADF_GEN2_UERRSSMSH(i), val);
		val = ADF_CSR_RD(pmisc_addr, ADF_GEN2_CERRSSMSH(i));
		val |= ADF_GEN2_ERRSSMSH_EN;
		ADF_CSR_WR(pmisc_addr, ADF_GEN2_CERRSSMSH(i), val);
	}
}
EXPORT_SYMBOL_GPL(adf_gen2_enable_error_correction);

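/*
 * Set or clear the valid bit in the AE thread to PCIe function mapping
 * registers (groups A and B), as done when SR-IOV is enabled or disabled.
 */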
void adf_gen2_cfg_iov_thds(struct adf_accel_dev *accel_dev, bool enable,
			   int num_a_regs, int num_b_regs)
{
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u32 reg;
	int i;

	/* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group A */
	for (i = 0; i < num_a_regs; i++) {
		reg = READ_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i);
		if (enable)
			reg |= AE2FUNCTION_MAP_VALID;
		else
			reg &= ~AE2FUNCTION_MAP_VALID;
		WRITE_CSR_AE2FUNCTION_MAP_A(pmisc_addr, i, reg);
	}

	/* Set/Unset Valid bit in AE Thread to PCIe Function Mapping Group B */
	for (i = 0; i < num_b_regs; i++) {
		reg = READ_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i);
		if (enable)
			reg |= AE2FUNCTION_MAP_VALID;
		else
			reg &= ~AE2FUNCTION_MAP_VALID;
		WRITE_CSR_AE2FUNCTION_MAP_B(pmisc_addr, i, reg);
	}
}
EXPORT_SYMBOL_GPL(adf_gen2_cfg_iov_thds);

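/* Report the CSR offsets of the admin interface (mailbox and admin message registers) */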
void adf_gen2_get_admin_info(struct admin_info *admin_csrs_info)
{
	admin_csrs_info->mailbox_offset = ADF_MAILBOX_BASE_OFFSET;
	admin_csrs_info->admin_msg_ur = ADF_ADMINMSGUR_OFFSET;
	admin_csrs_info->admin_msg_lr = ADF_ADMINMSGLR_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_admin_info);

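/* Report the arbiter configuration, arbiter base and worker-to-service-map CSR offsets */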
void adf_gen2_get_arb_info(struct arb_info *arb_info)
{
	arb_info->arb_cfg = ADF_ARB_CONFIG;
	arb_info->arb_offset = ADF_ARB_OFFSET;
	arb_info->wt2sam_offset = ADF_ARB_WRK_2_SER_MAP_OFFSET;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_arb_info);

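/*
 * Enable the misc interrupt sources and, when SR-IOV is not in use, the
 * bundle interrupts for all ring banks owned by the PF.
 */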
void adf_gen2_enable_ints(struct adf_accel_dev *accel_dev)
{
	void __iomem *addr = adf_get_pmisc_base(accel_dev);
	u32 val;

	val = accel_dev->pf.vf_info ? 0 : BIT_ULL(GET_MAX_BANKS(accel_dev)) - 1;

	/* Enable bundle and misc interrupts */
	ADF_CSR_WR(addr, ADF_GEN2_SMIAPF0_MASK_OFFSET, val);
	ADF_CSR_WR(addr, ADF_GEN2_SMIAPF1_MASK_OFFSET, ADF_GEN2_SMIA1_MASK);
}
EXPORT_SYMBOL_GPL(adf_gen2_enable_ints);

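/*
 * Build the device capability mask: start from all crypto and compression
 * capabilities and clear the ones disabled by the legacy fuses, the soft
 * straps or the power-gate fuses of this SKU.
 */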
u32 adf_gen2_get_accel_cap(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct pci_dev *pdev = accel_dev->accel_pci_dev.pci_dev;
	u32 fuses = hw_data->fuses[ADF_FUSECTL0];
	u32 straps = hw_data->straps;
	u32 legfuses;
	u32 capabilities = ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC |
			   ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC |
			   ICP_ACCEL_CAPABILITIES_AUTHENTICATION |
			   ICP_ACCEL_CAPABILITIES_CIPHER |
			   ICP_ACCEL_CAPABILITIES_COMPRESSION;

	/* Read accelerator capabilities mask */
	pci_read_config_dword(pdev, ADF_DEVICE_LEGFUSE_OFFSET, &legfuses);

	/* A set bit in legfuses means the feature is OFF in this SKU */
	if (legfuses & ICP_ACCEL_MASK_CIPHER_SLICE) {
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_SYMMETRIC;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}
	if (legfuses & ICP_ACCEL_MASK_PKE_SLICE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;
	if (legfuses & ICP_ACCEL_MASK_AUTH_SLICE) {
		capabilities &= ~ICP_ACCEL_CAPABILITIES_AUTHENTICATION;
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CIPHER;
	}
	if (legfuses & ICP_ACCEL_MASK_COMPRESS_SLICE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;

	if ((straps | fuses) & ADF_POWERGATE_PKE)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_CRYPTO_ASYMMETRIC;

	if ((straps | fuses) & ADF_POWERGATE_DC)
		capabilities &= ~ICP_ACCEL_CAPABILITIES_COMPRESSION;

	return capabilities;
}
EXPORT_SYMBOL_GPL(adf_gen2_get_accel_cap);

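/*
 * Program the SSM watchdog timers of each enabled accelerator: one timer
 * for the symmetric crypto and compression slices, a separate one for PKE.
 */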
void adf_gen2_set_ssm_wdtimer(struct adf_accel_dev *accel_dev)
{
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	void __iomem *pmisc_addr = adf_get_pmisc_base(accel_dev);
	u32 timer_val_pke = ADF_SSM_WDT_PKE_DEFAULT_VALUE;
	u32 timer_val = ADF_SSM_WDT_DEFAULT_VALUE;
	unsigned long accel_mask = hw_data->accel_mask;
	u32 i = 0;

	/* Configures WDT timers */
	for_each_set_bit(i, &accel_mask, hw_data->num_accel) {
		/* Enable WDT for sym and dc */
		ADF_CSR_WR(pmisc_addr, ADF_SSMWDT(i), timer_val);
		/* Enable WDT for pke */
		ADF_CSR_WR(pmisc_addr, ADF_SSMWDTPKE(i), timer_val_pke);
	}
}
EXPORT_SYMBOL_GPL(adf_gen2_set_ssm_wdtimer);

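/*
 * Populate a compression request template for GEN2 devices: only static
 * DEFLATE is supported, with delayed match disabled and depth 1.
 */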
static int adf_gen2_build_comp_block(void *ctx, enum adf_dc_algo algo)
{
	struct icp_qat_fw_comp_req *req_tmpl = ctx;
	struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;

	switch (algo) {
	case QAT_DEFLATE:
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_STATIC;
		break;
	default:
		return -EINVAL;
	}

	cd_pars->u.sl.comp_slice_cfg_word[0] =
		ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_COMPRESS,
						    ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
						    ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
						    ICP_QAT_HW_COMPRESSION_DEPTH_1,
						    ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);

	return 0;
}

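/* Populate the matching DEFLATE decompression request template for GEN2 devices */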
static int adf_gen2_build_decomp_block(void *ctx, enum adf_dc_algo algo)
{
	struct icp_qat_fw_comp_req *req_tmpl = ctx;
	struct icp_qat_fw_comp_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
	struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;

	switch (algo) {
	case QAT_DEFLATE:
		header->service_cmd_id = ICP_QAT_FW_COMP_CMD_DECOMPRESS;
		break;
	default:
		return -EINVAL;
	}

	cd_pars->u.sl.comp_slice_cfg_word[0] =
		ICP_QAT_HW_COMPRESSION_CONFIG_BUILD(ICP_QAT_HW_COMPRESSION_DIR_DECOMPRESS,
						    ICP_QAT_HW_COMPRESSION_DELAYED_MATCH_DISABLED,
						    ICP_QAT_HW_COMPRESSION_ALGO_DEFLATE,
						    ICP_QAT_HW_COMPRESSION_DEPTH_1,
						    ICP_QAT_HW_COMPRESSION_FILE_TYPE_0);

	return 0;
}

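/* Register the GEN2 compression and decompression template builders with the common dc layer */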
void adf_gen2_init_dc_ops(struct adf_dc_ops *dc_ops)
{
	dc_ops->build_comp_block = adf_gen2_build_comp_block;
	dc_ops->build_decomp_block = adf_gen2_build_decomp_block;
}
EXPORT_SYMBOL_GPL(adf_gen2_init_dc_ops);

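/*
 * Usage sketch (illustrative only, not part of this file): a GEN2
 * device-specific init is expected to wire these exported helpers into its
 * struct adf_hw_device_data. The callback field names below are assumed to
 * match adf_accel_devices.h of a kernel contemporary with this file.
 *
 *	hw_data->get_num_accels = adf_gen2_get_num_accels;
 *	hw_data->get_num_aes = adf_gen2_get_num_aes;
 *	hw_data->enable_error_correction = adf_gen2_enable_error_correction;
 *	hw_data->get_admin_info = adf_gen2_get_admin_info;
 *	hw_data->get_arb_info = adf_gen2_get_arb_info;
 *	hw_data->enable_ints = adf_gen2_enable_ints;
 *	hw_data->get_accel_cap = adf_gen2_get_accel_cap;
 *	hw_data->set_ssm_wdtimer = adf_gen2_set_ssm_wdtimer;
 *	adf_gen2_init_dc_ops(&hw_data->dc_ops);
 */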
