// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2014 - 2022 Intel Corporation */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "adf_accel_devices.h"
#include "qat_bl.h"
#include "qat_crypto.h"

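/*
 * qat_bl_free_bufl() - unmap and release the flat buffer lists that were
 * built by qat_bl_sgl_to_bufl(). The source list is always torn down; the
 * destination list is torn down only for out-of-place requests, i.e. when
 * the source and destination descriptor addresses differ.
 */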
void qat_bl_free_bufl(struct adf_accel_dev *accel_dev,
		      struct qat_request_buffs *buf)
{
	struct device *dev = &GET_DEV(accel_dev);
	struct qat_alg_buf_list *bl = buf->bl;
	struct qat_alg_buf_list *blout = buf->blout;
	dma_addr_t blp = buf->blp;
	dma_addr_t blpout = buf->bloutp;
	size_t sz = buf->sz;
	size_t sz_out = buf->sz_out;
	int bl_dma_dir;
	int i;

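	/*
	 * For in-place requests the source and destination descriptors share
	 * one DMA address and the buffers were mapped DMA_BIDIRECTIONAL;
	 * otherwise the source buffers were mapped DMA_TO_DEVICE only.
	 */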
	bl_dma_dir = blp != blpout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	for (i = 0; i < bl->num_bufs; i++)
		dma_unmap_single(dev, bl->buffers[i].addr,
				 bl->buffers[i].len, bl_dma_dir);

	dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	if (!buf->sgl_src_valid)
		kfree(bl);

	if (blp != blpout) {
		for (i = 0; i < blout->num_mapped_bufs; i++) {
			dma_unmap_single(dev, blout->buffers[i].addr,
					 blout->buffers[i].len,
					 DMA_BIDIRECTIONAL);
		}
		dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);

		if (!buf->sgl_dst_valid)
			kfree(blout);
	}
}

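/*
 * __qat_bl_sgl_to_bufl() - convert the source and destination scatterlists
 * of a request into the flat buffer lists (struct qat_alg_buf_list) used by
 * the firmware. Lists with at most QAT_MAX_BUFF_DESC entries reuse the
 * space pre-allocated inside @buf; larger lists are allocated on the
 * device's NUMA node. @sskip and @dskip bytes are skipped at the start of
 * the source and destination data respectively, and @extra_dst_buff (a
 * caller-owned, already mapped DMA address), if non-zero, is appended as a
 * trailing entry of the destination list.
 */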
static int __qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
				struct scatterlist *sgl,
				struct scatterlist *sglout,
				struct qat_request_buffs *buf,
				dma_addr_t extra_dst_buff,
				size_t sz_extra_dst_buff,
				unsigned int sskip,
				unsigned int dskip,
				gfp_t flags)
{
	struct device *dev = &GET_DEV(accel_dev);
	int i, sg_nctr = 0;
	int n = sg_nents(sgl);
	struct qat_alg_buf_list *bufl;
	struct qat_alg_buf_list *buflout = NULL;
	dma_addr_t blp = DMA_MAPPING_ERROR;
	dma_addr_t bloutp = DMA_MAPPING_ERROR;
	struct scatterlist *sg;
	size_t sz_out, sz = struct_size(bufl, buffers, n);
	int node = dev_to_node(&GET_DEV(accel_dev));
	unsigned int left;
	int bufl_dma_dir;

	if (unlikely(!n))
		return -EINVAL;

	buf->sgl_src_valid = false;
	buf->sgl_dst_valid = false;

	if (n > QAT_MAX_BUFF_DESC) {
		bufl = kzalloc_node(sz, flags, node);
		if (unlikely(!bufl))
			return -ENOMEM;
	} else {
		bufl = container_of(&buf->sgl_src.sgl_hdr,
				    struct qat_alg_buf_list, hdr);
		memset(bufl, 0, sizeof(struct qat_alg_buf_list));
		buf->sgl_src_valid = true;
	}

	bufl_dma_dir = sgl != sglout ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL;

	for (i = 0; i < n; i++)
		bufl->buffers[i].addr = DMA_MAPPING_ERROR;

	left = sskip;

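	/*
	 * Map each non-empty source SG entry, skipping the first @sskip
	 * bytes of data; entries that fall entirely within the skip region
	 * are left out of the list.
	 */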
	for_each_sg(sgl, sg, n, i) {
		int y = sg_nctr;

		if (!sg->length)
			continue;

		if (left >= sg->length) {
			left -= sg->length;
			continue;
		}
		bufl->buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
						       sg->length - left,
						       bufl_dma_dir);
		bufl->buffers[y].len = sg->length;
		if (unlikely(dma_mapping_error(dev, bufl->buffers[y].addr)))
			goto err_in;
		sg_nctr++;
		if (left) {
			bufl->buffers[y].len -= left;
			left = 0;
		}
	}
	bufl->num_bufs = sg_nctr;
	blp = dma_map_single(dev, bufl, sz, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(dev, blp)))
		goto err_in;
	buf->bl = bufl;
	buf->blp = blp;
	buf->sz = sz;
	/* Handle out of place operation */
	if (sgl != sglout) {
		struct qat_alg_buf *buffers;
		int extra_buff = extra_dst_buff ? 1 : 0;
		int n_sglout = sg_nents(sglout);

		n = n_sglout + extra_buff;
		sz_out = struct_size(buflout, buffers, n);
		left = dskip;

		sg_nctr = 0;

		if (n > QAT_MAX_BUFF_DESC) {
			buflout = kzalloc_node(sz_out, flags, node);
			if (unlikely(!buflout))
				goto err_in;
		} else {
			buflout = container_of(&buf->sgl_dst.sgl_hdr,
					       struct qat_alg_buf_list, hdr);
			memset(buflout, 0, sizeof(struct qat_alg_buf_list));
			buf->sgl_dst_valid = true;
		}

		buffers = buflout->buffers;
		for (i = 0; i < n; i++)
			buffers[i].addr = DMA_MAPPING_ERROR;

		for_each_sg(sglout, sg, n_sglout, i) {
			int y = sg_nctr;

			if (!sg->length)
				continue;

			if (left >= sg->length) {
				left -= sg->length;
				continue;
			}
			buffers[y].addr = dma_map_single(dev, sg_virt(sg) + left,
							 sg->length - left,
							 DMA_BIDIRECTIONAL);
			if (unlikely(dma_mapping_error(dev, buffers[y].addr)))
				goto err_out;
			buffers[y].len = sg->length;
			sg_nctr++;
			if (left) {
				buffers[y].len -= left;
				left = 0;
			}
		}
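		/*
		 * The extra destination buffer was mapped by the caller, so
		 * count it in num_bufs but not in num_mapped_bufs; it is
		 * never unmapped on the free or error paths.
		 */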
		if (extra_buff) {
			buffers[sg_nctr].addr = extra_dst_buff;
			buffers[sg_nctr].len = sz_extra_dst_buff;
		}

		buflout->num_bufs = sg_nctr;
		buflout->num_bufs += extra_buff;
		buflout->num_mapped_bufs = sg_nctr;
		bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(dev, bloutp)))
			goto err_out;
		buf->blout = buflout;
		buf->bloutp = bloutp;
		buf->sz_out = sz_out;
	} else {
		/* Otherwise set the src and dst to the same address */
		buf->bloutp = buf->blp;
		buf->sz_out = 0;
	}
	return 0;

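/*
 * Error unwind: err_out releases the destination mappings, stopping at the
 * caller-owned extra buffer, then falls through to err_in, which releases
 * the source mappings.
 */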
err_out:
	if (!dma_mapping_error(dev, bloutp))
		dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);

	n = sg_nents(sglout);
	for (i = 0; i < n; i++) {
		if (buflout->buffers[i].addr == extra_dst_buff)
			break;
		if (!dma_mapping_error(dev, buflout->buffers[i].addr))
			dma_unmap_single(dev, buflout->buffers[i].addr,
					 buflout->buffers[i].len,
					 DMA_BIDIRECTIONAL);
	}

	if (!buf->sgl_dst_valid)
		kfree(buflout);

err_in:
	if (!dma_mapping_error(dev, blp))
		dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);

	n = sg_nents(sgl);
	for (i = 0; i < n; i++)
		if (!dma_mapping_error(dev, bufl->buffers[i].addr))
			dma_unmap_single(dev, bufl->buffers[i].addr,
					 bufl->buffers[i].len,
					 bufl_dma_dir);

	if (!buf->sgl_src_valid)
		kfree(bufl);

	dev_err(dev, "Failed to map buf for dma\n");
	return -ENOMEM;
}

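/*
 * qat_bl_sgl_to_bufl() - public wrapper around __qat_bl_sgl_to_bufl(). When
 * @params is NULL the request is mapped with no skip offsets and no extra
 * destination buffer.
 */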
int qat_bl_sgl_to_bufl(struct adf_accel_dev *accel_dev,
		       struct scatterlist *sgl,
		       struct scatterlist *sglout,
		       struct qat_request_buffs *buf,
		       struct qat_sgl_to_bufl_params *params,
		       gfp_t flags)
{
	dma_addr_t extra_dst_buff = 0;
	size_t sz_extra_dst_buff = 0;
	unsigned int sskip = 0;
	unsigned int dskip = 0;

	if (params) {
		extra_dst_buff = params->extra_dst_buff;
		sz_extra_dst_buff = params->sz_extra_dst_buff;
		sskip = params->sskip;
		dskip = params->dskip;
	}

	return __qat_bl_sgl_to_bufl(accel_dev, sgl, sglout, buf,
				    extra_dst_buff, sz_extra_dst_buff,
				    sskip, dskip, flags);
}