// SPDX-License-Identifier: GPL-2.0-or-later
 /* Driver for Virtio crypto device.
  *
  * Copyright 2016 HUAWEI TECHNOLOGIES CO., LTD.
  */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/virtio_config.h>
#include <linux/cpu.h>

#include <uapi/linux/virtio_crypto.h>
#include "virtio_crypto_common.h"

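/*
 * Free the per-request resources once a request has completed.
 * req_data may hold key material, so it is zeroed before being
 * freed via kfree_sensitive().
 */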
void
virtcrypto_clear_request(struct virtio_crypto_request *vc_req)
{
	if (vc_req) {
		kfree_sensitive(vc_req->req_data);
		kfree(vc_req->sgs);
	}
}

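/* Per-request control-queue completion: wake up the waiting submitter. */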
static void virtio_crypto_ctrlq_callback(struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
	complete(&vc_ctrl_req->compl);
}

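/*
 * Control virtqueue interrupt callback. Drain all completed buffers,
 * dropping ctrl_lock around each per-request completion; the
 * disable_cb/enable_cb loop closes the race with buffers that become
 * ready after the final drain pass.
 */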
static void virtcrypto_ctrlq_callback(struct virtqueue *vq)
{
	struct virtio_crypto *vcrypto = vq->vdev->priv;
	struct virtio_crypto_ctrl_request *vc_ctrl_req;
	unsigned long flags;
	unsigned int len;

	spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
	do {
		virtqueue_disable_cb(vq);
		while ((vc_ctrl_req = virtqueue_get_buf(vq, &len)) != NULL) {
			spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
			virtio_crypto_ctrlq_callback(vc_ctrl_req);
			spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
		}
	} while (!virtqueue_enable_cb(vq));
	spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
}

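/*
 * Submit a request on the control virtqueue and wait for the device
 * to process it. The virtqueue is only touched under ctrl_lock;
 * wait_for_completion() sleeps, so this must not be called from
 * atomic context.
 */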
int virtio_crypto_ctrl_vq_request(struct virtio_crypto *vcrypto, struct scatterlist *sgs[],
				  unsigned int out_sgs, unsigned int in_sgs,
				  struct virtio_crypto_ctrl_request *vc_ctrl_req)
{
	int err;
	unsigned long flags;

	init_completion(&vc_ctrl_req->compl);

	spin_lock_irqsave(&vcrypto->ctrl_lock, flags);
	err = virtqueue_add_sgs(vcrypto->ctrl_vq, sgs, out_sgs, in_sgs, vc_ctrl_req, GFP_ATOMIC);
	if (err < 0) {
		spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);
		return err;
	}

	virtqueue_kick(vcrypto->ctrl_vq);
	spin_unlock_irqrestore(&vcrypto->ctrl_lock, flags);

	wait_for_completion(&vc_ctrl_req->compl);

	return 0;
}

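/*
 * Tasklet bottom half for the data virtqueues: pull completed requests
 * off the ring and invoke each request's algorithm-specific completion
 * callback.
 */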
static void virtcrypto_done_task(unsigned long data)
{
	struct data_queue *data_vq = (struct data_queue *)data;
	struct virtqueue *vq = data_vq->vq;
	struct virtio_crypto_request *vc_req;
	unsigned int len;

	do {
		virtqueue_disable_cb(vq);
		while ((vc_req = virtqueue_get_buf(vq, &len)) != NULL) {
			if (vc_req->alg_cb)
				vc_req->alg_cb(vc_req, len);
		}
	} while (!virtqueue_enable_cb(vq));
}

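/* Data virtqueue interrupt callback: defer processing to the tasklet. */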
static void virtcrypto_dataq_callback(struct virtqueue *vq)
{
	struct virtio_crypto *vcrypto = vq->vdev->priv;
	struct data_queue *dq = &vcrypto->data_vq[vq->index];

	tasklet_schedule(&dq->done_task);
}

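/*
 * Discover and set up all virtqueues: max_data_queues data queues
 * followed by one control queue. Each data queue also gets a crypto
 * engine (sized to its vring) and a completion tasklet.
 */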
static int virtcrypto_find_vqs(struct virtio_crypto *vi)
{
	struct virtqueue_info *vqs_info;
	struct virtqueue **vqs;
	int ret = -ENOMEM;
	int i, total_vqs;
	struct device *dev = &vi->vdev->dev;

	/*
	 * We expect 1 data virtqueue, followed by
	 * possible N-1 data queues used in multiqueue mode,
	 * followed by control vq.
	 */
	total_vqs = vi->max_data_queues + 1;

	/* Allocate space for find_vqs parameters */
	vqs = kcalloc(total_vqs, sizeof(*vqs), GFP_KERNEL);
	if (!vqs)
		goto err_vq;
	vqs_info = kcalloc(total_vqs, sizeof(*vqs_info), GFP_KERNEL);
	if (!vqs_info)
		goto err_vqs_info;

	/* Parameters for control virtqueue */
	vqs_info[total_vqs - 1].callback = virtcrypto_ctrlq_callback;
	vqs_info[total_vqs - 1].name = "controlq";

	/* Allocate/initialize parameters for data virtqueues */
	for (i = 0; i < vi->max_data_queues; i++) {
		vqs_info[i].callback = virtcrypto_dataq_callback;
		snprintf(vi->data_vq[i].name, sizeof(vi->data_vq[i].name),
			 "dataq.%d", i);
		vqs_info[i].name = vi->data_vq[i].name;
	}

	ret = virtio_find_vqs(vi->vdev, total_vqs, vqs, vqs_info, NULL);
	if (ret)
		goto err_find;

	vi->ctrl_vq = vqs[total_vqs - 1];

	for (i = 0; i < vi->max_data_queues; i++) {
		spin_lock_init(&vi->data_vq[i].lock);
		vi->data_vq[i].vq = vqs[i];
		/* Initialize crypto engine */
		vi->data_vq[i].engine = crypto_engine_alloc_init_and_set(dev, true, true,
						virtqueue_get_vring_size(vqs[i]));
		if (!vi->data_vq[i].engine) {
			ret = -ENOMEM;
			goto err_engine;
		}
		tasklet_init(&vi->data_vq[i].done_task, virtcrypto_done_task,
			     (unsigned long)&vi->data_vq[i]);
	}

	kfree(vqs_info);
	kfree(vqs);

	return 0;

err_engine:
err_find:
	kfree(vqs_info);
err_vqs_info:
	kfree(vqs);
err_vq:
	return ret;
}

static int virtcrypto_alloc_queues(struct virtio_crypto *vi)
{
	vi->data_vq = kcalloc(vi->max_data_queues, sizeof(*vi->data_vq),
			      GFP_KERNEL);
	if (!vi->data_vq)
		return -ENOMEM;

	return 0;
}

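/* Drop any CPU affinity hints previously set on the data virtqueues. */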
static void virtcrypto_clean_affinity(struct virtio_crypto *vi, long hcpu)
{
	int i;

	if (vi->affinity_hint_set) {
		for (i = 0; i < vi->max_data_queues; i++)
			virtqueue_set_affinity(vi->data_vq[i].vq, NULL);

		vi->affinity_hint_set = false;
	}
}

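/*
 * Spread the data virtqueues across the online CPUs, one queue per
 * CPU, so that each queue is effectively private to a CPU and
 * interrupt/processing contention is avoided.
 */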
static void virtcrypto_set_affinity(struct virtio_crypto *vcrypto)
{
	int i = 0;
	int cpu;

	/*
	 * In single queue mode, we don't set the cpu affinity.
	 */
	if (vcrypto->curr_queue == 1 || vcrypto->max_data_queues == 1) {
		virtcrypto_clean_affinity(vcrypto, -1);
		return;
	}

	/*
	 * In multiqueue mode, we let each queue be private to one cpu
	 * by setting the affinity hint, to eliminate the contention.
	 *
	 * TODO: add cpu hotplug support by registering a cpu notifier.
	 */
	for_each_online_cpu(cpu) {
		virtqueue_set_affinity(vcrypto->data_vq[i].vq, cpumask_of(cpu));
		if (++i >= vcrypto->max_data_queues)
			break;
	}

	vcrypto->affinity_hint_set = true;
}

static void virtcrypto_free_queues(struct virtio_crypto *vi)
{
	kfree(vi->data_vq);
}

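/* Allocate the data-queue array, find all vqs and set CPU affinity. */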
static int virtcrypto_init_vqs(struct virtio_crypto *vi)
{
	int ret;

	/* Allocate send & receive queues */
	ret = virtcrypto_alloc_queues(vi);
	if (ret)
		goto err;

	ret = virtcrypto_find_vqs(vi);
	if (ret)
		goto err_free;

	cpus_read_lock();
	virtcrypto_set_affinity(vi);
	cpus_read_unlock();

	return 0;

err_free:
	virtcrypto_free_queues(vi);
err:
	return ret;
}

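/*
 * Re-read the device status from config space and start or stop the
 * device accordingly. Any status bit other than HW_READY is treated
 * as a host error and marks the device as broken.
 */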
static int virtcrypto_update_status(struct virtio_crypto *vcrypto)
{
	u32 status;
	int err;

	virtio_cread_le(vcrypto->vdev,
			struct virtio_crypto_config, status, &status);

	/*
	 * Unknown status bits would be a host error and the driver
	 * should consider the device to be broken.
	 */
	if (status & (~VIRTIO_CRYPTO_S_HW_READY)) {
		dev_warn(&vcrypto->vdev->dev,
			 "Unknown status bits: 0x%x\n", status);

		virtio_break_device(vcrypto->vdev);
		return -EPERM;
	}

	if (vcrypto->status == status)
		return 0;

	vcrypto->status = status;

	if (vcrypto->status & VIRTIO_CRYPTO_S_HW_READY) {
		err = virtcrypto_dev_start(vcrypto);
		if (err) {
			dev_err(&vcrypto->vdev->dev,
				"Failed to start virtio crypto device.\n");

			return -EPERM;
		}
		dev_info(&vcrypto->vdev->dev, "Accelerator device is ready\n");
	} else {
		virtcrypto_dev_stop(vcrypto);
		dev_info(&vcrypto->vdev->dev, "Accelerator is not ready\n");
	}

	return 0;
}

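/*
 * Start the crypto engine of every data queue; on failure, unwind by
 * tearing down the engines that were already started.
 */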
static int virtcrypto_start_crypto_engines(struct virtio_crypto *vcrypto)
{
	int32_t i;
	int ret;

	for (i = 0; i < vcrypto->max_data_queues; i++) {
		if (vcrypto->data_vq[i].engine) {
			ret = crypto_engine_start(vcrypto->data_vq[i].engine);
			if (ret)
				goto err;
		}
	}

	return 0;

err:
	while (--i >= 0)
		if (vcrypto->data_vq[i].engine)
			crypto_engine_exit(vcrypto->data_vq[i].engine);

	return ret;
}

static void virtcrypto_clear_crypto_engines(struct virtio_crypto *vcrypto)
{
	u32 i;

	for (i = 0; i < vcrypto->max_data_queues; i++)
		if (vcrypto->data_vq[i].engine)
			crypto_engine_exit(vcrypto->data_vq[i].engine);
}

static void virtcrypto_del_vqs(struct virtio_crypto *vcrypto)
{
	struct virtio_device *vdev = vcrypto->vdev;

	virtcrypto_clean_affinity(vcrypto, -1);

	vdev->config->del_vqs(vdev);

	virtcrypto_free_queues(vcrypto);
}

static void vcrypto_config_changed_work(struct work_struct *work)
{
	struct virtio_crypto *vcrypto =
		container_of(work, struct virtio_crypto, config_work);

	virtcrypto_update_status(vcrypto);
}

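/*
 * Probe: validate the device (VERSION_1 feature, config access, NUMA
 * placement), read the crypto capabilities from config space, register
 * the device, bring up the virtqueues and crypto engines, and finally
 * tell the device we are ready.
 */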
static int virtcrypto_probe(struct virtio_device *vdev)
{
	int err = -EFAULT;
	struct virtio_crypto *vcrypto;
	u32 max_data_queues = 0, max_cipher_key_len = 0;
	u32 max_auth_key_len = 0;
	u64 max_size = 0;
	u32 cipher_algo_l = 0;
	u32 cipher_algo_h = 0;
	u32 hash_algo = 0;
	u32 mac_algo_l = 0;
	u32 mac_algo_h = 0;
	u32 aead_algo = 0;
	u32 akcipher_algo = 0;
	u32 crypto_services = 0;

	if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
		return -ENODEV;

	if (!vdev->config->get) {
		dev_err(&vdev->dev, "%s failure: config access disabled\n",
			__func__);
		return -EINVAL;
	}

	if (num_possible_nodes() > 1 && dev_to_node(&vdev->dev) < 0) {
		/*
		 * If the accelerator is connected to a node with no memory
		 * there is no point in using the accelerator since the remote
		 * memory transaction will be very slow.
		 */
		dev_err(&vdev->dev, "Invalid NUMA configuration.\n");
		return -EINVAL;
	}

	vcrypto = kzalloc_node(sizeof(*vcrypto), GFP_KERNEL,
			       dev_to_node(&vdev->dev));
	if (!vcrypto)
		return -ENOMEM;

	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_dataqueues, &max_data_queues);
	if (max_data_queues < 1)
		max_data_queues = 1;

	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_cipher_key_len, &max_cipher_key_len);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_auth_key_len, &max_auth_key_len);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			max_size, &max_size);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			crypto_services, &crypto_services);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			cipher_algo_l, &cipher_algo_l);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			cipher_algo_h, &cipher_algo_h);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			hash_algo, &hash_algo);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			mac_algo_l, &mac_algo_l);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			mac_algo_h, &mac_algo_h);
	virtio_cread_le(vdev, struct virtio_crypto_config,
			aead_algo, &aead_algo);
	if (crypto_services & (1 << VIRTIO_CRYPTO_SERVICE_AKCIPHER))
		virtio_cread_le(vdev, struct virtio_crypto_config,
				akcipher_algo, &akcipher_algo);

	/* Add virtio crypto device to global table */
	err = virtcrypto_devmgr_add_dev(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to add new virtio crypto device.\n");
		goto free;
	}
	vcrypto->owner = THIS_MODULE;
	vdev->priv = vcrypto;
	vcrypto->vdev = vdev;

	spin_lock_init(&vcrypto->ctrl_lock);

	/* Use a single data queue by default */
	vcrypto->curr_queue = 1;
	vcrypto->max_data_queues = max_data_queues;
	vcrypto->max_cipher_key_len = max_cipher_key_len;
	vcrypto->max_auth_key_len = max_auth_key_len;
	vcrypto->max_size = max_size;
	vcrypto->crypto_services = crypto_services;
	vcrypto->cipher_algo_l = cipher_algo_l;
	vcrypto->cipher_algo_h = cipher_algo_h;
	vcrypto->mac_algo_l = mac_algo_l;
	vcrypto->mac_algo_h = mac_algo_h;
	vcrypto->hash_algo = hash_algo;
	vcrypto->aead_algo = aead_algo;
	vcrypto->akcipher_algo = akcipher_algo;

	dev_info(&vdev->dev,
		 "max_queues: %u, max_cipher_key_len: %u, max_auth_key_len: %u, max_size 0x%llx\n",
		 vcrypto->max_data_queues,
		 vcrypto->max_cipher_key_len,
		 vcrypto->max_auth_key_len,
		 vcrypto->max_size);

	err = virtcrypto_init_vqs(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to initialize vqs.\n");
		goto free_dev;
	}

	err = virtcrypto_start_crypto_engines(vcrypto);
	if (err)
		goto free_vqs;

	virtio_device_ready(vdev);

	err = virtcrypto_update_status(vcrypto);
	if (err)
		goto free_engines;

	INIT_WORK(&vcrypto->config_work, vcrypto_config_changed_work);

	return 0;

free_engines:
	virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
	virtio_reset_device(vdev);
	virtcrypto_del_vqs(vcrypto);
free_dev:
	virtcrypto_devmgr_rm_dev(vcrypto);
free:
	kfree(vcrypto);
	return err;
}

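/*
 * After the device has been reset, detach and free any requests that
 * were still pending on the data virtqueues.
 */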
static void virtcrypto_free_unused_reqs(struct virtio_crypto *vcrypto)
{
	struct virtio_crypto_request *vc_req;
	int i;
	struct virtqueue *vq;

	for (i = 0; i < vcrypto->max_data_queues; i++) {
		vq = vcrypto->data_vq[i].vq;
		while ((vc_req = virtqueue_detach_unused_buf(vq)) != NULL)
			virtcrypto_clear_request(vc_req);
		cond_resched();
	}
}

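/*
 * Device removal: stop the device and its bottom halves, reset it so
 * it no longer touches the rings, then release outstanding requests,
 * engines and virtqueues before unregistering the device.
 */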
static void virtcrypto_remove(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;
	int i;

	dev_info(&vdev->dev, "Start virtcrypto_remove.\n");

	flush_work(&vcrypto->config_work);
	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);
	for (i = 0; i < vcrypto->max_data_queues; i++)
		tasklet_kill(&vcrypto->data_vq[i].done_task);
	virtio_reset_device(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	virtcrypto_devmgr_rm_dev(vcrypto);
	kfree(vcrypto);
}

static void virtcrypto_config_changed(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	schedule_work(&vcrypto->config_work);
}

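/*
 * Suspend/resume hooks: freeze tears everything down so the device
 * cannot touch guest memory across suspend; restore rebuilds the
 * virtqueues and crypto engines and restarts the device.
 */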
#ifdef CONFIG_PM_SLEEP
static int virtcrypto_freeze(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;

	flush_work(&vcrypto->config_work);
	virtio_reset_device(vdev);
	virtcrypto_free_unused_reqs(vcrypto);
	if (virtcrypto_dev_started(vcrypto))
		virtcrypto_dev_stop(vcrypto);

	virtcrypto_clear_crypto_engines(vcrypto);
	virtcrypto_del_vqs(vcrypto);
	return 0;
}

static int virtcrypto_restore(struct virtio_device *vdev)
{
	struct virtio_crypto *vcrypto = vdev->priv;
	int err;

	err = virtcrypto_init_vqs(vcrypto);
	if (err)
		return err;

	err = virtcrypto_start_crypto_engines(vcrypto);
	if (err)
		goto free_vqs;

	virtio_device_ready(vdev);

	err = virtcrypto_dev_start(vcrypto);
	if (err) {
		dev_err(&vdev->dev, "Failed to start virtio crypto device.\n");
		goto free_engines;
	}

	return 0;

free_engines:
	virtcrypto_clear_crypto_engines(vcrypto);
free_vqs:
	virtio_reset_device(vdev);
	virtcrypto_del_vqs(vcrypto);
	return err;
}
#endif

static const unsigned int features[] = {
	/* none */
};

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_CRYPTO, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static struct virtio_driver virtio_crypto_driver = {
	.driver.name = KBUILD_MODNAME,
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.id_table = id_table,
	.probe = virtcrypto_probe,
	.remove = virtcrypto_remove,
	.config_changed = virtcrypto_config_changed,
#ifdef CONFIG_PM_SLEEP
	.freeze = virtcrypto_freeze,
	.restore = virtcrypto_restore,
#endif
};

module_virtio_driver(virtio_crypto_driver);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("virtio crypto device driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Gonglei <arei.gonglei@huawei.com>");
