// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2023 Intel Corporation */

#include <linux/dma-mapping.h>
#include <linux/pci.h>

#include "adf_admin.h"
#include "adf_accel_devices.h"
#include "adf_rl_admin.h"

| 11 | static void |
| 12 | prep_admin_req_msg(struct rl_sla *sla, dma_addr_t dma_addr, |
| 13 | struct icp_qat_fw_init_admin_sla_config_params *fw_params, |
| 14 | struct icp_qat_fw_init_admin_req *req, bool is_update) |
| 15 | { |
| 16 | req->cmd_id = is_update ? ICP_QAT_FW_RL_UPDATE : ICP_QAT_FW_RL_ADD; |
| 17 | req->init_cfg_ptr = dma_addr; |
| 18 | req->init_cfg_sz = sizeof(*fw_params); |
| 19 | req->node_id = sla->node_id; |
| 20 | req->node_type = sla->type; |
| 21 | req->rp_count = sla->ring_pairs_cnt; |
| 22 | req->svc_type = sla->srv; |
| 23 | } |
| 24 | |
| 25 | static void |
| 26 | prep_admin_req_params(struct adf_accel_dev *accel_dev, struct rl_sla *sla, |
| 27 | struct icp_qat_fw_init_admin_sla_config_params *fw_params) |
| 28 | { |
| 29 | fw_params->pcie_in_cir = |
| 30 | adf_rl_calculate_pci_bw(accel_dev, sla_val: sla->cir, svc_type: sla->srv, is_bw_out: false); |
| 31 | fw_params->pcie_in_pir = |
| 32 | adf_rl_calculate_pci_bw(accel_dev, sla_val: sla->pir, svc_type: sla->srv, is_bw_out: false); |
| 33 | fw_params->pcie_out_cir = |
| 34 | adf_rl_calculate_pci_bw(accel_dev, sla_val: sla->cir, svc_type: sla->srv, is_bw_out: true); |
| 35 | fw_params->pcie_out_pir = |
| 36 | adf_rl_calculate_pci_bw(accel_dev, sla_val: sla->pir, svc_type: sla->srv, is_bw_out: true); |
| 37 | |
| 38 | fw_params->slice_util_cir = |
| 39 | adf_rl_calculate_slice_tokens(accel_dev, sla_val: sla->cir, svc_type: sla->srv); |
| 40 | fw_params->slice_util_pir = |
| 41 | adf_rl_calculate_slice_tokens(accel_dev, sla_val: sla->pir, svc_type: sla->srv); |
| 42 | |
| 43 | fw_params->ae_util_cir = |
| 44 | adf_rl_calculate_ae_cycles(accel_dev, sla_val: sla->cir, svc_type: sla->srv); |
| 45 | fw_params->ae_util_pir = |
| 46 | adf_rl_calculate_ae_cycles(accel_dev, sla_val: sla->pir, svc_type: sla->srv); |
| 47 | |
| 48 | memcpy(fw_params->rp_ids, sla->ring_pairs_ids, |
| 49 | sizeof(sla->ring_pairs_ids)); |
| 50 | } |
| 51 | |
| 52 | int adf_rl_send_admin_init_msg(struct adf_accel_dev *accel_dev, |
| 53 | struct rl_slice_cnt *slices_int) |
| 54 | { |
| 55 | struct icp_qat_fw_init_admin_slice_cnt slices_resp = { }; |
| 56 | int ret; |
| 57 | |
| 58 | ret = adf_send_admin_rl_init(accel_dev, slices: &slices_resp); |
| 59 | if (ret) |
| 60 | return ret; |
| 61 | |
| 62 | slices_int->dcpr_cnt = slices_resp.dcpr_cnt; |
| 63 | slices_int->pke_cnt = slices_resp.pke_cnt; |
| 64 | /* For symmetric crypto, slice tokens are relative to the UCS slice */ |
| 65 | slices_int->cph_cnt = slices_resp.ucs_cnt; |
| 66 | slices_int->cpr_cnt = slices_resp.cpr_cnt; |
| 67 | |
| 68 | return 0; |
| 69 | } |
| 70 | |
| 71 | int adf_rl_send_admin_add_update_msg(struct adf_accel_dev *accel_dev, |
| 72 | struct rl_sla *sla, bool is_update) |
| 73 | { |
| 74 | struct icp_qat_fw_init_admin_sla_config_params *fw_params; |
| 75 | struct icp_qat_fw_init_admin_req req = { }; |
| 76 | dma_addr_t dma_addr; |
| 77 | int ret; |
| 78 | |
| 79 | fw_params = dma_alloc_coherent(dev: &GET_DEV(accel_dev), size: sizeof(*fw_params), |
| 80 | dma_handle: &dma_addr, GFP_KERNEL); |
| 81 | if (!fw_params) |
| 82 | return -ENOMEM; |
| 83 | |
| 84 | prep_admin_req_params(accel_dev, sla, fw_params); |
| 85 | prep_admin_req_msg(sla, dma_addr, fw_params, req: &req, is_update); |
| 86 | ret = adf_send_admin_rl_add_update(accel_dev, req: &req); |
| 87 | |
| 88 | dma_free_coherent(dev: &GET_DEV(accel_dev), size: sizeof(*fw_params), cpu_addr: fw_params, |
| 89 | dma_handle: dma_addr); |
| 90 | |
| 91 | return ret; |
| 92 | } |
| 93 | |
| 94 | int adf_rl_send_admin_delete_msg(struct adf_accel_dev *accel_dev, u16 node_id, |
| 95 | u8 node_type) |
| 96 | { |
| 97 | return adf_send_admin_rl_delete(accel_dev, node_id, node_type); |
| 98 | } |
| 99 | |