// SPDX-License-Identifier: GPL-2.0-only

#include <net/netdev_queues.h>

#include "netlink.h"
#include "common.h"

struct rings_req_info {
	struct ethnl_req_info		base;
};

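/* Reply data for RINGS_GET: a snapshot of the driver's ring parameters,
 * the kernel-tracked extensions, and the ETHTOOL_RING_USE_* capability
 * bits the driver advertises.
 */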
struct rings_reply_data {
	struct ethnl_reply_data		base;
	struct ethtool_ringparam	ringparam;
	struct kernel_ethtool_ringparam	kernel_ringparam;
	u32				supported_ring_params;
};

#define RINGS_REPDATA(__reply_base) \
	container_of(__reply_base, struct rings_reply_data, base)

const struct nla_policy ethnl_rings_get_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
};

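/* Query the driver for its current ring configuration. The header/data
 * split fields are seeded from the core-tracked device config before the
 * driver callback runs, so drivers that do not touch them still report
 * the values the core is using.
 */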
static int rings_prepare_data(const struct ethnl_req_info *req_base,
			      struct ethnl_reply_data *reply_base,
			      const struct genl_info *info)
{
	struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	struct net_device *dev = reply_base->dev;
	int ret;

	if (!dev->ethtool_ops->get_ringparam)
		return -EOPNOTSUPP;

	data->supported_ring_params = dev->ethtool_ops->supported_ring_params;
	ret = ethnl_ops_begin(dev);
	if (ret < 0)
		return ret;

	data->kernel_ringparam.tcp_data_split = dev->cfg->hds_config;
	data->kernel_ringparam.hds_thresh = dev->cfg->hds_thresh;

	dev->ethtool_ops->get_ringparam(dev, &data->ringparam,
					&data->kernel_ringparam, info->extack);
	ethnl_ops_complete(dev);

	return 0;
}

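/* Worst-case size of a RINGS_GET reply: one attribute per ring parameter,
 * counted unconditionally even though rings_fill_reply() may omit some.
 */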
static int rings_reply_size(const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	return nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_MINI */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_JUMBO */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_RX_BUF_LEN */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_TCP_DATA_SPLIT */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_CQE_SIZE */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_TX_PUSH */
	       nla_total_size(sizeof(u8)) +	/* _RINGS_RX_PUSH */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_PUSH_BUF_LEN */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_TX_PUSH_BUF_LEN_MAX */
	       nla_total_size(sizeof(u32)) +	/* _RINGS_HDS_THRESH */
	       nla_total_size(sizeof(u32));	/* _RINGS_HDS_THRESH_MAX */
}

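/* Emit the ring attributes. Max/current pairs are only included when the
 * driver reports a non-zero maximum; push-buffer and HDS threshold
 * attributes are gated on the corresponding ETHTOOL_RING_USE_* bits.
 */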
static int rings_fill_reply(struct sk_buff *skb,
			    const struct ethnl_req_info *req_base,
			    const struct ethnl_reply_data *reply_base)
{
	const struct rings_reply_data *data = RINGS_REPDATA(reply_base);
	const struct kernel_ethtool_ringparam *kr = &data->kernel_ringparam;
	const struct ethtool_ringparam *ringparam = &data->ringparam;
	u32 supported_ring_params = data->supported_ring_params;

	WARN_ON(kr->tcp_data_split > ETHTOOL_TCP_DATA_SPLIT_ENABLED);

	if ((ringparam->rx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MAX,
			  ringparam->rx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX,
			  ringparam->rx_pending))) ||
	    (ringparam->rx_mini_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI_MAX,
			  ringparam->rx_mini_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_MINI,
			  ringparam->rx_mini_pending))) ||
	    (ringparam->rx_jumbo_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO_MAX,
			  ringparam->rx_jumbo_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_RX_JUMBO,
			  ringparam->rx_jumbo_pending))) ||
	    (ringparam->tx_max_pending &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_MAX,
			  ringparam->tx_max_pending) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX,
			  ringparam->tx_pending))) ||
	    (kr->rx_buf_len &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_RX_BUF_LEN, kr->rx_buf_len))) ||
	    (kr->tcp_data_split &&
	     (nla_put_u8(skb, ETHTOOL_A_RINGS_TCP_DATA_SPLIT,
			 kr->tcp_data_split))) ||
	    (kr->cqe_size &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_CQE_SIZE, kr->cqe_size))) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_TX_PUSH, !!kr->tx_push) ||
	    nla_put_u8(skb, ETHTOOL_A_RINGS_RX_PUSH, !!kr->rx_push) ||
	    ((supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN) &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN_MAX,
			  kr->tx_push_buf_max_len) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN,
			  kr->tx_push_buf_len))) ||
	    ((supported_ring_params & ETHTOOL_RING_USE_HDS_THRS) &&
	     (nla_put_u32(skb, ETHTOOL_A_RINGS_HDS_THRESH,
			  kr->hds_thresh) ||
	      nla_put_u32(skb, ETHTOOL_A_RINGS_HDS_THRESH_MAX,
			  kr->hds_thresh_max))))
		return -EMSGSIZE;

	return 0;
}

/* RINGS_SET */

const struct nla_policy ethnl_rings_set_policy[] = {
	[ETHTOOL_A_RINGS_HEADER]		=
		NLA_POLICY_NESTED(ethnl_header_policy),
	[ETHTOOL_A_RINGS_RX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_MINI]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_JUMBO]		= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_TX]			= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_RX_BUF_LEN]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TCP_DATA_SPLIT]	=
		NLA_POLICY_MAX(NLA_U8, ETHTOOL_TCP_DATA_SPLIT_ENABLED),
	[ETHTOOL_A_RINGS_CQE_SIZE]		= NLA_POLICY_MIN(NLA_U32, 1),
	[ETHTOOL_A_RINGS_TX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_RX_PUSH]		= NLA_POLICY_MAX(NLA_U8, 1),
	[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN]	= { .type = NLA_U32 },
	[ETHTOOL_A_RINGS_HDS_THRESH]		= { .type = NLA_U32 },
};

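/* Reject attributes for features the driver does not advertise in
 * supported_ring_params, with a per-attribute extack message, before any
 * state is touched.
 */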
static int
ethnl_set_rings_validate(struct ethnl_req_info *req_info,
			 struct genl_info *info)
{
	const struct ethtool_ops *ops = req_info->dev->ethtool_ops;
	struct nlattr **tb = info->attrs;

	if (tb[ETHTOOL_A_RINGS_RX_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_BUF_LEN],
				    "setting rx buf len not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TCP_DATA_SPLIT)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT],
				    "setting TCP data split is not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_HDS_THRESH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_HDS_THRS)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_HDS_THRESH],
				    "setting hds-thresh is not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_CQE_SIZE] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_CQE_SIZE)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_CQE_SIZE],
				    "setting cqe size not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH],
				    "setting tx push not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_RX_PUSH] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_RX_PUSH)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_RX_PUSH],
				    "setting rx push not supported");
		return -EOPNOTSUPP;
	}

	if (tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN] &&
	    !(ops->supported_ring_params & ETHTOOL_RING_USE_TX_PUSH_BUF_LEN)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN],
				    "setting tx push buf len is not supported");
		return -EOPNOTSUPP;
	}

	return ops->get_ringparam && ops->set_ringparam ? 1 : -EOPNOTSUPP;
}

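/* Apply a RINGS_SET request: start from the current configuration, fold in
 * the attributes the user supplied, sanity-check the result against driver
 * limits, then hand it to the driver's set_ringparam(). Returns 1 when the
 * configuration changed (so a notification is sent), 0 when nothing changed.
 */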
static int
ethnl_set_rings(struct ethnl_req_info *req_info, struct genl_info *info)
{
	struct kernel_ethtool_ringparam kernel_ringparam;
	struct net_device *dev = req_info->dev;
	struct ethtool_ringparam ringparam;
	struct nlattr **tb = info->attrs;
	const struct nlattr *err_attr;
	bool mod = false;
	int ret;

	ethtool_ringparam_get_cfg(dev, &ringparam, &kernel_ringparam,
				  info->extack);

	ethnl_update_u32(&ringparam.rx_pending, tb[ETHTOOL_A_RINGS_RX], &mod);
	ethnl_update_u32(&ringparam.rx_mini_pending,
			 tb[ETHTOOL_A_RINGS_RX_MINI], &mod);
	ethnl_update_u32(&ringparam.rx_jumbo_pending,
			 tb[ETHTOOL_A_RINGS_RX_JUMBO], &mod);
	ethnl_update_u32(&ringparam.tx_pending, tb[ETHTOOL_A_RINGS_TX], &mod);
	ethnl_update_u32(&kernel_ringparam.rx_buf_len,
			 tb[ETHTOOL_A_RINGS_RX_BUF_LEN], &mod);
	ethnl_update_u8(&kernel_ringparam.tcp_data_split,
			tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT], &mod);
	ethnl_update_u32(&kernel_ringparam.cqe_size,
			 tb[ETHTOOL_A_RINGS_CQE_SIZE], &mod);
	ethnl_update_u8(&kernel_ringparam.tx_push,
			tb[ETHTOOL_A_RINGS_TX_PUSH], &mod);
	ethnl_update_u8(&kernel_ringparam.rx_push,
			tb[ETHTOOL_A_RINGS_RX_PUSH], &mod);
	ethnl_update_u32(&kernel_ringparam.tx_push_buf_len,
			 tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN], &mod);
	ethnl_update_u32(&kernel_ringparam.hds_thresh,
			 tb[ETHTOOL_A_RINGS_HDS_THRESH], &mod);
	if (!mod)
		return 0;

	if (kernel_ringparam.tcp_data_split == ETHTOOL_TCP_DATA_SPLIT_ENABLED &&
	    dev_xdp_sb_prog_count(dev)) {
		NL_SET_ERR_MSG_ATTR(info->extack,
				    tb[ETHTOOL_A_RINGS_TCP_DATA_SPLIT],
				    "tcp-data-split can not be enabled with single buffer XDP");
		return -EINVAL;
	}

	if (dev_get_min_mp_channel_count(dev)) {
		if (kernel_ringparam.tcp_data_split !=
		    ETHTOOL_TCP_DATA_SPLIT_ENABLED) {
			NL_SET_ERR_MSG(info->extack,
				       "can't disable tcp-data-split while device has memory provider enabled");
			return -EINVAL;
		} else if (kernel_ringparam.hds_thresh) {
			NL_SET_ERR_MSG(info->extack,
				       "can't set non-zero hds_thresh while device has memory provider enabled");
			return -EINVAL;
		}
	}

	/* ensure new ring parameters are within limits */
	if (ringparam.rx_pending > ringparam.rx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX];
	else if (ringparam.rx_mini_pending > ringparam.rx_mini_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_MINI];
	else if (ringparam.rx_jumbo_pending > ringparam.rx_jumbo_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_RX_JUMBO];
	else if (ringparam.tx_pending > ringparam.tx_max_pending)
		err_attr = tb[ETHTOOL_A_RINGS_TX];
	else if (kernel_ringparam.hds_thresh > kernel_ringparam.hds_thresh_max)
		err_attr = tb[ETHTOOL_A_RINGS_HDS_THRESH];
	else
		err_attr = NULL;
	if (err_attr) {
		NL_SET_ERR_MSG_ATTR(info->extack, err_attr,
				    "requested ring size exceeds maximum");
		return -EINVAL;
	}

	if (kernel_ringparam.tx_push_buf_len > kernel_ringparam.tx_push_buf_max_len) {
		NL_SET_ERR_MSG_ATTR_FMT(info->extack, tb[ETHTOOL_A_RINGS_TX_PUSH_BUF_LEN],
					"Requested TX push buffer exceeds the maximum of %u",
					kernel_ringparam.tx_push_buf_max_len);

		return -EINVAL;
	}

	dev->cfg_pending->hds_config = kernel_ringparam.tcp_data_split;
	dev->cfg_pending->hds_thresh = kernel_ringparam.hds_thresh;

	ret = dev->ethtool_ops->set_ringparam(dev, &ringparam,
					      &kernel_ringparam, info->extack);
	return ret < 0 ? ret : 1;
}

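/* Glue between the generic ethnl request machinery and the handlers above. */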
const struct ethnl_request_ops ethnl_rings_request_ops = {
	.request_cmd		= ETHTOOL_MSG_RINGS_GET,
	.reply_cmd		= ETHTOOL_MSG_RINGS_GET_REPLY,
	.hdr_attr		= ETHTOOL_A_RINGS_HEADER,
	.req_info_size		= sizeof(struct rings_req_info),
	.reply_data_size	= sizeof(struct rings_reply_data),

	.prepare_data		= rings_prepare_data,
	.reply_size		= rings_reply_size,
	.fill_reply		= rings_fill_reply,

	.set_validate		= ethnl_set_rings_validate,
	.set			= ethnl_set_rings,
	.set_ntf_cmd		= ETHTOOL_MSG_RINGS_NTF,
};