/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>

#include <net/ipv6.h>
#include <net/addrconf.h>
#include <net/devlink.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include <net/bonding.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>

#include "mlx4_ib.h"
#include <rdma/mlx4-abi.h>

#define DRV_NAME	MLX4_IB_DRV_NAME
#define DRV_VERSION	"4.0-0"

#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
#define MLX4_IB_CARD_REV_A0   0xA0

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");

int mlx4_ib_sm_guid_assign = 0;
module_param_named(sm_guid_assign, mlx4_ib_sm_guid_assign, int, 0444);
MODULE_PARM_DESC(sm_guid_assign, "Enable SM alias_GUID assignment if sm_guid_assign > 0 (Default: 0)");

static const char mlx4_ib_version[] =
	DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
	DRV_VERSION "\n";

static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init);
static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
						    u32 port_num);
static int mlx4_ib_event(struct notifier_block *this, unsigned long event,
			 void *param);

static struct workqueue_struct *wq;

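/*
 * Device-managed flow steering (DMFS) is only usable when the firmware
 * advertises it for every link type present: FS_EN for Ethernet ports and
 * DMFS_IPOIB for IB ports. It is also unavailable for IB ports in a
 * multi-function (SR-IOV) configuration.
 */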
static int check_flow_steering_support(struct mlx4_dev *dev)
{
	int eth_num_ports = 0;
	int ib_num_ports = 0;

	int dmfs = dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED;

	if (dmfs) {
		int i;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH)
			eth_num_ports++;
		mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
			ib_num_ports++;
		dmfs &= (!ib_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB)) &&
			(!eth_num_ports ||
			 (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_FS_EN));
		if (ib_num_ports && mlx4_is_mfunc(dev)) {
			pr_warn("Device managed flow steering is unavailable for IB port in multifunction env.\n");
			dmfs = 0;
		}
	}
	return dmfs;
}

static int num_ib_ports(struct mlx4_dev *dev)
{
	int ib_ports = 0;
	int i;

	mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
		ib_ports++;

	return ib_ports;
}

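/*
 * Find the net_device backing the given IB port: it must share the parent
 * device and report a matching dev_port. When the device is bonded, resolve
 * through the bond master to the currently active slave.
 */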
static struct net_device *mlx4_ib_get_netdev(struct ib_device *device,
					     u32 port_num)
{
	struct mlx4_ib_dev *ibdev = to_mdev(device);
	struct net_device *dev, *ret = NULL;

	rcu_read_lock();
	for_each_netdev_rcu(&init_net, dev) {
		if (dev->dev.parent != ibdev->ib_dev.dev.parent ||
		    dev->dev_port + 1 != port_num)
			continue;

		if (mlx4_is_bonded(ibdev->dev)) {
			struct net_device *upper;

			upper = netdev_master_upper_dev_get_rcu(dev);
			if (upper) {
				struct net_device *active;

				active = bond_option_active_slave_get_rcu(netdev_priv(upper));
				if (active)
					dev = active;
			}
		}

		dev_hold(dev);
		ret = dev;
		break;
	}

	rcu_read_unlock();
	return ret;
}

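/*
 * Program the port's GID table into the HCA via SET_PORT using the RoCE v1
 * layout (a plain array of GIDs). On a bonded device the same table is also
 * written to port 2.
 */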
static int mlx4_ib_update_gids_v1(struct gid_entry *gids,
				  struct mlx4_ib_dev *ibdev,
				  u32 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	union ib_gid *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		memcpy(&gid_tbl[i], &gids[i].gid, sizeof(union ib_gid));

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_GID_TABLE << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_GID_TABLE << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

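/*
 * Same as mlx4_ib_update_gids_v1(), but using the extended RoCE address
 * table entry: version 2 marks a RoCE v2 (UDP-encapsulated) GID, and the
 * type field is set for GIDs that are not IPv4-mapped addresses.
 */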
static int mlx4_ib_update_gids_v1_v2(struct gid_entry *gids,
				     struct mlx4_ib_dev *ibdev,
				     u32 port_num)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct mlx4_dev *dev = ibdev->dev;
	int i;
	struct {
		union ib_gid	gid;
		__be32		rsrvd1[2];
		__be16		rsrvd2;
		u8		type;
		u8		version;
		__be32		rsrvd3;
	} *gid_tbl;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return -ENOMEM;

	gid_tbl = mailbox->buf;
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		memcpy(&gid_tbl[i].gid, &gids[i].gid, sizeof(union ib_gid));
		if (gids[i].gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP) {
			gid_tbl[i].version = 2;
			if (!ipv6_addr_v4mapped((struct in6_addr *)&gids[i].gid))
				gid_tbl[i].type = 1;
		}
	}

	err = mlx4_cmd(dev, mailbox->dma,
		       MLX4_SET_PORT_ROCE_ADDR << 8 | port_num,
		       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);
	if (mlx4_is_bonded(dev))
		err += mlx4_cmd(dev, mailbox->dma,
				MLX4_SET_PORT_ROCE_ADDR << 8 | 2,
				1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
				MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}

static int mlx4_ib_update_gids(struct gid_entry *gids,
			       struct mlx4_ib_dev *ibdev,
			       u32 port_num)
{
	if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2)
		return mlx4_ib_update_gids_v1_v2(gids, ibdev, port_num);

	return mlx4_ib_update_gids_v1(gids, ibdev, port_num);
}

static void free_gid_entry(struct gid_entry *entry)
{
	memset(&entry->gid, 0, sizeof(entry->gid));
	kfree(entry->ctx);
	entry->ctx = NULL;
}

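/*
 * Add a GID to the per-port software cache and, if it is new, push the whole
 * table to hardware. Entries are reference counted: adding an existing
 * (gid, gid_type, vlan_id) tuple just bumps the refcount. Allocations under
 * iboe->lock use GFP_ATOMIC; the firmware update runs after the lock is
 * dropped, on a snapshot of the table.
 */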
static int mlx4_ib_add_gid(const struct ib_gid_attr *attr, void **context)
{
	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int free = -1, found = -1;
	int ret = 0;
	int hw_update = 0;
	int i;
	struct gid_entry *gids;
	u16 vlan_id = 0xffff;
	u8 mac[ETH_ALEN];

	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
		return -EINVAL;

	if (attr->port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (!context)
		return -EINVAL;

	ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
	if (ret)
		return ret;
	port_gid_table = &iboe->gids[attr->port_num - 1];
	spin_lock_bh(&iboe->lock);
	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i) {
		if (!memcmp(&port_gid_table->gids[i].gid,
			    &attr->gid, sizeof(attr->gid)) &&
		    port_gid_table->gids[i].gid_type == attr->gid_type &&
		    port_gid_table->gids[i].vlan_id == vlan_id)  {
			found = i;
			break;
		}
		if (free < 0 && rdma_is_zero_gid(&port_gid_table->gids[i].gid))
			free = i; /* HW has space */
	}

	if (found < 0) {
		if (free < 0) {
			ret = -ENOSPC;
		} else {
			port_gid_table->gids[free].ctx = kmalloc(sizeof(*port_gid_table->gids[free].ctx), GFP_ATOMIC);
			if (!port_gid_table->gids[free].ctx) {
				ret = -ENOMEM;
			} else {
				*context = port_gid_table->gids[free].ctx;
				port_gid_table->gids[free].gid = attr->gid;
				port_gid_table->gids[free].gid_type = attr->gid_type;
				port_gid_table->gids[free].vlan_id = vlan_id;
				port_gid_table->gids[free].ctx->real_index = free;
				port_gid_table->gids[free].ctx->refcount = 1;
				hw_update = 1;
			}
		}
	} else {
		struct gid_cache_context *ctx = port_gid_table->gids[found].ctx;
		*context = ctx;
		ctx->refcount++;
	}
	if (!ret && hw_update) {
		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
				     GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
			*context = NULL;
			free_gid_entry(&port_gid_table->gids[free]);
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid, &port_gid_table->gids[i].gid, sizeof(union ib_gid));
				gids[i].gid_type = port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (!ret && hw_update) {
		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);
		if (ret) {
			spin_lock_bh(&iboe->lock);
			*context = NULL;
			free_gid_entry(&port_gid_table->gids[free]);
			spin_unlock_bh(&iboe->lock);
		}
		kfree(gids);
	}

	return ret;
}

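/*
 * Drop one reference on a cached GID; when the last reference goes away,
 * clear the entry and rewrite the hardware table from a snapshot taken
 * under iboe->lock.
 */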
static int mlx4_ib_del_gid(const struct ib_gid_attr *attr, void **context)
{
	struct gid_cache_context *ctx = *context;
	struct mlx4_ib_dev *ibdev = to_mdev(attr->device);
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct mlx4_port_gid_table *port_gid_table;
	int ret = 0;
	int hw_update = 0;
	struct gid_entry *gids = NULL;

	if (!rdma_cap_roce_gid_table(attr->device, attr->port_num))
		return -EINVAL;

	if (attr->port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	port_gid_table = &iboe->gids[attr->port_num - 1];
	spin_lock_bh(&iboe->lock);
	if (ctx) {
		ctx->refcount--;
		if (!ctx->refcount) {
			unsigned int real_index = ctx->real_index;

			free_gid_entry(&port_gid_table->gids[real_index]);
			hw_update = 1;
		}
	}
	if (!ret && hw_update) {
		int i;

		gids = kmalloc_array(MLX4_MAX_PORT_GIDS, sizeof(*gids),
				     GFP_ATOMIC);
		if (!gids) {
			ret = -ENOMEM;
		} else {
			for (i = 0; i < MLX4_MAX_PORT_GIDS; i++) {
				memcpy(&gids[i].gid,
				       &port_gid_table->gids[i].gid,
				       sizeof(union ib_gid));
				gids[i].gid_type =
				    port_gid_table->gids[i].gid_type;
			}
		}
	}
	spin_unlock_bh(&iboe->lock);

	if (gids)
		ret = mlx4_ib_update_gids(gids, ibdev, attr->port_num);

	kfree(gids);
	return ret;
}

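/*
 * Map a GID cache index to the slot actually programmed in hardware. On a
 * bonded device everything lives on port 1. Ports without a RoCE GID table
 * (i.e. IB link layer) use the index as-is.
 */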
int mlx4_ib_gid_index_to_real_index(struct mlx4_ib_dev *ibdev,
				    const struct ib_gid_attr *attr)
{
	struct mlx4_ib_iboe *iboe = &ibdev->iboe;
	struct gid_cache_context *ctx = NULL;
	struct mlx4_port_gid_table *port_gid_table;
	int real_index = -EINVAL;
	int i;
	unsigned long flags;
	u32 port_num = attr->port_num;

	if (port_num > MLX4_MAX_PORTS)
		return -EINVAL;

	if (mlx4_is_bonded(ibdev->dev))
		port_num = 1;

	if (!rdma_cap_roce_gid_table(&ibdev->ib_dev, port_num))
		return attr->index;

	spin_lock_irqsave(&iboe->lock, flags);
	port_gid_table = &iboe->gids[port_num - 1];

	for (i = 0; i < MLX4_MAX_PORT_GIDS; ++i)
		if (!memcmp(&port_gid_table->gids[i].gid,
			    &attr->gid, sizeof(attr->gid)) &&
		    attr->gid_type == port_gid_table->gids[i].gid_type) {
			ctx = port_gid_table->gids[i].ctx;
			break;
		}
	if (ctx)
		real_index = ctx->real_index;
	spin_unlock_irqrestore(&iboe->lock, flags);
	return real_index;
}

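/*
 * Report device attributes. Most limits come straight from the firmware
 * capability struct; vendor id, hw revision and the system image GUID are
 * read via a NodeInfo MAD. The extended (uverbs_ex) response is filled
 * incrementally, growing resp.response_length only for fields the caller's
 * output buffer can hold.
 */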
static int mlx4_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct ib_smp *in_mad;
	struct ib_smp *out_mad;
	int err;
	int have_ib_ports;
	struct mlx4_uverbs_ex_query_device cmd;
	struct mlx4_uverbs_ex_query_device_resp resp = {};
	struct mlx4_clock_params clock_params;

	if (uhw->inlen) {
		if (uhw->inlen < sizeof(cmd))
			return -EINVAL;

		err = ib_copy_from_udata(&cmd, uhw, sizeof(cmd));
		if (err)
			return err;

		if (cmd.comp_mask)
			return -EINVAL;

		if (cmd.reserved)
			return -EINVAL;
	}

	resp.response_length = offsetof(typeof(resp), response_length) +
		sizeof(resp.response_length);
	in_mad  = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	err = -ENOMEM;
	if (!in_mad || !out_mad)
		goto out;

	ib_init_query_mad(in_mad);
	in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

	err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
			   1, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memset(props, 0, sizeof *props);

	have_ib_ports = num_ib_ports(dev->dev);

	props->fw_ver = dev->dev->caps.fw_ver;
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT		|
		IB_DEVICE_SYS_IMAGE_GUID		|
		IB_DEVICE_RC_RNR_NAK_GEN;
	props->kernel_cap_flags = IBK_BLOCK_MULTICAST_LOOPBACK;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM && have_ib_ports)
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
		props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
	if (dev->dev->caps.max_gso_sz &&
	    (dev->dev->rev_id != MLX4_IB_CARD_REV_A0) &&
	    (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH))
		props->kernel_cap_flags |= IBK_UD_TSO;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
		props->kernel_cap_flags |= IBK_LOCAL_DMA_LKEY;
	if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
	    (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW)
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW;
	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) {
		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B)
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B;
		else
			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A;
	}
	if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED)
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;

	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
		0xffffff;
	props->vendor_part_id	   = dev->dev->persist->pdev->device;
	props->hw_ver		   = be32_to_cpup((__be32 *) (out_mad->data + 32));
	memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = dev->dev->caps.page_size_cap;
	props->max_qp		   = dev->dev->quotas.qp;
	props->max_qp_wr	   = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
	props->max_send_sge =
		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
	props->max_recv_sge =
		min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg);
	props->max_sge_rd = MLX4_MAX_SGE_RD;
	props->max_cq		   = dev->dev->quotas.cq;
	props->max_cqe		   = dev->dev->caps.max_cqes;
	props->max_mr		   = dev->dev->quotas.mpt;
	props->max_pd		   = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
	props->max_qp_rd_atom	   = dev->dev->caps.max_qp_dest_rdma;
	props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq		   = dev->dev->quotas.srq;
	props->max_srq_wr	   = dev->dev->caps.max_srq_wqes - 1;
	props->max_srq_sge	   = dev->dev->caps.max_srq_sge;
	props->max_fast_reg_page_list_len = MLX4_MAX_FAST_REG_PAGES;
	props->local_ca_ack_delay  = dev->dev->caps.local_ca_ack_delay;
	props->atomic_cap	   = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
		IB_ATOMIC_HCA : IB_ATOMIC_NONE;
	props->masked_atomic_cap   = props->atomic_cap;
	props->max_pkeys	   = dev->dev->caps.pkey_table_len[1];
	props->max_mcast_grp	   = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
	props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->hca_core_clock = dev->dev->caps.hca_core_clock * 1000UL;
	props->timestamp_mask = 0xFFFFFFFFFFFFULL;
	props->max_ah = INT_MAX;

	if (mlx4_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET ||
	    mlx4_ib_port_link_layer(ibdev, 2) == IB_LINK_LAYER_ETHERNET) {
		if (dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) {
			props->rss_caps.max_rwq_indirection_tables =
				props->max_qp;
			props->rss_caps.max_rwq_indirection_table_size =
				dev->dev->caps.max_rss_tbl_sz;
			props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
			props->max_wq_type_rq = props->max_qp;
		}

		if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_FCS_KEEP)
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	props->cq_caps.max_cq_moderation_count = MLX4_MAX_CQ_COUNT;
	props->cq_caps.max_cq_moderation_period = MLX4_MAX_CQ_PERIOD;

	if (uhw->outlen >= resp.response_length + sizeof(resp.hca_core_clock_offset)) {
		resp.response_length += sizeof(resp.hca_core_clock_offset);
		if (!mlx4_get_internal_clock_params(dev->dev, &clock_params)) {
			resp.comp_mask |= MLX4_IB_QUERY_DEV_RESP_MASK_CORE_CLOCK_OFFSET;
			resp.hca_core_clock_offset = clock_params.offset % PAGE_SIZE;
		}
	}

	if (uhw->outlen >= resp.response_length +
	    sizeof(resp.max_inl_recv_sz)) {
		resp.response_length += sizeof(resp.max_inl_recv_sz);
		resp.max_inl_recv_sz  = dev->dev->caps.max_rq_sg *
			sizeof(struct mlx4_wqe_data_seg);
	}

	if (offsetofend(typeof(resp), rss_caps) <= uhw->outlen) {
		if (props->rss_caps.supported_qpts) {
			resp.rss_caps.rx_hash_function =
				MLX4_IB_RX_HASH_FUNC_TOEPLITZ;

			resp.rss_caps.rx_hash_fields_mask =
				MLX4_IB_RX_HASH_SRC_IPV4 |
				MLX4_IB_RX_HASH_DST_IPV4 |
				MLX4_IB_RX_HASH_SRC_IPV6 |
				MLX4_IB_RX_HASH_DST_IPV6 |
				MLX4_IB_RX_HASH_SRC_PORT_TCP |
				MLX4_IB_RX_HASH_DST_PORT_TCP |
				MLX4_IB_RX_HASH_SRC_PORT_UDP |
				MLX4_IB_RX_HASH_DST_PORT_UDP;

			if (dev->dev->caps.tunnel_offload_mode ==
			    MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
				resp.rss_caps.rx_hash_fields_mask |=
					MLX4_IB_RX_HASH_INNER;
		}
		resp.response_length = offsetof(typeof(resp), rss_caps) +
				       sizeof(resp.rss_caps);
	}

	if (offsetofend(typeof(resp), tso_caps) <= uhw->outlen) {
		if (dev->dev->caps.max_gso_sz &&
		    ((mlx4_ib_port_link_layer(ibdev, 1) ==
		    IB_LINK_LAYER_ETHERNET) ||
		    (mlx4_ib_port_link_layer(ibdev, 2) ==
		    IB_LINK_LAYER_ETHERNET))) {
			resp.tso_caps.max_tso = dev->dev->caps.max_gso_sz;
			resp.tso_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
		}
		resp.response_length = offsetof(typeof(resp), tso_caps) +
				       sizeof(resp.tso_caps);
	}

	if (uhw->outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);
		if (err)
			goto out;
	}
out:
	kfree(in_mad);
	kfree(out_mad);

	return err;
}

static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u32 port_num)
{
	struct mlx4_dev *dev = to_mdev(device)->dev;

	return dev->caps.port_mask[port_num] == MLX4_PORT_TYPE_IB ?
		IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

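/*
 * Query an IB-link-layer port with a PortInfo MAD and decode the fields by
 * their fixed offsets. If the port reports extended speed support, the
 * extended field overrides active_speed; a QDR reading is double-checked
 * against ExtendedPortInfo to detect FDR-10.
 */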
static int ib_link_query_port(struct ib_device *ibdev, u32 port,
			      struct ib_port_attr *props, int netw_view)
{
	struct ib_smp *in_mad;
	struct ib_smp *out_mad;
	int ext_active_speed;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	ib_init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;


	props->lid		= be16_to_cpup((__be16 *) (out_mad->data + 16));
	props->lmc		= out_mad->data[34] & 0x7;
	props->sm_lid		= be16_to_cpup((__be16 *) (out_mad->data + 18));
	props->sm_sl		= out_mad->data[36] & 0xf;
	props->state		= out_mad->data[32] & 0xf;
	props->phys_state	= out_mad->data[33] >> 4;
	props->port_cap_flags	= be32_to_cpup((__be32 *) (out_mad->data + 20));
	if (netw_view)
		props->gid_tbl_len = out_mad->data[50];
	else
		props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
	props->max_msg_sz	= to_mdev(ibdev)->dev->caps.max_msg_sz;
	props->pkey_tbl_len	= to_mdev(ibdev)->dev->caps.pkey_table_len[port];
	props->bad_pkey_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 46));
	props->qkey_viol_cntr	= be16_to_cpup((__be16 *) (out_mad->data + 48));
	props->active_width	= out_mad->data[31] & 0xf;
	props->active_speed	= out_mad->data[35] >> 4;
	props->max_mtu		= out_mad->data[41] & 0xf;
	props->active_mtu	= out_mad->data[36] >> 4;
	props->subnet_timeout	= out_mad->data[51] & 0x1f;
	props->max_vl_num	= out_mad->data[37] >> 4;
	props->init_type_reply	= out_mad->data[41] >> 4;

	/* Check if extended speeds (EDR/FDR/...) are supported */
	if (props->port_cap_flags & IB_PORT_EXTENDED_SPEEDS_SUP) {
		ext_active_speed = out_mad->data[62] >> 4;

		switch (ext_active_speed) {
		case 1:
			props->active_speed = IB_SPEED_FDR;
			break;
		case 2:
			props->active_speed = IB_SPEED_EDR;
			break;
		}
	}

	/* If reported active speed is QDR, check if is FDR-10 */
	if (props->active_speed == IB_SPEED_QDR) {
		ib_init_query_mad(in_mad);
		in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
		in_mad->attr_mod = cpu_to_be32(port);

		err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port,
				   NULL, NULL, in_mad, out_mad);
		if (err)
			goto out;

		/* Checking LinkSpeedActive for FDR-10 */
		if (out_mad->data[15] & 0x1)
			props->active_speed = IB_SPEED_FDR10;
	}

	/* Avoid wrong speed value returned by FW if the IB link is down. */
	if (props->state == IB_PORT_DOWN)
		props->active_speed = IB_SPEED_SDR;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
	return state == IB_PORT_ACTIVE ?
		IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
}

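/*
 * Query an Ethernet (RoCE) port: speed and width come from QUERY_PORT
 * firmware data, while state and MTU are derived from the backing
 * net_device (via the bond master when the ports are bonded).
 */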
static int eth_link_query_port(struct ib_device *ibdev, u32 port,
			       struct ib_port_attr *props)
{

	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	struct mlx4_ib_iboe *iboe = &mdev->iboe;
	struct net_device *ndev;
	enum ib_mtu tmp;
	struct mlx4_cmd_mailbox *mailbox;
	int err = 0;
	int is_bonded = mlx4_is_bonded(mdev->dev);

	mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
			   MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
			   MLX4_CMD_WRAPPED);
	if (err)
		goto out;

	props->active_width	=  (((u8 *)mailbox->buf)[5] == 0x40) ||
				   (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
					   IB_WIDTH_4X : IB_WIDTH_1X;
	props->active_speed	=  (((u8 *)mailbox->buf)[5] == 0x20 /*56Gb*/) ?
					   IB_SPEED_FDR : IB_SPEED_QDR;
	props->port_cap_flags	= IB_PORT_CM_SUP;
	props->ip_gids = true;
	props->gid_tbl_len	= mdev->dev->caps.gid_table_len[port];
	props->max_msg_sz	= mdev->dev->caps.max_msg_sz;
	if (mdev->dev->caps.pkey_table_len[port])
		props->pkey_tbl_len = 1;
	props->max_mtu		= IB_MTU_4096;
	props->max_vl_num	= 2;
	props->state		= IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
	props->active_mtu	= IB_MTU_256;
	spin_lock_bh(&iboe->lock);
	ndev = iboe->netdevs[port - 1];
	if (ndev && is_bonded) {
		rcu_read_lock(); /* required to get upper dev */
		ndev = netdev_master_upper_dev_get_rcu(ndev);
		rcu_read_unlock();
	}
	if (!ndev)
		goto out_unlock;

	tmp = iboe_get_mtu(ndev->mtu);
	props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

	props->state		= (netif_running(ndev) && netif_carrier_ok(ndev)) ?
					IB_PORT_ACTIVE : IB_PORT_DOWN;
	props->phys_state	= state_to_phys_state(props->state);
out_unlock:
	spin_unlock_bh(&iboe->lock);
out:
	mlx4_free_cmd_mailbox(mdev->dev, mailbox);
	return err;
}

int __mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
			 struct ib_port_attr *props, int netw_view)
{
	int err;

	/* props being zeroed by the caller, avoid zeroing it here */

	err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
		ib_link_query_port(ibdev, port, props, netw_view) :
				eth_link_query_port(ibdev, port, props);

	return err;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u32 port,
			      struct ib_port_attr *props)
{
	/* returns host view */
	return __mlx4_ib_query_port(ibdev, port, props, 0);
}

int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
			union ib_gid *gid, int netw_view)
{
	struct ib_smp *in_mad;
	struct ib_smp *out_mad;
	int err = -ENOMEM;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	int clear = 0;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	ib_init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PORT_INFO;
	in_mad->attr_mod = cpu_to_be32(port);

	if (mlx4_is_mfunc(dev->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port, NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw, out_mad->data + 8, 8);

	if (mlx4_is_mfunc(dev->dev) && !netw_view) {
		if (index) {
			/* For any index > 0, return the null guid */
			err = 0;
			clear = 1;
			goto out;
		}
	}

	ib_init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_GUID_INFO;
	in_mad->attr_mod = cpu_to_be32(index / 8);

	err = mlx4_MAD_IFC(dev, mad_ifc_flags, port,
			   NULL, NULL, in_mad, out_mad);
	if (err)
		goto out;

	memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
	if (clear)
		memset(gid->raw + 8, 0, 8);
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
			     union ib_gid *gid)
{
	if (rdma_protocol_ib(ibdev, port))
		return __mlx4_ib_query_gid(ibdev, port, index, gid, 0);
	return 0;
}

static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port,
			       u64 *sl2vl_tbl)
{
	union sl2vl_tbl_to_u64 sl2vl64;
	struct ib_smp *in_mad;
	struct ib_smp *out_mad;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;
	int jj;

	if (mlx4_is_slave(to_mdev(ibdev)->dev)) {
		*sl2vl_tbl = 0;
		return 0;
	}

	in_mad = kzalloc(sizeof(*in_mad), GFP_KERNEL);
	out_mad = kmalloc(sizeof(*out_mad), GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	ib_init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_SL_TO_VL_TABLE;
	in_mad->attr_mod = 0;

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev))
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	for (jj = 0; jj < 8; jj++)
		sl2vl64.sl8[jj] = ((struct ib_smp *)out_mad)->data[jj];
	*sl2vl_tbl = sl2vl64.sl64;

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static void mlx4_init_sl2vl_tbl(struct mlx4_ib_dev *mdev)
{
	u64 sl2vl;
	int i;
	int err;

	for (i = 1; i <= mdev->dev->caps.num_ports; i++) {
		if (mdev->dev->caps.port_type[i] == MLX4_PORT_TYPE_ETH)
			continue;
		err = mlx4_ib_query_sl2vl(&mdev->ib_dev, i, &sl2vl);
		if (err) {
			pr_err("Unable to get default sl to vl mapping for port %d.  Using all zeroes (%d)\n",
			       i, err);
			sl2vl = 0;
		}
		atomic64_set(&mdev->sl2vl[i - 1], sl2vl);
	}
}

int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
			 u16 *pkey, int netw_view)
{
	struct ib_smp *in_mad;
	struct ib_smp *out_mad;
	int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS;
	int err = -ENOMEM;

	in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
	out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
	if (!in_mad || !out_mad)
		goto out;

	ib_init_query_mad(in_mad);
	in_mad->attr_id  = IB_SMP_ATTR_PKEY_TABLE;
	in_mad->attr_mod = cpu_to_be32(index / 32);

	if (mlx4_is_mfunc(to_mdev(ibdev)->dev) && netw_view)
		mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;

	err = mlx4_MAD_IFC(to_mdev(ibdev), mad_ifc_flags, port, NULL, NULL,
			   in_mad, out_mad);
	if (err)
		goto out;

	*pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
	kfree(in_mad);
	kfree(out_mad);
	return err;
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
			      u16 *pkey)
{
	return __mlx4_ib_query_pkey(ibdev, port, index, pkey, 0);
}

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx4_cmd_mailbox *mailbox;
	unsigned long flags;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	if (mlx4_is_slave(to_mdev(ibdev)->dev))
		return -EOPNOTSUPP;

	spin_lock_irqsave(&to_mdev(ibdev)->sm_lock, flags);
	memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	spin_unlock_irqrestore(&to_mdev(ibdev)->sm_lock, flags);

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	mailbox = mlx4_alloc_cmd_mailbox(to_mdev(ibdev)->dev);
	if (IS_ERR(mailbox))
		return 0;

	memcpy(mailbox->buf, props->node_desc, IB_DEVICE_NODE_DESC_MAX);
	mlx4_cmd(to_mdev(ibdev)->dev, mailbox->dma, 1, 0,
		 MLX4_CMD_SET_NODE, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);

	mlx4_free_cmd_mailbox(to_mdev(ibdev)->dev, mailbox);

	return 0;
}

static int mlx4_ib_SET_PORT(struct mlx4_ib_dev *dev, u32 port,
			    int reset_qkey_viols, u32 cap_mask)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
		*(u8 *) mailbox->buf	     = !!reset_qkey_viols << 6;
		((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
	} else {
		((u8 *) mailbox->buf)[3]     = !!reset_qkey_viols;
		((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
	}

	err = mlx4_cmd(dev->dev, mailbox->dma, port, MLX4_SET_PORT_IB_OPCODE,
		       MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_WRAPPED);

	mlx4_free_cmd_mailbox(dev->dev, mailbox);
	return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u32 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx4_ib_dev *mdev = to_mdev(ibdev);
	u8 is_eth = mdev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;
	struct ib_port_attr attr;
	u32 cap_mask;
	int err;

	/* return OK if this is RoCE. CM calls ib_modify_port() regardless
	 * of whether port link layer is ETH or IB. For ETH ports, qkey
	 * violations and port capabilities are not meaningful.
	 */
	if (is_eth)
		return 0;

	mutex_lock(&mdev->cap_mask_mutex);

	err = ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx4_ib_SET_PORT(mdev, port,
			       !!(mask & IB_PORT_RESET_QKEY_CNTR),
			       cap_mask);

out:
	mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
	return err;
}

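/*
 * Create a user context: the response layout depends on the negotiated
 * uverbs ABI version (the pre-device-caps v3 layout omits dev_caps and
 * cqe_size). A UAR page is reserved for the context's doorbells.
 */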
static int mlx4_ib_alloc_ucontext(struct ib_ucontext *uctx,
				  struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	struct mlx4_ib_dev *dev = to_mdev(ibdev);
	struct mlx4_ib_ucontext *context = to_mucontext(uctx);
	struct mlx4_ib_alloc_ucontext_resp_v3 resp_v3;
	struct mlx4_ib_alloc_ucontext_resp resp;
	int err;

	if (!dev->ib_active)
		return -EAGAIN;

	if (ibdev->ops.uverbs_abi_ver ==
	    MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION) {
		resp_v3.qp_tab_size	 = dev->dev->caps.num_qps;
		resp_v3.bf_reg_size	 = dev->dev->caps.bf_reg_size;
		resp_v3.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
	} else {
		resp.dev_caps	      = dev->dev->caps.userspace_caps;
		resp.qp_tab_size      = dev->dev->caps.num_qps;
		resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
		resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
		resp.cqe_size	      = dev->dev->caps.cqe_size;
	}

	err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
	if (err)
		return err;

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	INIT_LIST_HEAD(&context->wqn_ranges_list);
	mutex_init(&context->wqn_ranges_mutex);

	if (ibdev->ops.uverbs_abi_ver == MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION)
		err = ib_copy_to_udata(udata, &resp_v3, sizeof(resp_v3));
	else
		err = ib_copy_to_udata(udata, &resp, sizeof(resp));

	if (err) {
		mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
		return -EFAULT;
	}

	return err;
}

static void mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

	mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
}

static void mlx4_ib_disassociate_ucontext(struct ib_ucontext *ibcontext)
{
}

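/*
 * Map device pages into user space by page offset: 0 is the context's UAR
 * doorbell page, 1 its blue-flame page (write-combining), and 3 the
 * internal clock page used for timestamping.
 */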
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct mlx4_ib_dev *dev = to_mdev(context->device);

	switch (vma->vm_pgoff) {
	case 0:
		return rdma_user_mmap_io(context, vma,
					 to_mucontext(context)->uar.pfn,
					 PAGE_SIZE,
					 pgprot_noncached(vma->vm_page_prot),
					 NULL);

	case 1:
		if (dev->dev->caps.bf_reg_size == 0)
			return -EINVAL;
		return rdma_user_mmap_io(
			context, vma,
			to_mucontext(context)->uar.pfn +
				dev->dev->caps.num_uars,
			PAGE_SIZE, pgprot_writecombine(vma->vm_page_prot),
			NULL);

	case 3: {
		struct mlx4_clock_params params;
		int ret;

		ret = mlx4_get_internal_clock_params(dev->dev, &params);
		if (ret)
			return ret;

		return rdma_user_mmap_io(
			context, vma,
			(pci_resource_start(dev->dev->persist->pdev,
					    params.bar) +
			 params.offset) >>
				PAGE_SHIFT,
			PAGE_SIZE, pgprot_noncached(vma->vm_page_prot),
			NULL);
	}

	default:
		return -EINVAL;
	}
}

static int mlx4_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct mlx4_ib_pd *pd = to_mpd(ibpd);
	struct ib_device *ibdev = ibpd->device;
	int err;

	err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
	if (err)
		return err;

	if (udata && ib_copy_to_udata(udata, &pd->pdn, sizeof(__u32))) {
		mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
		return -EFAULT;
	}
	return 0;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
{
	mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
	return 0;
}

static int mlx4_ib_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata)
{
	struct mlx4_ib_dev *dev = to_mdev(ibxrcd->device);
	struct mlx4_ib_xrcd *xrcd = to_mxrcd(ibxrcd);
	struct ib_cq_init_attr cq_attr = {};
	int err;

	if (!(dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC))
		return -EOPNOTSUPP;

	err = mlx4_xrcd_alloc(dev->dev, &xrcd->xrcdn);
	if (err)
		return err;

	xrcd->pd = ib_alloc_pd(ibxrcd->device, 0);
	if (IS_ERR(xrcd->pd)) {
		err = PTR_ERR(xrcd->pd);
		goto err2;
	}

	cq_attr.cqe = 1;
	xrcd->cq = ib_create_cq(ibxrcd->device, NULL, NULL, xrcd, &cq_attr);
	if (IS_ERR(xrcd->cq)) {
		err = PTR_ERR(xrcd->cq);
		goto err3;
	}

	return 0;

err3:
	ib_dealloc_pd(xrcd->pd);
err2:
	mlx4_xrcd_free(dev->dev, xrcd->xrcdn);
	return err;
}

static int mlx4_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata)
{
	ib_destroy_cq(to_mxrcd(xrcd)->cq);
	ib_dealloc_pd(to_mxrcd(xrcd)->pd);
	mlx4_xrcd_free(to_mdev(xrcd->device)->dev, to_mxrcd(xrcd)->xrcdn);
	return 0;
}

static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
	struct mlx4_ib_qp *mqp = to_mqp(ibqp);
	struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
	struct mlx4_ib_gid_entry *ge;

	ge = kzalloc(sizeof *ge, GFP_KERNEL);
	if (!ge)
		return -ENOMEM;

	ge->gid = *gid;
	if (mlx4_ib_add_mc(mdev, mqp, gid)) {
		ge->port = mqp->port;
		ge->added = 1;
	}

	mutex_lock(&mqp->mutex);
	list_add_tail(&ge->list, &mqp->gid_list);
	mutex_unlock(&mqp->mutex);

	return 0;
}

static void mlx4_ib_delete_counters_table(struct mlx4_ib_dev *ibdev,
					  struct mlx4_ib_counters *ctr_table)
{
	struct counter_index *counter, *tmp_count;

	mutex_lock(&ctr_table->mutex);
	list_for_each_entry_safe(counter, tmp_count, &ctr_table->counters_list,
				 list) {
		if (counter->allocated)
			mlx4_counter_free(ibdev->dev, counter->index);
		list_del(&counter->list);
		kfree(counter);
	}
	mutex_unlock(&ctr_table->mutex);
}

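/*
 * Check whether the QP's port currently has a backing netdev;
 * add_gid_entry() uses the result to mark a multicast GID as "added"
 * on that port.
 */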
int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
		   union ib_gid *gid)
{
	struct net_device *ndev;
	int ret = 0;

	if (!mqp->port)
		return 0;

	spin_lock_bh(&mdev->iboe.lock);
	ndev = mdev->iboe.netdevs[mqp->port - 1];
	dev_hold(ndev);
	spin_unlock_bh(&mdev->iboe.lock);

	if (ndev) {
		ret = 1;
		dev_put(ndev);
	}

	return ret;
}

struct mlx4_ib_steering {
	struct list_head list;
	struct mlx4_flow_reg_id reg_id;
	union ib_gid gid;
};

#define LAST_ETH_FIELD vlan_tag
#define LAST_IB_FIELD sl
#define LAST_IPV4_FIELD dst_ip
#define LAST_TCP_UDP_FIELD src_port

/* Field is the last supported field */
#define FIELDS_NOT_SUPPORTED(filter, field)\
	memchr_inv((void *)&filter.field  +\
		   sizeof(filter.field), 0,\
		   sizeof(filter) -\
		   offsetof(typeof(filter), field) -\
		   sizeof(filter.field))

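/*
 * Translate one ib_flow_spec into the firmware's _rule_hw layout. The
 * FIELDS_NOT_SUPPORTED() check rejects specs that set any field beyond the
 * last one this driver knows how to program. Returns the size of the
 * hardware spec written, so the caller can advance through the rule buffer.
 */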
static int parse_flow_attr(struct mlx4_dev *dev,
			   u32 qp_num,
			   union ib_flow_spec *ib_spec,
			   struct _rule_hw *mlx4_spec)
{
	enum mlx4_net_trans_rule_id type;

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		if (FIELDS_NOT_SUPPORTED(ib_spec->eth.mask, LAST_ETH_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(mlx4_spec->eth.dst_mac, ib_spec->eth.val.dst_mac,
		       ETH_ALEN);
		memcpy(mlx4_spec->eth.dst_mac_msk, ib_spec->eth.mask.dst_mac,
		       ETH_ALEN);
		mlx4_spec->eth.vlan_tag = ib_spec->eth.val.vlan_tag;
		mlx4_spec->eth.vlan_tag_msk = ib_spec->eth.mask.vlan_tag;
		break;
	case IB_FLOW_SPEC_IB:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ib.mask, LAST_IB_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_IB;
		mlx4_spec->ib.l3_qpn =
			cpu_to_be32(qp_num);
		mlx4_spec->ib.qpn_mask =
			cpu_to_be32(MLX4_IB_FLOW_QPN_MASK);
		break;


	case IB_FLOW_SPEC_IPV4:
		if (FIELDS_NOT_SUPPORTED(ib_spec->ipv4.mask, LAST_IPV4_FIELD))
			return -ENOTSUPP;

		type = MLX4_NET_TRANS_RULE_ID_IPV4;
		mlx4_spec->ipv4.src_ip = ib_spec->ipv4.val.src_ip;
		mlx4_spec->ipv4.src_ip_msk = ib_spec->ipv4.mask.src_ip;
		mlx4_spec->ipv4.dst_ip = ib_spec->ipv4.val.dst_ip;
		mlx4_spec->ipv4.dst_ip_msk = ib_spec->ipv4.mask.dst_ip;
		break;

	case IB_FLOW_SPEC_TCP:
	case IB_FLOW_SPEC_UDP:
		if (FIELDS_NOT_SUPPORTED(ib_spec->tcp_udp.mask, LAST_TCP_UDP_FIELD))
			return -ENOTSUPP;

		type = ib_spec->type == IB_FLOW_SPEC_TCP ?
					MLX4_NET_TRANS_RULE_ID_TCP :
					MLX4_NET_TRANS_RULE_ID_UDP;
		mlx4_spec->tcp_udp.dst_port = ib_spec->tcp_udp.val.dst_port;
		mlx4_spec->tcp_udp.dst_port_msk = ib_spec->tcp_udp.mask.dst_port;
		mlx4_spec->tcp_udp.src_port = ib_spec->tcp_udp.val.src_port;
		mlx4_spec->tcp_udp.src_port_msk = ib_spec->tcp_udp.mask.src_port;
		break;

	default:
		return -EINVAL;
	}
	if (mlx4_map_sw_to_hw_steering_id(dev, type) < 0 ||
	    mlx4_hw_rule_sz(dev, type) < 0)
		return -EINVAL;
	mlx4_spec->id = cpu_to_be16(mlx4_map_sw_to_hw_steering_id(dev, type));
	mlx4_spec->size = mlx4_hw_rule_sz(dev, type) >> 2;
	return mlx4_hw_rule_sz(dev, type);
}

struct default_rules {
	__u32 mandatory_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 mandatory_not_fields[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u32 rules_create_list[IB_FLOW_SPEC_SUPPORT_LAYERS];
	__u8  link_layer;
};
static const struct default_rules default_table[] = {
	{
		.mandatory_fields = {IB_FLOW_SPEC_IPV4},
		.mandatory_not_fields = {IB_FLOW_SPEC_ETH},
		.rules_create_list = {IB_FLOW_SPEC_IB},
		.link_layer = IB_LINK_LAYER_INFINIBAND
	}
};

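/*
 * Match a user flow attribute against default_table: the link layer must
 * match, every mandatory spec must be present (specs are assumed sorted by
 * layer), and none of the mandatory-not specs may appear. Returns the table
 * index of the matching entry, or -1 if none matches.
 */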
| 1428 | static int __mlx4_ib_default_rules_match(struct ib_qp *qp, |
| 1429 | struct ib_flow_attr *flow_attr) |
| 1430 | { |
| 1431 | int i, j, k; |
| 1432 | void *ib_flow; |
| 1433 | const struct default_rules *pdefault_rules = default_table; |
| 1434 | u8 link_layer = rdma_port_get_link_layer(device: qp->device, port_num: flow_attr->port); |
| 1435 | |
| 1436 | for (i = 0; i < ARRAY_SIZE(default_table); i++, pdefault_rules++) { |
| 1437 | __u32 field_types[IB_FLOW_SPEC_SUPPORT_LAYERS]; |
| 1438 | memset(&field_types, 0, sizeof(field_types)); |
| 1439 | |
| 1440 | if (link_layer != pdefault_rules->link_layer) |
| 1441 | continue; |
| 1442 | |
| 1443 | ib_flow = flow_attr + 1; |
| 1444 | /* we assume the specs are sorted */ |
| 1445 | for (j = 0, k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS && |
| 1446 | j < flow_attr->num_of_specs; k++) { |
| 1447 | union ib_flow_spec *current_flow = |
| 1448 | (union ib_flow_spec *)ib_flow; |
| 1449 | |
| 1450 | /* same layer but different type */ |
| 1451 | if (((current_flow->type & IB_FLOW_SPEC_LAYER_MASK) == |
| 1452 | (pdefault_rules->mandatory_fields[k] & |
| 1453 | IB_FLOW_SPEC_LAYER_MASK)) && |
| 1454 | (current_flow->type != |
| 1455 | pdefault_rules->mandatory_fields[k])) |
| 1456 | goto out; |
| 1457 | |
| 1458 | /* same layer, try match next one */ |
| 1459 | if (current_flow->type == |
| 1460 | pdefault_rules->mandatory_fields[k]) { |
| 1461 | j++; |
| 1462 | ib_flow += |
| 1463 | ((union ib_flow_spec *)ib_flow)->size; |
| 1464 | } |
| 1465 | } |
| 1466 | |
| 1467 | ib_flow = flow_attr + 1; |
| 1468 | for (j = 0; j < flow_attr->num_of_specs; |
| 1469 | j++, ib_flow += ((union ib_flow_spec *)ib_flow)->size) |
| 1470 | for (k = 0; k < IB_FLOW_SPEC_SUPPORT_LAYERS; k++) |
| 1471 | /* same layer and same type */ |
| 1472 | if (((union ib_flow_spec *)ib_flow)->type == |
| 1473 | pdefault_rules->mandatory_not_fields[k]) |
| 1474 | goto out; |
| 1475 | |
| 1476 | return i; |
| 1477 | } |
| 1478 | out: |
| 1479 | return -1; |
| 1480 | } |
| 1481 | |
| 1482 | static int __mlx4_ib_create_default_rules( |
| 1483 | struct mlx4_ib_dev *mdev, |
| 1484 | struct ib_qp *qp, |
| 1485 | const struct default_rules *pdefault_rules, |
| 1486 | struct _rule_hw *mlx4_spec) { |
| 1487 | int size = 0; |
| 1488 | int i; |
| 1489 | |
| 1490 | for (i = 0; i < ARRAY_SIZE(pdefault_rules->rules_create_list); i++) { |
| 1491 | union ib_flow_spec ib_spec = {}; |
| 1492 | int ret; |
| 1493 | |
| 1494 | switch (pdefault_rules->rules_create_list[i]) { |
| 1495 | case 0: |
| 1496 | /* no rule */ |
| 1497 | continue; |
| 1498 | case IB_FLOW_SPEC_IB: |
| 1499 | ib_spec.type = IB_FLOW_SPEC_IB; |
| 1500 | ib_spec.size = sizeof(struct ib_flow_spec_ib); |
| 1501 | |
| 1502 | break; |
| 1503 | default: |
| 1504 | /* invalid rule */ |
| 1505 | return -EINVAL; |
| 1506 | } |
| 1507 | /* We must put empty rule, qpn is being ignored */ |
| 1508 | ret = parse_flow_attr(dev: mdev->dev, qp_num: 0, ib_spec: &ib_spec, |
| 1509 | mlx4_spec); |
| 1510 | if (ret < 0) { |
| 1511 | pr_info("invalid parsing\n" ); |
| 1512 | return -EINVAL; |
| 1513 | } |
| 1514 | |
| 1515 | mlx4_spec = (void *)mlx4_spec + ret; |
| 1516 | size += ret; |
| 1517 | } |
| 1518 | return size; |
| 1519 | } |
| 1520 | |
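|      | /*
|      |  * Build a flow-steering rule in a command mailbox - a
|      |  * mlx4_net_trans_rule_hw_ctrl header followed by the translated specs -
|      |  * and attach it with MLX4_QP_FLOW_STEERING_ATTACH.  The returned reg_id
|      |  * is the handle later used to detach the rule.
|      |  */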
| 1521 | static int __mlx4_ib_create_flow(struct ib_qp *qp, struct ib_flow_attr *flow_attr, |
| 1522 | int domain, |
| 1523 | enum mlx4_net_trans_promisc_mode flow_type, |
| 1524 | u64 *reg_id) |
| 1525 | { |
| 1526 | int ret, i; |
| 1527 | int size = 0; |
| 1528 | void *ib_flow; |
| 1529 | struct mlx4_ib_dev *mdev = to_mdev(ibdev: qp->device); |
| 1530 | struct mlx4_cmd_mailbox *mailbox; |
| 1531 | struct mlx4_net_trans_rule_hw_ctrl *ctrl; |
| 1532 | int default_flow; |
| 1533 | |
| 1534 | if (flow_attr->priority > MLX4_IB_FLOW_MAX_PRIO) { |
| 1535 | pr_err("Invalid priority value %d\n", flow_attr->priority);
| 1536 | return -EINVAL; |
| 1537 | } |
| 1538 | |
| 1539 | if (mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type) < 0)
| 1540 | return -EINVAL;
| 1541 | 
| 1542 | mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
| 1543 | if (IS_ERR(mailbox))
| 1544 | return PTR_ERR(mailbox);
| 1545 | ctrl = mailbox->buf;
| 1546 | 
| 1547 | ctrl->prio = cpu_to_be16(domain | flow_attr->priority);
| 1548 | ctrl->type = mlx4_map_sw_to_hw_steering_mode(mdev->dev, flow_type);
| 1549 | ctrl->port = flow_attr->port; |
| 1550 | ctrl->qpn = cpu_to_be32(qp->qp_num); |
| 1551 | |
| 1552 | ib_flow = flow_attr + 1; |
| 1553 | size += sizeof(struct mlx4_net_trans_rule_hw_ctrl); |
| 1554 | /* Add default flows */ |
| 1555 | default_flow = __mlx4_ib_default_rules_match(qp, flow_attr); |
| 1556 | if (default_flow >= 0) { |
| 1557 | ret = __mlx4_ib_create_default_rules(
| 1558 | mdev, qp, default_table + default_flow,
| 1559 | mailbox->buf + size);
| 1560 | if (ret < 0) {
| 1561 | mlx4_free_cmd_mailbox(mdev->dev, mailbox);
| 1562 | return -EINVAL; |
| 1563 | } |
| 1564 | size += ret; |
| 1565 | } |
| 1566 | for (i = 0; i < flow_attr->num_of_specs; i++) { |
| 1567 | ret = parse_flow_attr(mdev->dev, qp->qp_num, ib_flow,
| 1568 | mailbox->buf + size);
| 1569 | if (ret < 0) {
| 1570 | mlx4_free_cmd_mailbox(mdev->dev, mailbox);
| 1571 | return -EINVAL; |
| 1572 | } |
| 1573 | ib_flow += ((union ib_flow_spec *) ib_flow)->size; |
| 1574 | size += ret; |
| 1575 | } |
| 1576 | |
| 1577 | if (mlx4_is_master(mdev->dev) && flow_type == MLX4_FS_REGULAR &&
| 1578 | flow_attr->num_of_specs == 1) {
| 1579 | struct _rule_hw *rule_header = (struct _rule_hw *)(ctrl + 1);
| 1580 | enum ib_flow_spec_type header_spec =
| 1581 | ((union ib_flow_spec *)(flow_attr + 1))->type;
| 1582 | 
| 1583 | if (header_spec == IB_FLOW_SPEC_ETH)
| 1584 | mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
| 1585 | } |
| 1586 | |
| 1587 | ret = mlx4_cmd_imm(mdev->dev, mailbox->dma, reg_id, size >> 2, 0,
| 1588 | MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
| 1589 | MLX4_CMD_NATIVE);
| 1590 | if (ret == -ENOMEM)
| 1591 | pr_err("mcg table is full. Failed to register network rule.\n");
| 1592 | else if (ret == -ENXIO)
| 1593 | pr_err("Device managed flow steering is disabled. Failed to register network rule.\n");
| 1594 | else if (ret)
| 1595 | pr_err("Invalid argument. Failed to register network rule.\n");
| 1596 | 
| 1597 | mlx4_free_cmd_mailbox(mdev->dev, mailbox);
| 1598 | return ret; |
| 1599 | } |
| 1600 | |
| 1601 | static int __mlx4_ib_destroy_flow(struct mlx4_dev *dev, u64 reg_id) |
| 1602 | { |
| 1603 | int err; |
| 1604 | err = mlx4_cmd(dev, reg_id, 0, 0,
| 1605 | MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
| 1606 | MLX4_CMD_NATIVE);
| 1607 | if (err)
| 1608 | pr_err("Failed to detach network rule. registration id = 0x%llx\n",
| 1609 | reg_id);
| 1610 | return err; |
| 1611 | } |
| 1612 | |
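|      | /*
|      |  * For a rule consisting of a single ETH spec on a VXLAN-offload capable
|      |  * device, also add a tunnel steering rule keyed on the destination MAC,
|      |  * so that encapsulated traffic reaches the QP.  A no-op in DMFS A0
|      |  * static mode or when the rule does not have this shape.
|      |  */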
| 1613 | static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_attr, |
| 1614 | u64 *reg_id) |
| 1615 | { |
| 1616 | void *ib_flow; |
| 1617 | union ib_flow_spec *ib_spec; |
| 1618 | struct mlx4_dev *dev = to_mdev(qp->device)->dev;
| 1619 | int err = 0; |
| 1620 | |
| 1621 | if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || |
| 1622 | dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) |
| 1623 | return 0; /* do nothing */ |
| 1624 | |
| 1625 | ib_flow = flow_attr + 1; |
| 1626 | ib_spec = (union ib_flow_spec *)ib_flow; |
| 1627 | |
| 1628 | if (ib_spec->type != IB_FLOW_SPEC_ETH || flow_attr->num_of_specs != 1) |
| 1629 | return 0; /* do nothing */ |
| 1630 | |
| 1631 | err = mlx4_tunnel_steer_add(to_mdev(qp->device)->dev, ib_spec->eth.val.dst_mac,
| 1632 | flow_attr->port, qp->qp_num,
| 1633 | MLX4_DOMAIN_UVERBS | (flow_attr->priority & 0xff),
| 1634 | reg_id);
| 1635 | return err; |
| 1636 | } |
| 1637 | |
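|      | /*
|      |  * Translate an IB_FLOW_ATTR_FLAGS_DONT_TRAP rule into sniffer mode(s).
|      |  * Only a single ETH spec (or none) at priority 0 is supported.  The only
|      |  * valid non-empty dst_mac mask is the multicast bit alone: XOR-ing that
|      |  * bit out must leave an all-zero address, which the check below verifies.
|      |  */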
| 1638 | static int mlx4_ib_add_dont_trap_rule(struct mlx4_dev *dev, |
| 1639 | struct ib_flow_attr *flow_attr, |
| 1640 | enum mlx4_net_trans_promisc_mode *type) |
| 1641 | { |
| 1642 | int err = 0; |
| 1643 | |
| 1644 | if (!(dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_UC_MC_SNIFFER) || |
| 1645 | (dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) || |
| 1646 | (flow_attr->num_of_specs > 1) || (flow_attr->priority != 0)) { |
| 1647 | return -EOPNOTSUPP; |
| 1648 | } |
| 1649 | |
| 1650 | if (flow_attr->num_of_specs == 0) { |
| 1651 | type[0] = MLX4_FS_MC_SNIFFER; |
| 1652 | type[1] = MLX4_FS_UC_SNIFFER; |
| 1653 | } else { |
| 1654 | union ib_flow_spec *ib_spec; |
| 1655 | |
| 1656 | ib_spec = (union ib_flow_spec *)(flow_attr + 1); |
| 1657 | if (ib_spec->type != IB_FLOW_SPEC_ETH) |
| 1658 | return -EINVAL; |
| 1659 | |
| 1660 | /* if the mask is all zeros, sniff both MC and UC */
| 1661 | if (is_zero_ether_addr(ib_spec->eth.mask.dst_mac)) {
| 1662 | type[0] = MLX4_FS_MC_SNIFFER; |
| 1663 | type[1] = MLX4_FS_UC_SNIFFER; |
| 1664 | } else { |
| 1665 | u8 mac[ETH_ALEN] = {ib_spec->eth.mask.dst_mac[0] ^ 0x01, |
| 1666 | ib_spec->eth.mask.dst_mac[1], |
| 1667 | ib_spec->eth.mask.dst_mac[2], |
| 1668 | ib_spec->eth.mask.dst_mac[3], |
| 1669 | ib_spec->eth.mask.dst_mac[4], |
| 1670 | ib_spec->eth.mask.dst_mac[5]}; |
| 1671 | |
| 1672 | /* The XOR above touched only the MC bit; a non-empty mask
| 1673 |  * is valid only if that bit is set and the rest are zero.
| 1674 |  */
| 1675 | if (!is_zero_ether_addr(&mac[0]))
| 1676 | return -EINVAL;
| 1677 | 
| 1678 | if (is_multicast_ether_addr(ib_spec->eth.val.dst_mac))
| 1679 | type[0] = MLX4_FS_MC_SNIFFER; |
| 1680 | else |
| 1681 | type[0] = MLX4_FS_UC_SNIFFER; |
| 1682 | } |
| 1683 | } |
| 1684 | |
| 1685 | return err; |
| 1686 | } |
| 1687 | |
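|      | /*
|      |  * uverbs entry point: map the IB flow attribute onto one or two mlx4
|      |  * promiscuous-mode rule types, attach them, and - when the two ports are
|      |  * bonded - attach a mirror copy of each rule on port 2, since the
|      |  * application only ever sees one port.
|      |  */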
| 1688 | static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp, |
| 1689 | struct ib_flow_attr *flow_attr, |
| 1690 | struct ib_udata *udata) |
| 1691 | { |
| 1692 | int err = 0, i = 0, j = 0; |
| 1693 | struct mlx4_ib_flow *mflow; |
| 1694 | enum mlx4_net_trans_promisc_mode type[2]; |
| 1695 | struct mlx4_dev *dev = (to_mdev(qp->device))->dev;
| 1696 | int is_bonded = mlx4_is_bonded(dev); |
| 1697 | |
| 1698 | if (flow_attr->flags & ~IB_FLOW_ATTR_FLAGS_DONT_TRAP)
| 1699 | return ERR_PTR(-EOPNOTSUPP);
| 1700 | 
| 1701 | if ((flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP) &&
| 1702 | (flow_attr->type != IB_FLOW_ATTR_NORMAL))
| 1703 | return ERR_PTR(-EOPNOTSUPP);
| 1704 | 
| 1705 | if (udata &&
| 1706 | udata->inlen && !ib_is_udata_cleared(udata, 0, udata->inlen))
| 1707 | return ERR_PTR(-EOPNOTSUPP);
| 1708 | |
| 1709 | memset(type, 0, sizeof(type)); |
| 1710 | |
| 1711 | mflow = kzalloc(sizeof(*mflow), GFP_KERNEL); |
| 1712 | if (!mflow) { |
| 1713 | err = -ENOMEM; |
| 1714 | goto err_free; |
| 1715 | } |
| 1716 | |
| 1717 | switch (flow_attr->type) { |
| 1718 | case IB_FLOW_ATTR_NORMAL: |
| 1719 | /* If the dont-trap flag (continue match) is set, then under
| 1720 |  * specific conditions traffic is replicated to the given qp
| 1721 |  * without stealing it
| 1722 |  */
| 1723 | if (unlikely(flow_attr->flags & IB_FLOW_ATTR_FLAGS_DONT_TRAP)) { |
| 1724 | err = mlx4_ib_add_dont_trap_rule(dev, |
| 1725 | flow_attr, |
| 1726 | type); |
| 1727 | if (err) |
| 1728 | goto err_free; |
| 1729 | } else { |
| 1730 | type[0] = MLX4_FS_REGULAR; |
| 1731 | } |
| 1732 | break; |
| 1733 | |
| 1734 | case IB_FLOW_ATTR_ALL_DEFAULT: |
| 1735 | type[0] = MLX4_FS_ALL_DEFAULT; |
| 1736 | break; |
| 1737 | |
| 1738 | case IB_FLOW_ATTR_MC_DEFAULT: |
| 1739 | type[0] = MLX4_FS_MC_DEFAULT; |
| 1740 | break; |
| 1741 | |
| 1742 | case IB_FLOW_ATTR_SNIFFER: |
| 1743 | type[0] = MLX4_FS_MIRROR_RX_PORT; |
| 1744 | type[1] = MLX4_FS_MIRROR_SX_PORT; |
| 1745 | break; |
| 1746 | |
| 1747 | default: |
| 1748 | err = -EINVAL; |
| 1749 | goto err_free; |
| 1750 | } |
| 1751 | |
| 1752 | while (i < ARRAY_SIZE(type) && type[i]) { |
| 1753 | err = __mlx4_ib_create_flow(qp, flow_attr, MLX4_DOMAIN_UVERBS,
| 1754 | type[i], &mflow->reg_id[i].id);
| 1755 | if (err) |
| 1756 | goto err_create_flow; |
| 1757 | if (is_bonded) { |
| 1758 | /* Application always sees one port so the mirror rule |
| 1759 | * must be on port #2 |
| 1760 | */ |
| 1761 | flow_attr->port = 2; |
| 1762 | err = __mlx4_ib_create_flow(qp, flow_attr,
| 1763 | MLX4_DOMAIN_UVERBS, type[j],
| 1764 | &mflow->reg_id[j].mirror);
| 1765 | flow_attr->port = 1; |
| 1766 | if (err) |
| 1767 | goto err_create_flow; |
| 1768 | j++; |
| 1769 | } |
| 1770 | |
| 1771 | i++; |
| 1772 | } |
| 1773 | |
| 1774 | if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) { |
| 1775 | err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
| 1776 | &mflow->reg_id[i].id);
| 1777 | if (err) |
| 1778 | goto err_create_flow; |
| 1779 | |
| 1780 | if (is_bonded) { |
| 1781 | flow_attr->port = 2; |
| 1782 | err = mlx4_ib_tunnel_steer_add(qp, flow_attr,
| 1783 | &mflow->reg_id[j].mirror);
| 1784 | flow_attr->port = 1; |
| 1785 | if (err) |
| 1786 | goto err_create_flow; |
| 1787 | j++; |
| 1788 | } |
| 1789 | /* the mirror rule, if needed, was created above */
| 1790 | i++; |
| 1791 | } |
| 1792 | |
| 1793 | return &mflow->ibflow; |
| 1794 | |
| 1795 | err_create_flow: |
| 1796 | while (i) {
| 1797 | (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
| 1798 | mflow->reg_id[i].id);
| 1799 | i--;
| 1800 | }
| 1801 | 
| 1802 | while (j) {
| 1803 | (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev,
| 1804 | mflow->reg_id[j].mirror);
| 1805 | j--;
| 1806 | }
| 1807 | err_free:
| 1808 | kfree(mflow);
| 1809 | return ERR_PTR(err);
| 1810 | } |
| 1811 | |
| 1812 | static int mlx4_ib_destroy_flow(struct ib_flow *flow_id) |
| 1813 | { |
| 1814 | int err, ret = 0; |
| 1815 | int i = 0; |
| 1816 | struct mlx4_ib_dev *mdev = to_mdev(flow_id->qp->device);
| 1817 | struct mlx4_ib_flow *mflow = to_mflow(flow_id);
| 1818 | 
| 1819 | while (i < ARRAY_SIZE(mflow->reg_id) && mflow->reg_id[i].id) {
| 1820 | err = __mlx4_ib_destroy_flow(mdev->dev, mflow->reg_id[i].id);
| 1821 | if (err)
| 1822 | ret = err;
| 1823 | if (mflow->reg_id[i].mirror) {
| 1824 | err = __mlx4_ib_destroy_flow(mdev->dev,
| 1825 | mflow->reg_id[i].mirror);
| 1826 | if (err) |
| 1827 | ret = err; |
| 1828 | } |
| 1829 | i++; |
| 1830 | } |
| 1831 | |
| 1832 | kfree(mflow);
| 1833 | return ret; |
| 1834 | } |
| 1835 | |
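|      | /*
|      |  * Attach the QP to a multicast group.  In device-managed steering mode
|      |  * the returned reg_id must be remembered (on mqp->steering_rules) so the
|      |  * rule can be detached later; with bonded ports a mirror attachment is
|      |  * made on the other port as well.
|      |  */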
| 1836 | static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) |
| 1837 | { |
| 1838 | int err; |
| 1839 | struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
| 1840 | struct mlx4_dev *dev = mdev->dev; |
| 1841 | struct mlx4_ib_qp *mqp = to_mqp(ibqp); |
| 1842 | struct mlx4_ib_steering *ib_steering = NULL; |
| 1843 | enum mlx4_protocol prot = MLX4_PROT_IB_IPV6; |
| 1844 | struct mlx4_flow_reg_id reg_id; |
| 1845 | |
| 1846 | if (mdev->dev->caps.steering_mode == |
| 1847 | MLX4_STEERING_MODE_DEVICE_MANAGED) { |
| 1848 | ib_steering = kmalloc(sizeof(*ib_steering), GFP_KERNEL); |
| 1849 | if (!ib_steering) |
| 1850 | return -ENOMEM; |
| 1851 | } |
| 1852 | |
| 1853 | err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, mqp->port,
| 1854 | !!(mqp->flags &
| 1855 | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
| 1856 | prot, &reg_id.id);
| 1857 | if (err) {
| 1858 | pr_err("multicast attach op failed, err %d\n", err);
| 1859 | goto err_malloc; |
| 1860 | } |
| 1861 | |
| 1862 | reg_id.mirror = 0; |
| 1863 | if (mlx4_is_bonded(dev)) { |
| 1864 | err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw,
| 1865 | (mqp->port == 1) ? 2 : 1,
| 1866 | !!(mqp->flags &
| 1867 | MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK),
| 1868 | prot, &reg_id.mirror);
| 1869 | if (err) |
| 1870 | goto err_add; |
| 1871 | } |
| 1872 | |
| 1873 | err = add_gid_entry(ibqp, gid); |
| 1874 | if (err) |
| 1875 | goto err_add; |
| 1876 | |
| 1877 | if (ib_steering) { |
| 1878 | memcpy(ib_steering->gid.raw, gid->raw, 16); |
| 1879 | ib_steering->reg_id = reg_id; |
| 1880 | mutex_lock(&mqp->mutex); |
| 1881 | list_add(&ib_steering->list, &mqp->steering_rules);
| 1882 | mutex_unlock(&mqp->mutex);
| 1883 | } |
| 1884 | return 0; |
| 1885 | |
| 1886 | err_add: |
| 1887 | mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
| 1888 | prot, reg_id.id);
| 1889 | if (reg_id.mirror)
| 1890 | mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
| 1891 | prot, reg_id.mirror);
| 1892 | err_malloc:
| 1893 | kfree(ib_steering);
| 1894 | |
| 1895 | return err; |
| 1896 | } |
| 1897 | |
| 1898 | static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw) |
| 1899 | { |
| 1900 | struct mlx4_ib_gid_entry *ge; |
| 1901 | struct mlx4_ib_gid_entry *tmp; |
| 1902 | struct mlx4_ib_gid_entry *ret = NULL; |
| 1903 | |
| 1904 | list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) { |
| 1905 | if (!memcmp(raw, ge->gid.raw, 16)) {
| 1906 | ret = ge; |
| 1907 | break; |
| 1908 | } |
| 1909 | } |
| 1910 | |
| 1911 | return ret; |
| 1912 | } |
| 1913 | |
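|      | /*
|      |  * Detach from a multicast group: look up the reg_id saved at attach time
|      |  * (device-managed mode), detach on both ports when bonded, and drop the
|      |  * matching gid_list entry along with its netdev reference.
|      |  */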
| 1914 | static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) |
| 1915 | { |
| 1916 | int err; |
| 1917 | struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
| 1918 | struct mlx4_dev *dev = mdev->dev; |
| 1919 | struct mlx4_ib_qp *mqp = to_mqp(ibqp); |
| 1920 | struct net_device *ndev; |
| 1921 | struct mlx4_ib_gid_entry *ge; |
| 1922 | struct mlx4_flow_reg_id reg_id = {0, 0}; |
| 1923 | enum mlx4_protocol prot = MLX4_PROT_IB_IPV6; |
| 1924 | |
| 1925 | if (mdev->dev->caps.steering_mode == |
| 1926 | MLX4_STEERING_MODE_DEVICE_MANAGED) { |
| 1927 | struct mlx4_ib_steering *ib_steering; |
| 1928 | |
| 1929 | mutex_lock(&mqp->mutex); |
| 1930 | list_for_each_entry(ib_steering, &mqp->steering_rules, list) { |
| 1931 | if (!memcmp(ib_steering->gid.raw, gid->raw, 16)) {
| 1932 | list_del(&ib_steering->list);
| 1933 | break;
| 1934 | }
| 1935 | }
| 1936 | mutex_unlock(&mqp->mutex);
| 1937 | if (&ib_steering->list == &mqp->steering_rules) {
| 1938 | pr_err("Couldn't find reg_id for mgid. Steering rule is left attached\n");
| 1939 | return -EINVAL;
| 1940 | }
| 1941 | reg_id = ib_steering->reg_id;
| 1942 | kfree(ib_steering);
| 1943 | } |
| 1944 | |
| 1945 | err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
| 1946 | prot, reg_id.id);
| 1947 | if (err) |
| 1948 | return err; |
| 1949 | |
| 1950 | if (mlx4_is_bonded(dev)) { |
| 1951 | err = mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw,
| 1952 | prot, reg_id.mirror);
| 1953 | if (err) |
| 1954 | return err; |
| 1955 | } |
| 1956 | |
| 1957 | mutex_lock(&mqp->mutex);
| 1958 | ge = find_gid_entry(mqp, gid->raw);
| 1959 | if (ge) {
| 1960 | spin_lock_bh(&mdev->iboe.lock);
| 1961 | ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
| 1962 | dev_hold(ndev);
| 1963 | spin_unlock_bh(&mdev->iboe.lock);
| 1964 | dev_put(ndev);
| 1965 | list_del(&ge->list);
| 1966 | kfree(ge);
| 1967 | } else
| 1968 | pr_warn("could not find mgid entry\n");
| 1969 | 
| 1970 | mutex_unlock(&mqp->mutex);
| 1971 | |
| 1972 | return 0; |
| 1973 | } |
| 1974 | |
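|      | /*
|      |  * Query NodeDescription and NodeInfo through the MAD interface and cache
|      |  * the node description, hardware revision, and node GUID on the device.
|      |  */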
| 1975 | static int init_node_data(struct mlx4_ib_dev *dev) |
| 1976 | { |
| 1977 | struct ib_smp *in_mad; |
| 1978 | struct ib_smp *out_mad; |
| 1979 | int mad_ifc_flags = MLX4_MAD_IFC_IGNORE_KEYS; |
| 1980 | int err = -ENOMEM; |
| 1981 | |
| 1982 | in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); |
| 1983 | out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); |
| 1984 | if (!in_mad || !out_mad) |
| 1985 | goto out; |
| 1986 | |
| 1987 | ib_init_query_mad(in_mad);
| 1988 | in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
| 1989 | if (mlx4_is_master(dev->dev))
| 1990 | mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
| 1991 | 
| 1992 | err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
| 1993 | if (err) |
| 1994 | goto out; |
| 1995 | |
| 1996 | memcpy(dev->ib_dev.node_desc, out_mad->data, IB_DEVICE_NODE_DESC_MAX); |
| 1997 | |
| 1998 | in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; |
| 1999 | |
| 2000 | err = mlx4_MAD_IFC(dev, mad_ifc_flags, 1, NULL, NULL, in_mad, out_mad);
| 2001 | if (err) |
| 2002 | goto out; |
| 2003 | |
| 2004 | dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
| 2005 | memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); |
| 2006 | |
| 2007 | out: |
| 2008 | kfree(in_mad);
| 2009 | kfree(out_mad);
| 2010 | return err; |
| 2011 | } |
| 2012 | |
| 2013 | static ssize_t hca_type_show(struct device *device, |
| 2014 | struct device_attribute *attr, char *buf) |
| 2015 | { |
| 2016 | struct mlx4_ib_dev *dev = |
| 2017 | rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev); |
| 2018 | |
| 2019 | return sysfs_emit(buf, "MT%d\n", dev->dev->persist->pdev->device);
| 2020 | } |
| 2021 | static DEVICE_ATTR_RO(hca_type); |
| 2022 | |
| 2023 | static ssize_t hw_rev_show(struct device *device, |
| 2024 | struct device_attribute *attr, char *buf) |
| 2025 | { |
| 2026 | struct mlx4_ib_dev *dev = |
| 2027 | rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev); |
| 2028 | |
| 2029 | return sysfs_emit(buf, "%x\n", dev->dev->rev_id);
| 2030 | } |
| 2031 | static DEVICE_ATTR_RO(hw_rev); |
| 2032 | |
| 2033 | static ssize_t board_id_show(struct device *device, |
| 2034 | struct device_attribute *attr, char *buf) |
| 2035 | { |
| 2036 | struct mlx4_ib_dev *dev = |
| 2037 | rdma_device_to_drv_device(device, struct mlx4_ib_dev, ib_dev); |
| 2038 | |
| 2039 | return sysfs_emit(buf, "%.*s\n", MLX4_BOARD_ID_LEN, dev->dev->board_id);
| 2040 | } |
| 2041 | static DEVICE_ATTR_RO(board_id); |
| 2042 | |
| 2043 | static struct attribute *mlx4_class_attributes[] = { |
| 2044 | &dev_attr_hw_rev.attr, |
| 2045 | &dev_attr_hca_type.attr, |
| 2046 | &dev_attr_board_id.attr, |
| 2047 | NULL |
| 2048 | }; |
| 2049 | |
| 2050 | static const struct attribute_group mlx4_attr_group = { |
| 2051 | .attrs = mlx4_class_attributes, |
| 2052 | }; |
| 2053 | |
| 2054 | struct diag_counter { |
| 2055 | const char *name; |
| 2056 | u32 offset; |
| 2057 | }; |
| 2058 | |
| 2059 | #define DIAG_COUNTER(_name, _offset) \ |
| 2060 | { .name = #_name, .offset = _offset } |
| 2061 | |
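|      | /*
|      |  * Diagnostic counters are read from the firmware QUERY_DIAG area at the
|      |  * offsets below; diag_basic is always available, diag_ext requires the
|      |  * DIAG_PER_PORT capability, and diag_device_only entries exist only in
|      |  * the device-wide view.
|      |  */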
| 2062 | static const struct diag_counter diag_basic[] = { |
| 2063 | DIAG_COUNTER(rq_num_lle, 0x00), |
| 2064 | DIAG_COUNTER(sq_num_lle, 0x04), |
| 2065 | DIAG_COUNTER(rq_num_lqpoe, 0x08), |
| 2066 | DIAG_COUNTER(sq_num_lqpoe, 0x0C), |
| 2067 | DIAG_COUNTER(rq_num_lpe, 0x18), |
| 2068 | DIAG_COUNTER(sq_num_lpe, 0x1C), |
| 2069 | DIAG_COUNTER(rq_num_wrfe, 0x20), |
| 2070 | DIAG_COUNTER(sq_num_wrfe, 0x24), |
| 2071 | DIAG_COUNTER(sq_num_mwbe, 0x2C), |
| 2072 | DIAG_COUNTER(sq_num_bre, 0x34), |
| 2073 | DIAG_COUNTER(sq_num_rire, 0x44), |
| 2074 | DIAG_COUNTER(rq_num_rire, 0x48), |
| 2075 | DIAG_COUNTER(sq_num_rae, 0x4C), |
| 2076 | DIAG_COUNTER(rq_num_rae, 0x50), |
| 2077 | DIAG_COUNTER(sq_num_roe, 0x54), |
| 2078 | DIAG_COUNTER(sq_num_tree, 0x5C), |
| 2079 | DIAG_COUNTER(sq_num_rree, 0x64), |
| 2080 | DIAG_COUNTER(rq_num_rnr, 0x68), |
| 2081 | DIAG_COUNTER(sq_num_rnr, 0x6C), |
| 2082 | DIAG_COUNTER(rq_num_oos, 0x100), |
| 2083 | DIAG_COUNTER(sq_num_oos, 0x104), |
| 2084 | }; |
| 2085 | |
| 2086 | static const struct diag_counter diag_ext[] = { |
| 2087 | DIAG_COUNTER(rq_num_dup, 0x130), |
| 2088 | DIAG_COUNTER(sq_num_to, 0x134), |
| 2089 | }; |
| 2090 | |
| 2091 | static const struct diag_counter diag_device_only[] = { |
| 2092 | DIAG_COUNTER(num_cqovf, 0x1A0), |
| 2093 | DIAG_COUNTER(rq_num_udsdprd, 0x118), |
| 2094 | }; |
| 2095 | |
| 2096 | static struct rdma_hw_stats * |
| 2097 | mlx4_ib_alloc_hw_device_stats(struct ib_device *ibdev) |
| 2098 | { |
| 2099 | struct mlx4_ib_dev *dev = to_mdev(ibdev); |
| 2100 | struct mlx4_ib_diag_counters *diag = dev->diag_counters; |
| 2101 | |
| 2102 | if (!diag[0].descs) |
| 2103 | return NULL; |
| 2104 | |
| 2105 | return rdma_alloc_hw_stats_struct(diag[0].descs, diag[0].num_counters,
| 2106 | RDMA_HW_STATS_DEFAULT_LIFESPAN);
| 2107 | } |
| 2108 | |
| 2109 | static struct rdma_hw_stats * |
| 2110 | mlx4_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num) |
| 2111 | { |
| 2112 | struct mlx4_ib_dev *dev = to_mdev(ibdev); |
| 2113 | struct mlx4_ib_diag_counters *diag = dev->diag_counters; |
| 2114 | |
| 2115 | if (!diag[1].descs) |
| 2116 | return NULL; |
| 2117 | |
| 2118 | return rdma_alloc_hw_stats_struct(diag[1].descs, diag[1].num_counters,
| 2119 | RDMA_HW_STATS_DEFAULT_LIFESPAN);
| 2120 | } |
| 2121 | |
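|      | /*
|      |  * diag_counters[0] holds the device-wide counter set and
|      |  * diag_counters[1] the per-port set; !!port selects between them.
|      |  */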
| 2122 | static int mlx4_ib_get_hw_stats(struct ib_device *ibdev, |
| 2123 | struct rdma_hw_stats *stats, |
| 2124 | u32 port, int index) |
| 2125 | { |
| 2126 | struct mlx4_ib_dev *dev = to_mdev(ibdev); |
| 2127 | struct mlx4_ib_diag_counters *diag = dev->diag_counters; |
| 2128 | u32 hw_value[ARRAY_SIZE(diag_device_only) + |
| 2129 | ARRAY_SIZE(diag_ext) + ARRAY_SIZE(diag_basic)] = {}; |
| 2130 | int ret; |
| 2131 | int i; |
| 2132 | |
| 2133 | ret = mlx4_query_diag_counters(dev->dev,
| 2134 | MLX4_OP_MOD_QUERY_TRANSPORT_CI_ERRORS,
| 2135 | diag[!!port].offset, hw_value,
| 2136 | diag[!!port].num_counters, port);
| 2137 | |
| 2138 | if (ret) |
| 2139 | return ret; |
| 2140 | |
| 2141 | for (i = 0; i < diag[!!port].num_counters; i++) |
| 2142 | stats->value[i] = hw_value[i]; |
| 2143 | |
| 2144 | return diag[!!port].num_counters; |
| 2145 | } |
| 2146 | |
| 2147 | static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev, |
| 2148 | struct rdma_stat_desc **pdescs, |
| 2149 | u32 **offset, u32 *num, bool port) |
| 2150 | { |
| 2151 | u32 num_counters; |
| 2152 | |
| 2153 | num_counters = ARRAY_SIZE(diag_basic); |
| 2154 | |
| 2155 | if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) |
| 2156 | num_counters += ARRAY_SIZE(diag_ext); |
| 2157 | |
| 2158 | if (!port) |
| 2159 | num_counters += ARRAY_SIZE(diag_device_only); |
| 2160 | |
| 2161 | *pdescs = kcalloc(num_counters, sizeof(struct rdma_stat_desc), |
| 2162 | GFP_KERNEL); |
| 2163 | if (!*pdescs) |
| 2164 | return -ENOMEM; |
| 2165 | |
| 2166 | *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL); |
| 2167 | if (!*offset) |
| 2168 | goto err; |
| 2169 | |
| 2170 | *num = num_counters; |
| 2171 | |
| 2172 | return 0; |
| 2173 | |
| 2174 | err: |
| 2175 | kfree(*pdescs);
| 2176 | return -ENOMEM; |
| 2177 | } |
| 2178 | |
| 2179 | static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev, |
| 2180 | struct rdma_stat_desc *descs, |
| 2181 | u32 *offset, bool port) |
| 2182 | { |
| 2183 | int i; |
| 2184 | int j; |
| 2185 | |
| 2186 | for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) { |
| 2187 | descs[i].name = diag_basic[i].name; |
| 2188 | offset[i] = diag_basic[i].offset; |
| 2189 | } |
| 2190 | |
| 2191 | if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) { |
| 2192 | for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) { |
| 2193 | descs[j].name = diag_ext[i].name; |
| 2194 | offset[j] = diag_ext[i].offset; |
| 2195 | } |
| 2196 | } |
| 2197 | |
| 2198 | if (!port) { |
| 2199 | for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) { |
| 2200 | descs[j].name = diag_device_only[i].name; |
| 2201 | offset[j] = diag_device_only[i].offset; |
| 2202 | } |
| 2203 | } |
| 2204 | } |
| 2205 | |
| 2206 | static const struct ib_device_ops mlx4_ib_hw_stats_ops = { |
| 2207 | .alloc_hw_device_stats = mlx4_ib_alloc_hw_device_stats, |
| 2208 | .alloc_hw_port_stats = mlx4_ib_alloc_hw_port_stats, |
| 2209 | .get_hw_stats = mlx4_ib_get_hw_stats, |
| 2210 | }; |
| 2211 | |
| 2212 | static const struct ib_device_ops mlx4_ib_hw_stats_ops1 = { |
| 2213 | .alloc_hw_device_stats = mlx4_ib_alloc_hw_device_stats, |
| 2214 | .get_hw_stats = mlx4_ib_get_hw_stats, |
| 2215 | }; |
| 2216 | |
| 2217 | static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev) |
| 2218 | { |
| 2219 | struct mlx4_ib_diag_counters *diag = ibdev->diag_counters; |
| 2220 | int i; |
| 2221 | int ret; |
| 2222 | bool per_port = !!(ibdev->dev->caps.flags2 & |
| 2223 | MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT); |
| 2224 | |
| 2225 | if (mlx4_is_slave(ibdev->dev))
| 2226 | return 0; |
| 2227 | |
| 2228 | for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { |
| 2229 | /* |
| 2230 | * i == 1 means we are building port counters, set a different |
| 2231 | * stats ops without port stats callback. |
| 2232 | */ |
| 2233 | if (i && !per_port) { |
| 2234 | ib_set_device_ops(&ibdev->ib_dev,
| 2235 | &mlx4_ib_hw_stats_ops1);
| 2236 | |
| 2237 | return 0; |
| 2238 | } |
| 2239 | |
| 2240 | ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs,
| 2241 | &diag[i].offset,
| 2242 | &diag[i].num_counters, i);
| 2243 | if (ret) |
| 2244 | goto err_alloc; |
| 2245 | |
| 2246 | mlx4_ib_fill_diag_counters(ibdev, diag[i].descs,
| 2247 | diag[i].offset, i);
| 2248 | } |
| 2249 | |
| 2250 | ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_hw_stats_ops);
| 2251 | |
| 2252 | return 0; |
| 2253 | |
| 2254 | err_alloc: |
| 2255 | if (i) { |
| 2256 | kfree(diag[i - 1].descs);
| 2257 | kfree(diag[i - 1].offset);
| 2258 | } |
| 2259 | |
| 2260 | return ret; |
| 2261 | } |
| 2262 | |
| 2263 | static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev) |
| 2264 | { |
| 2265 | int i; |
| 2266 | |
| 2267 | for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { |
| 2268 | kfree(ibdev->diag_counters[i].offset);
| 2269 | kfree(ibdev->diag_counters[i].descs);
| 2270 | } |
| 2271 | } |
| 2272 | |
| 2273 | #define MLX4_IB_INVALID_MAC ((u64)-1) |
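|      | /*
|      |  * On a source-MAC change, update the proxy QP1 of the given port: register
|      |  * the new MAC with the HW, point the QP at the new index via UPDATE_QP,
|      |  * and release whichever MAC (old or new) is no longer needed.
|      |  */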
| 2274 | static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, |
| 2275 | struct net_device *dev, |
| 2276 | int port) |
| 2277 | { |
| 2278 | u64 new_smac = 0; |
| 2279 | u64 release_mac = MLX4_IB_INVALID_MAC; |
| 2280 | struct mlx4_ib_qp *qp; |
| 2281 | |
| 2282 | new_smac = ether_addr_to_u64(dev->dev_addr);
| 2283 | atomic64_set(&ibdev->iboe.mac[port - 1], new_smac);
| 2284 | 
| 2285 | /* no need to update QP1 or register a MAC in non-SRIOV */
| 2286 | if (!mlx4_is_mfunc(ibdev->dev))
| 2287 | return; |
| 2288 | |
| 2289 | mutex_lock(&ibdev->qp1_proxy_lock[port - 1]); |
| 2290 | qp = ibdev->qp1_proxy[port - 1]; |
| 2291 | if (qp) { |
| 2292 | int new_smac_index; |
| 2293 | u64 old_smac; |
| 2294 | struct mlx4_update_qp_params update_params; |
| 2295 | |
| 2296 | mutex_lock(&qp->mutex); |
| 2297 | old_smac = qp->pri.smac; |
| 2298 | if (new_smac == old_smac) |
| 2299 | goto unlock; |
| 2300 | |
| 2301 | new_smac_index = mlx4_register_mac(ibdev->dev, port, new_smac);
| 2302 | |
| 2303 | if (new_smac_index < 0) |
| 2304 | goto unlock; |
| 2305 | |
| 2306 | update_params.smac_index = new_smac_index; |
| 2307 | if (mlx4_update_qp(ibdev->dev, qp->mqp.qpn, MLX4_UPDATE_QP_SMAC,
| 2308 | &update_params)) {
| 2309 | release_mac = new_smac; |
| 2310 | goto unlock; |
| 2311 | } |
| 2312 | /* if old port was zero, no mac was yet registered for this QP */ |
| 2313 | if (qp->pri.smac_port) |
| 2314 | release_mac = old_smac; |
| 2315 | qp->pri.smac = new_smac; |
| 2316 | qp->pri.smac_port = port; |
| 2317 | qp->pri.smac_index = new_smac_index; |
| 2318 | } |
| 2319 | |
| 2320 | unlock: |
| 2321 | if (release_mac != MLX4_IB_INVALID_MAC)
| 2322 | mlx4_unregister_mac(ibdev->dev, port, release_mac);
| 2323 | if (qp)
| 2324 | mutex_unlock(&qp->mutex);
| 2325 | mutex_unlock(&ibdev->qp1_proxy_lock[port - 1]);
| 2326 | } |
| 2327 | |
| 2328 | static void mlx4_ib_scan_netdev(struct mlx4_ib_dev *ibdev, |
| 2329 | struct net_device *dev, |
| 2330 | unsigned long event) |
| 2331 | |
| 2332 | { |
| 2333 | struct mlx4_ib_iboe *iboe = &ibdev->iboe; |
| 2334 | |
| 2335 | ASSERT_RTNL(); |
| 2336 | |
| 2337 | if (dev->dev.parent != ibdev->ib_dev.dev.parent) |
| 2338 | return; |
| 2339 | |
| 2340 | spin_lock_bh(&iboe->lock);
| 2341 | 
| 2342 | iboe->netdevs[dev->dev_port] = event != NETDEV_UNREGISTER ? dev : NULL;
| 2343 | 
| 2344 | spin_unlock_bh(&iboe->lock);
| 2345 | 
| 2346 | if (event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER)
| 2347 | mlx4_ib_update_qps(ibdev, dev, dev->dev_port + 1);
| 2348 | } |
| 2349 | |
| 2350 | static void mlx4_ib_port_event(struct ib_device *ibdev, struct net_device *ndev, |
| 2351 | unsigned long event) |
| 2352 | { |
| 2353 | struct mlx4_ib_dev *mlx4_ibdev = |
| 2354 | container_of(ibdev, struct mlx4_ib_dev, ib_dev); |
| 2355 | struct mlx4_ib_iboe *iboe = &mlx4_ibdev->iboe; |
| 2356 | |
| 2357 | if (!net_eq(dev_net(ndev), &init_net))
| 2358 | return; |
| 2359 | |
| 2360 | ASSERT_RTNL(); |
| 2361 | |
| 2362 | if (ndev->dev.parent != mlx4_ibdev->ib_dev.dev.parent) |
| 2363 | return; |
| 2364 | |
| 2365 | spin_lock_bh(&iboe->lock);
| 2366 | 
| 2367 | iboe->netdevs[ndev->dev_port] = event != NETDEV_UNREGISTER ? ndev : NULL;
| 2368 | 
| 2369 | if (event == NETDEV_UP || event == NETDEV_DOWN)
| 2370 | ib_dispatch_port_state_event(&mlx4_ibdev->ib_dev, ndev);
| 2371 | 
| 2372 | spin_unlock_bh(&iboe->lock);
| 2373 | 
| 2374 | if (event == NETDEV_UP || event == NETDEV_CHANGE)
| 2375 | mlx4_ib_update_qps(mlx4_ibdev, ndev, ndev->dev_port + 1);
| 2376 | } |
| 2377 | |
| 2378 | static int mlx4_ib_netdev_event(struct notifier_block *this, |
| 2379 | unsigned long event, void *ptr) |
| 2380 | { |
| 2381 | struct net_device *dev = netdev_notifier_info_to_dev(ptr);
| 2382 | struct mlx4_ib_dev *ibdev;
| 2383 | 
| 2384 | if (!net_eq(dev_net(dev), &init_net))
| 2385 | return NOTIFY_DONE; |
| 2386 | |
| 2387 | ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb); |
| 2388 | mlx4_ib_scan_netdev(ibdev, dev, event); |
| 2389 | |
| 2390 | return NOTIFY_DONE; |
| 2391 | } |
| 2392 | |
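|      | /*
|      |  * For the master function, program the virt2phys pkey mapping of every
|      |  * slave and port (identity for the master and for slot 0, last table
|      |  * entry otherwise), then seed the physical pkey cache with the default
|      |  * full-membership key in slot 0.
|      |  */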
| 2393 | static void init_pkeys(struct mlx4_ib_dev *ibdev) |
| 2394 | { |
| 2395 | int port; |
| 2396 | int slave; |
| 2397 | int i; |
| 2398 | |
| 2399 | if (mlx4_is_master(ibdev->dev)) {
| 2400 | for (slave = 0; slave <= ibdev->dev->persist->num_vfs; |
| 2401 | ++slave) { |
| 2402 | for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { |
| 2403 | for (i = 0; |
| 2404 | i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; |
| 2405 | ++i) { |
| 2406 | ibdev->pkeys.virt2phys_pkey[slave][port - 1][i] = |
| 2407 | /* master has the identity virt2phys pkey mapping */ |
| 2408 | (slave == mlx4_master_func_num(ibdev->dev) || !i) ? i :
| 2409 | ibdev->dev->phys_caps.pkey_phys_table_len[port] - 1;
| 2410 | mlx4_sync_pkey_table(ibdev->dev, slave, port, i,
| 2411 | ibdev->pkeys.virt2phys_pkey[slave][port - 1][i]);
| 2412 | } |
| 2413 | } |
| 2414 | } |
| 2415 | /* initialize pkey cache */ |
| 2416 | for (port = 1; port <= ibdev->dev->caps.num_ports; ++port) { |
| 2417 | for (i = 0; |
| 2418 | i < ibdev->dev->phys_caps.pkey_phys_table_len[port]; |
| 2419 | ++i) |
| 2420 | ibdev->pkeys.phys_pkey_cache[port-1][i] = |
| 2421 | (i) ? 0 : 0xFFFF; |
| 2422 | } |
| 2423 | } |
| 2424 | } |
| 2425 | |
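|      | /*
|      |  * Carve out completion EQs per port from the vectors the core driver
|      |  * exposes, skipping vectors shared between ports, and advertise only
|      |  * the successfully assigned ones as comp vectors.
|      |  */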
| 2426 | static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) |
| 2427 | { |
| 2428 | int i, j, eq = 0, total_eqs = 0; |
| 2429 | |
| 2430 | ibdev->eq_table = kcalloc(dev->caps.num_comp_vectors, |
| 2431 | sizeof(ibdev->eq_table[0]), GFP_KERNEL); |
| 2432 | if (!ibdev->eq_table) |
| 2433 | return; |
| 2434 | |
| 2435 | for (i = 1; i <= dev->caps.num_ports; i++) { |
| 2436 | for (j = 0; j < mlx4_get_eqs_per_port(dev, i);
| 2437 | j++, total_eqs++) {
| 2438 | if (i > 1 && mlx4_is_eq_shared(dev, total_eqs))
| 2439 | continue;
| 2440 | ibdev->eq_table[eq] = total_eqs;
| 2441 | if (!mlx4_assign_eq(dev, i,
| 2442 | &ibdev->eq_table[eq]))
| 2443 | eq++; |
| 2444 | else |
| 2445 | ibdev->eq_table[eq] = -1; |
| 2446 | } |
| 2447 | } |
| 2448 | |
| 2449 | for (i = eq; i < dev->caps.num_comp_vectors; |
| 2450 | ibdev->eq_table[i++] = -1) |
| 2451 | ; |
| 2452 | |
| 2453 | /* Advertise the new number of EQs to clients */ |
| 2454 | ibdev->ib_dev.num_comp_vectors = eq; |
| 2455 | } |
| 2456 | |
| 2457 | static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) |
| 2458 | { |
| 2459 | int i; |
| 2460 | int total_eqs = ibdev->ib_dev.num_comp_vectors; |
| 2461 | |
| 2462 | /* no eqs were allocated */ |
| 2463 | if (!ibdev->eq_table) |
| 2464 | return; |
| 2465 | |
| 2466 | /* Reset the advertised EQ number */ |
| 2467 | ibdev->ib_dev.num_comp_vectors = 0; |
| 2468 | |
| 2469 | for (i = 0; i < total_eqs; i++)
| 2470 | mlx4_release_eq(dev, ibdev->eq_table[i]);
| 2471 | 
| 2472 | kfree(ibdev->eq_table);
| 2473 | ibdev->eq_table = NULL; |
| 2474 | } |
| 2475 | |
| 2476 | static int mlx4_port_immutable(struct ib_device *ibdev, u32 port_num, |
| 2477 | struct ib_port_immutable *immutable) |
| 2478 | { |
| 2479 | struct ib_port_attr attr; |
| 2480 | struct mlx4_ib_dev *mdev = to_mdev(ibdev); |
| 2481 | int err; |
| 2482 | |
| 2483 | if (mlx4_ib_port_link_layer(ibdev, port_num) == IB_LINK_LAYER_INFINIBAND) {
| 2484 | immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB; |
| 2485 | immutable->max_mad_size = IB_MGMT_MAD_SIZE; |
| 2486 | } else { |
| 2487 | if (mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) |
| 2488 | immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE; |
| 2489 | if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) |
| 2490 | immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE | |
| 2491 | RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; |
| 2492 | immutable->core_cap_flags |= RDMA_CORE_PORT_RAW_PACKET; |
| 2493 | if (immutable->core_cap_flags & (RDMA_CORE_PORT_IBA_ROCE | |
| 2494 | RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP)) |
| 2495 | immutable->max_mad_size = IB_MGMT_MAD_SIZE; |
| 2496 | } |
| 2497 | |
| 2498 | err = ib_query_port(ibdev, port_num, &attr);
| 2499 | if (err) |
| 2500 | return err; |
| 2501 | |
| 2502 | immutable->pkey_tbl_len = attr.pkey_tbl_len; |
| 2503 | immutable->gid_tbl_len = attr.gid_tbl_len; |
| 2504 | |
| 2505 | return 0; |
| 2506 | } |
| 2507 | |
| 2508 | static void get_fw_ver_str(struct ib_device *device, char *str) |
| 2509 | { |
| 2510 | struct mlx4_ib_dev *dev = |
| 2511 | container_of(device, struct mlx4_ib_dev, ib_dev); |
| 2512 | snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%d",
| 2513 | (int) (dev->dev->caps.fw_ver >> 32),
| 2514 | (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
| 2515 | (int) dev->dev->caps.fw_ver & 0xffff);
| 2516 | } |
| 2517 | |
| 2518 | static const struct ib_device_ops mlx4_ib_dev_ops = { |
| 2519 | .owner = THIS_MODULE, |
| 2520 | .driver_id = RDMA_DRIVER_MLX4, |
| 2521 | .uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION, |
| 2522 | |
| 2523 | .add_gid = mlx4_ib_add_gid, |
| 2524 | .alloc_mr = mlx4_ib_alloc_mr, |
| 2525 | .alloc_pd = mlx4_ib_alloc_pd, |
| 2526 | .alloc_ucontext = mlx4_ib_alloc_ucontext, |
| 2527 | .attach_mcast = mlx4_ib_mcg_attach, |
| 2528 | .create_ah = mlx4_ib_create_ah, |
| 2529 | .create_cq = mlx4_ib_create_cq, |
| 2530 | .create_qp = mlx4_ib_create_qp, |
| 2531 | .create_srq = mlx4_ib_create_srq, |
| 2532 | .dealloc_pd = mlx4_ib_dealloc_pd, |
| 2533 | .dealloc_ucontext = mlx4_ib_dealloc_ucontext, |
| 2534 | .del_gid = mlx4_ib_del_gid, |
| 2535 | .dereg_mr = mlx4_ib_dereg_mr, |
| 2536 | .destroy_ah = mlx4_ib_destroy_ah, |
| 2537 | .destroy_cq = mlx4_ib_destroy_cq, |
| 2538 | .destroy_qp = mlx4_ib_destroy_qp, |
| 2539 | .destroy_srq = mlx4_ib_destroy_srq, |
| 2540 | .detach_mcast = mlx4_ib_mcg_detach, |
| 2541 | .device_group = &mlx4_attr_group, |
| 2542 | .disassociate_ucontext = mlx4_ib_disassociate_ucontext, |
| 2543 | .drain_rq = mlx4_ib_drain_rq, |
| 2544 | .drain_sq = mlx4_ib_drain_sq, |
| 2545 | .get_dev_fw_str = get_fw_ver_str, |
| 2546 | .get_dma_mr = mlx4_ib_get_dma_mr, |
| 2547 | .get_link_layer = mlx4_ib_port_link_layer, |
| 2548 | .get_netdev = mlx4_ib_get_netdev, |
| 2549 | .get_port_immutable = mlx4_port_immutable, |
| 2550 | .map_mr_sg = mlx4_ib_map_mr_sg, |
| 2551 | .mmap = mlx4_ib_mmap, |
| 2552 | .modify_cq = mlx4_ib_modify_cq, |
| 2553 | .modify_device = mlx4_ib_modify_device, |
| 2554 | .modify_port = mlx4_ib_modify_port, |
| 2555 | .modify_qp = mlx4_ib_modify_qp, |
| 2556 | .modify_srq = mlx4_ib_modify_srq, |
| 2557 | .poll_cq = mlx4_ib_poll_cq, |
| 2558 | .post_recv = mlx4_ib_post_recv, |
| 2559 | .post_send = mlx4_ib_post_send, |
| 2560 | .post_srq_recv = mlx4_ib_post_srq_recv, |
| 2561 | .process_mad = mlx4_ib_process_mad, |
| 2562 | .query_ah = mlx4_ib_query_ah, |
| 2563 | .query_device = mlx4_ib_query_device, |
| 2564 | .query_gid = mlx4_ib_query_gid, |
| 2565 | .query_pkey = mlx4_ib_query_pkey, |
| 2566 | .query_port = mlx4_ib_query_port, |
| 2567 | .query_qp = mlx4_ib_query_qp, |
| 2568 | .query_srq = mlx4_ib_query_srq, |
| 2569 | .reg_user_mr = mlx4_ib_reg_user_mr, |
| 2570 | .req_notify_cq = mlx4_ib_arm_cq, |
| 2571 | .rereg_user_mr = mlx4_ib_rereg_user_mr, |
| 2572 | .resize_cq = mlx4_ib_resize_cq, |
| 2573 | .report_port_event = mlx4_ib_port_event, |
| 2574 | |
| 2575 | INIT_RDMA_OBJ_SIZE(ib_ah, mlx4_ib_ah, ibah), |
| 2576 | INIT_RDMA_OBJ_SIZE(ib_cq, mlx4_ib_cq, ibcq), |
| 2577 | INIT_RDMA_OBJ_SIZE(ib_pd, mlx4_ib_pd, ibpd), |
| 2578 | INIT_RDMA_OBJ_SIZE(ib_qp, mlx4_ib_qp, ibqp), |
| 2579 | INIT_RDMA_OBJ_SIZE(ib_srq, mlx4_ib_srq, ibsrq), |
| 2580 | INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx4_ib_ucontext, ibucontext), |
| 2581 | }; |
| 2582 | |
| 2583 | static const struct ib_device_ops mlx4_ib_dev_wq_ops = { |
| 2584 | .create_rwq_ind_table = mlx4_ib_create_rwq_ind_table, |
| 2585 | .create_wq = mlx4_ib_create_wq, |
| 2586 | .destroy_rwq_ind_table = mlx4_ib_destroy_rwq_ind_table, |
| 2587 | .destroy_wq = mlx4_ib_destroy_wq, |
| 2588 | .modify_wq = mlx4_ib_modify_wq, |
| 2589 | |
| 2590 | INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx4_ib_rwq_ind_table, |
| 2591 | ib_rwq_ind_tbl), |
| 2592 | }; |
| 2593 | |
| 2594 | static const struct ib_device_ops mlx4_ib_dev_mw_ops = { |
| 2595 | .alloc_mw = mlx4_ib_alloc_mw, |
| 2596 | .dealloc_mw = mlx4_ib_dealloc_mw, |
| 2597 | |
| 2598 | INIT_RDMA_OBJ_SIZE(ib_mw, mlx4_ib_mw, ibmw), |
| 2599 | }; |
| 2600 | |
| 2601 | static const struct ib_device_ops mlx4_ib_dev_xrc_ops = { |
| 2602 | .alloc_xrcd = mlx4_ib_alloc_xrcd, |
| 2603 | .dealloc_xrcd = mlx4_ib_dealloc_xrcd, |
| 2604 | |
| 2605 | INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx4_ib_xrcd, ibxrcd), |
| 2606 | }; |
| 2607 | |
| 2608 | static const struct ib_device_ops mlx4_ib_dev_fs_ops = { |
| 2609 | .create_flow = mlx4_ib_create_flow, |
| 2610 | .destroy_flow = mlx4_ib_destroy_flow, |
| 2611 | }; |
| 2612 | |
| 2613 | static int mlx4_ib_probe(struct auxiliary_device *adev, |
| 2614 | const struct auxiliary_device_id *id) |
| 2615 | { |
| 2616 | struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev); |
| 2617 | struct mlx4_dev *dev = madev->mdev; |
| 2618 | struct mlx4_ib_dev *ibdev; |
| 2619 | int num_ports = 0; |
| 2620 | int i, j; |
| 2621 | int err; |
| 2622 | struct mlx4_ib_iboe *iboe; |
| 2623 | int ib_num_ports = 0; |
| 2624 | int num_req_counters; |
| 2625 | int allocated; |
| 2626 | u32 counter_index; |
| 2627 | struct counter_index *new_counter_index; |
| 2628 | |
| 2629 | pr_info_once("%s", mlx4_ib_version);
| 2630 | |
| 2631 | num_ports = 0; |
| 2632 | mlx4_foreach_ib_transport_port(i, dev) |
| 2633 | num_ports++; |
| 2634 | |
| 2635 | /* No point in registering a device with no ports... */ |
| 2636 | if (num_ports == 0) |
| 2637 | return -ENODEV; |
| 2638 | |
| 2639 | ibdev = ib_alloc_device(mlx4_ib_dev, ib_dev); |
| 2640 | if (!ibdev) { |
| 2641 | dev_err(&dev->persist->pdev->dev, |
| 2642 | "Device struct alloc failed\n" ); |
| 2643 | return -ENOMEM; |
| 2644 | } |
| 2645 | |
| 2646 | iboe = &ibdev->iboe; |
| 2647 | |
| 2648 | err = mlx4_pd_alloc(dev, &ibdev->priv_pdn);
| 2649 | if (err) |
| 2650 | goto err_dealloc; |
| 2651 | |
| 2652 | err = mlx4_uar_alloc(dev, &ibdev->priv_uar);
| 2653 | if (err) |
| 2654 | goto err_pd; |
| 2655 | |
| 2656 | ibdev->uar_map = ioremap((phys_addr_t) ibdev->priv_uar.pfn << PAGE_SHIFT,
| 2657 | PAGE_SIZE); |
| 2658 | if (!ibdev->uar_map) { |
| 2659 | err = -ENOMEM; |
| 2660 | goto err_uar; |
| 2661 | } |
| 2662 | MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock); |
| 2663 | |
| 2664 | ibdev->dev = dev; |
| 2665 | ibdev->bond_next_port = 0; |
| 2666 | |
| 2667 | ibdev->ib_dev.node_type = RDMA_NODE_IB_CA; |
| 2668 | ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey; |
| 2669 | ibdev->num_ports = num_ports; |
| 2670 | ibdev->ib_dev.phys_port_cnt = mlx4_is_bonded(dev) ? |
| 2671 | 1 : ibdev->num_ports; |
| 2672 | ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; |
| 2673 | ibdev->ib_dev.dev.parent = &dev->persist->pdev->dev; |
| 2674 | |
| 2675 | ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_ops);
| 2676 | 
| 2677 | if ((dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS) &&
| 2678 | ((mlx4_ib_port_link_layer(&ibdev->ib_dev, 1) ==
| 2679 | IB_LINK_LAYER_ETHERNET) ||
| 2680 | (mlx4_ib_port_link_layer(&ibdev->ib_dev, 2) ==
| 2681 | IB_LINK_LAYER_ETHERNET)))
| 2682 | ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_wq_ops);
| 2683 | 
| 2684 | if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
| 2685 | dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
| 2686 | ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_mw_ops);
| 2687 | 
| 2688 | if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {
| 2689 | ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_xrc_ops);
| 2690 | }
| 2691 | 
| 2692 | if (check_flow_steering_support(dev)) {
| 2693 | ibdev->steering_support = MLX4_STEERING_MODE_DEVICE_MANAGED;
| 2694 | ib_set_device_ops(&ibdev->ib_dev, &mlx4_ib_dev_fs_ops);
| 2695 | }
| 2696 | |
| 2697 | if (!dev->caps.userspace_caps) |
| 2698 | ibdev->ib_dev.ops.uverbs_abi_ver = |
| 2699 | MLX4_IB_UVERBS_NO_DEV_CAPS_ABI_VERSION; |
| 2700 | |
| 2701 | mlx4_ib_alloc_eqs(dev, ibdev); |
| 2702 | |
| 2703 | spin_lock_init(&iboe->lock); |
| 2704 | |
| 2705 | err = init_node_data(ibdev);
| 2706 | if (err)
| 2707 | goto err_map;
| 2708 | mlx4_init_sl2vl_tbl(ibdev);
| 2709 | |
| 2710 | for (i = 0; i < ibdev->num_ports; ++i) { |
| 2711 | mutex_init(&ibdev->counters_table[i].mutex); |
| 2712 | INIT_LIST_HEAD(&ibdev->counters_table[i].counters_list);
| 2713 | iboe->last_port_state[i] = IB_PORT_DOWN; |
| 2714 | } |
| 2715 | |
| 2716 | num_req_counters = mlx4_is_bonded(dev) ? 1 : ibdev->num_ports; |
| 2717 | for (i = 0; i < num_req_counters; ++i) { |
| 2718 | mutex_init(&ibdev->qp1_proxy_lock[i]); |
| 2719 | allocated = 0; |
| 2720 | if (mlx4_ib_port_link_layer(&ibdev->ib_dev, i + 1) ==
| 2721 | IB_LINK_LAYER_ETHERNET) {
| 2722 | err = mlx4_counter_alloc(ibdev->dev, &counter_index,
| 2723 | MLX4_RES_USAGE_DRIVER);
| 2724 | /* if failed to allocate a new counter, use default */
| 2725 | if (err)
| 2726 | counter_index =
| 2727 | mlx4_get_default_counter_index(dev,
| 2728 | i + 1);
| 2729 | else
| 2730 | allocated = 1;
| 2731 | } else { /* IB_LINK_LAYER_INFINIBAND uses the default counter */
| 2732 | counter_index = mlx4_get_default_counter_index(dev,
| 2733 | i + 1);
| 2734 | }
| 2735 | new_counter_index = kmalloc(sizeof(*new_counter_index),
| 2736 | GFP_KERNEL);
| 2737 | if (!new_counter_index) {
| 2738 | err = -ENOMEM;
| 2739 | if (allocated)
| 2740 | mlx4_counter_free(ibdev->dev, counter_index);
| 2741 | goto err_counter;
| 2742 | }
| 2743 | new_counter_index->index = counter_index;
| 2744 | new_counter_index->allocated = allocated;
| 2745 | list_add_tail(&new_counter_index->list,
| 2746 | &ibdev->counters_table[i].counters_list);
| 2747 | ibdev->counters_table[i].default_counter = counter_index;
| 2748 | pr_info("counter index %d for port %d allocated %d\n",
| 2749 | counter_index, i + 1, allocated);
| 2750 | } |
| 2751 | if (mlx4_is_bonded(dev)) |
| 2752 | for (i = 1; i < ibdev->num_ports ; ++i) { |
| 2753 | new_counter_index = |
| 2754 | kmalloc(sizeof(struct counter_index), |
| 2755 | GFP_KERNEL); |
| 2756 | if (!new_counter_index) { |
| 2757 | err = -ENOMEM; |
| 2758 | goto err_counter; |
| 2759 | } |
| 2760 | new_counter_index->index = counter_index; |
| 2761 | new_counter_index->allocated = 0; |
| 2762 | list_add_tail(&new_counter_index->list,
| 2763 | &ibdev->counters_table[i].counters_list);
| 2764 | ibdev->counters_table[i].default_counter = |
| 2765 | counter_index; |
| 2766 | } |
| 2767 | |
| 2768 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB) |
| 2769 | ib_num_ports++; |
| 2770 | |
| 2771 | spin_lock_init(&ibdev->sm_lock); |
| 2772 | mutex_init(&ibdev->cap_mask_mutex); |
| 2773 | INIT_LIST_HEAD(&ibdev->qp_list);
| 2774 | spin_lock_init(&ibdev->reset_flow_resource_lock); |
| 2775 | |
| 2776 | if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED && |
| 2777 | ib_num_ports) { |
| 2778 | ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS; |
| 2779 | err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
| 2780 | MLX4_IB_UC_STEER_QPN_ALIGN,
| 2781 | &ibdev->steer_qpn_base, 0,
| 2782 | MLX4_RES_USAGE_DRIVER);
| 2783 | if (err)
| 2784 | goto err_counter;
| 2785 | 
| 2786 | ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count,
| 2787 | GFP_KERNEL); |
| 2788 | if (!ibdev->ib_uc_qpns_bitmap) { |
| 2789 | err = -ENOMEM; |
| 2790 | goto err_steer_qp_release; |
| 2791 | } |
| 2792 | |
| 2793 | if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DMFS_IPOIB) {
| 2794 | bitmap_zero(ibdev->ib_uc_qpns_bitmap,
| 2795 | ibdev->steer_qpn_count);
| 2796 | err = mlx4_FLOW_STEERING_IB_UC_QP_RANGE(
| 2797 | dev, ibdev->steer_qpn_base,
| 2798 | ibdev->steer_qpn_base +
| 2799 | ibdev->steer_qpn_count - 1);
| 2800 | if (err)
| 2801 | goto err_steer_free_bitmap;
| 2802 | } else {
| 2803 | bitmap_fill(ibdev->ib_uc_qpns_bitmap,
| 2804 | ibdev->steer_qpn_count);
| 2805 | } |
| 2806 | } |
| 2807 | |
| 2808 | for (j = 1; j <= ibdev->dev->caps.num_ports; j++) |
| 2809 | atomic64_set(&iboe->mac[j - 1], ibdev->dev->caps.def_mac[j]);
| 2810 | |
| 2811 | err = mlx4_ib_alloc_diag_counters(ibdev); |
| 2812 | if (err) |
| 2813 | goto err_steer_free_bitmap; |
| 2814 | |
| 2815 | err = ib_register_device(&ibdev->ib_dev, "mlx4_%d",
| 2816 | &dev->persist->pdev->dev);
| 2817 | if (err)
| 2818 | goto err_diag_counters;
| 2819 | 
| 2820 | err = mlx4_ib_mad_init(ibdev);
| 2821 | if (err)
| 2822 | goto err_reg;
| 2823 | 
| 2824 | err = mlx4_ib_init_sriov(ibdev);
| 2825 | if (err)
| 2826 | goto err_mad;
| 2827 | 
| 2828 | if (!iboe->nb.notifier_call) {
| 2829 | iboe->nb.notifier_call = mlx4_ib_netdev_event;
| 2830 | err = register_netdevice_notifier(&iboe->nb);
| 2831 | if (err) { |
| 2832 | iboe->nb.notifier_call = NULL; |
| 2833 | goto err_notif; |
| 2834 | } |
| 2835 | } |
| 2836 | if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2) { |
| 2837 | err = mlx4_config_roce_v2_port(dev, ROCE_V2_UDP_DPORT); |
| 2838 | if (err) |
| 2839 | goto err_notif; |
| 2840 | } |
| 2841 | |
| 2842 | ibdev->ib_active = true; |
| 2843 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
| 2844 | devlink_port_type_ib_set(mlx4_get_devlink_port(dev, i),
| 2845 | &ibdev->ib_dev);
| 2846 | |
| 2847 | if (mlx4_is_mfunc(ibdev->dev))
| 2848 | init_pkeys(ibdev);
| 2849 | |
| 2850 | /* create paravirt contexts for any VFs which are active */ |
| 2851 | if (mlx4_is_master(ibdev->dev)) {
| 2852 | for (j = 0; j < MLX4_MFUNC_MAX; j++) {
| 2853 | if (j == mlx4_master_func_num(ibdev->dev))
| 2854 | continue;
| 2855 | if (mlx4_is_slave_active(ibdev->dev, j))
| 2856 | do_slave_init(ibdev, j, 1);
| 2857 | } |
| 2858 | } |
| 2859 | |
| 2860 | /* register mlx4 core notifier */ |
| 2861 | ibdev->mlx_nb.notifier_call = mlx4_ib_event; |
| 2862 | err = mlx4_register_event_notifier(dev, &ibdev->mlx_nb);
| 2863 | WARN(err, "failed to register mlx4 event notifier (%d)", err);
| 2864 | 
| 2865 | auxiliary_set_drvdata(adev, ibdev);
| 2866 | return 0; |
| 2867 | |
| 2868 | err_notif: |
| 2869 | if (ibdev->iboe.nb.notifier_call) {
| 2870 | if (unregister_netdevice_notifier(&ibdev->iboe.nb))
| 2871 | pr_warn("failure unregistering notifier\n");
| 2872 | ibdev->iboe.nb.notifier_call = NULL; |
| 2873 | } |
| 2874 | flush_workqueue(wq); |
| 2875 | |
| 2876 | mlx4_ib_close_sriov(ibdev);
| 2877 | 
| 2878 | err_mad:
| 2879 | mlx4_ib_mad_cleanup(ibdev);
| 2880 | 
| 2881 | err_reg:
| 2882 | ib_unregister_device(&ibdev->ib_dev);
| 2883 | |
| 2884 | err_diag_counters: |
| 2885 | mlx4_ib_diag_cleanup(ibdev); |
| 2886 | |
| 2887 | err_steer_free_bitmap: |
| 2888 | bitmap_free(ibdev->ib_uc_qpns_bitmap);
| 2889 | 
| 2890 | err_steer_qp_release:
| 2891 | mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
| 2892 | ibdev->steer_qpn_count);
| 2893 | err_counter:
| 2894 | for (i = 0; i < ibdev->num_ports; ++i)
| 2895 | mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[i]);
| 2896 | 
| 2897 | err_map:
| 2898 | mlx4_ib_free_eqs(dev, ibdev);
| 2899 | iounmap(ibdev->uar_map);
| 2900 | 
| 2901 | err_uar:
| 2902 | mlx4_uar_free(dev, &ibdev->priv_uar);
| 2903 | 
| 2904 | err_pd:
| 2905 | mlx4_pd_free(dev, ibdev->priv_pdn);
| 2906 | 
| 2907 | err_dealloc:
| 2908 | ib_dealloc_device(&ibdev->ib_dev);
| 2909 | |
| 2910 | return err; |
| 2911 | } |
| 2912 | |
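|      | /*
|      |  * The steering QPN helpers hand out power-of-two aligned regions from
|      |  * the reserved UC steering range; qpn 0 is treated as nothing to free.
|      |  */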
| 2913 | int mlx4_ib_steer_qp_alloc(struct mlx4_ib_dev *dev, int count, int *qpn) |
| 2914 | { |
| 2915 | int offset; |
| 2916 | |
| 2917 | WARN_ON(!dev->ib_uc_qpns_bitmap); |
| 2918 | |
| 2919 | offset = bitmap_find_free_region(dev->ib_uc_qpns_bitmap,
| 2920 | dev->steer_qpn_count,
| 2921 | get_count_order(count));
| 2922 | if (offset < 0) |
| 2923 | return offset; |
| 2924 | |
| 2925 | *qpn = dev->steer_qpn_base + offset; |
| 2926 | return 0; |
| 2927 | } |
| 2928 | |
| 2929 | void mlx4_ib_steer_qp_free(struct mlx4_ib_dev *dev, u32 qpn, int count) |
| 2930 | { |
| 2931 | if (!qpn || |
| 2932 | dev->steering_support != MLX4_STEERING_MODE_DEVICE_MANAGED) |
| 2933 | return; |
| 2934 | |
| 2935 | if (WARN(qpn < dev->steer_qpn_base, "qpn = %u, steer_qpn_base = %u\n",
| 2936 | qpn, dev->steer_qpn_base))
| 2937 | /* not supposed to be here */ |
| 2938 | return; |
| 2939 | |
| 2940 | bitmap_release_region(dev->ib_uc_qpns_bitmap,
| 2941 | qpn - dev->steer_qpn_base,
| 2942 | get_count_order(count));
| 2943 | } |
| 2944 | |
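|      | /*
|      |  * Attach (or detach) the catch-all rule that steers traffic to a UC QP
|      |  * in device-managed steering mode; the rule body is a single IB spec
|      |  * with a zeroed mask, so it matches all IB L2 traffic on the port.
|      |  */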
| 2945 | int mlx4_ib_steer_qp_reg(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp, |
| 2946 | int is_attach) |
| 2947 | { |
| 2948 | int err; |
| 2949 | size_t flow_size; |
| 2950 | struct ib_flow_attr *flow; |
| 2951 | struct ib_flow_spec_ib *ib_spec; |
| 2952 | |
| 2953 | if (is_attach) { |
| 2954 | flow_size = sizeof(struct ib_flow_attr) + |
| 2955 | sizeof(struct ib_flow_spec_ib); |
| 2956 | flow = kzalloc(flow_size, GFP_KERNEL); |
| 2957 | if (!flow) |
| 2958 | return -ENOMEM; |
| 2959 | flow->port = mqp->port; |
| 2960 | flow->num_of_specs = 1; |
| 2961 | flow->size = flow_size; |
| 2962 | ib_spec = (struct ib_flow_spec_ib *)(flow + 1); |
| 2963 | ib_spec->type = IB_FLOW_SPEC_IB; |
| 2964 | ib_spec->size = sizeof(struct ib_flow_spec_ib); |
| 2965 | /* Add an empty rule for IB L2 */ |
| 2966 | memset(&ib_spec->mask, 0, sizeof(ib_spec->mask)); |
| 2967 | |
| 2968 | err = __mlx4_ib_create_flow(&mqp->ibqp, flow, MLX4_DOMAIN_NIC,
| 2969 | MLX4_FS_REGULAR, &mqp->reg_id);
| 2970 | kfree(flow);
| 2971 | return err;
| 2972 | }
| 2973 | 
| 2974 | return __mlx4_ib_destroy_flow(mdev->dev, mqp->reg_id);
| 2975 | } |
| 2976 | |
| 2977 | static void mlx4_ib_remove(struct auxiliary_device *adev) |
| 2978 | { |
| 2979 | struct mlx4_adev *madev = container_of(adev, struct mlx4_adev, adev); |
| 2980 | struct mlx4_dev *dev = madev->mdev; |
| 2981 | struct mlx4_ib_dev *ibdev = auxiliary_get_drvdata(adev);
| 2982 | int p;
| 2983 | int i;
| 2984 | 
| 2985 | mlx4_unregister_event_notifier(dev, &ibdev->mlx_nb);
| 2986 | 
| 2987 | mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
| 2988 | devlink_port_type_clear(mlx4_get_devlink_port(dev, i));
| 2989 | ibdev->ib_active = false;
| 2990 | flush_workqueue(wq);
| 2991 | 
| 2992 | if (ibdev->iboe.nb.notifier_call) {
| 2993 | if (unregister_netdevice_notifier(&ibdev->iboe.nb))
| 2994 | pr_warn("failure unregistering notifier\n");
| 2995 | ibdev->iboe.nb.notifier_call = NULL;
| 2996 | }
| 2997 | 
| 2998 | mlx4_ib_close_sriov(ibdev);
| 2999 | mlx4_ib_mad_cleanup(ibdev);
| 3000 | ib_unregister_device(&ibdev->ib_dev);
| 3001 | mlx4_ib_diag_cleanup(ibdev);
| 3002 | 
| 3003 | mlx4_qp_release_range(dev, ibdev->steer_qpn_base,
| 3004 | ibdev->steer_qpn_count);
| 3005 | bitmap_free(ibdev->ib_uc_qpns_bitmap);
| 3006 | 
| 3007 | iounmap(ibdev->uar_map);
| 3008 | for (p = 0; p < ibdev->num_ports; ++p)
| 3009 | mlx4_ib_delete_counters_table(ibdev, &ibdev->counters_table[p]);
| 3010 | 
| 3011 | mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
| 3012 | mlx4_CLOSE_PORT(dev, p);
| 3013 | 
| 3014 | mlx4_ib_free_eqs(dev, ibdev);
| 3015 | 
| 3016 | mlx4_uar_free(dev, &ibdev->priv_uar);
| 3017 | mlx4_pd_free(dev, ibdev->priv_pdn);
| 3018 | ib_dealloc_device(&ibdev->ib_dev);
| 3019 | } |
| 3020 | |
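|      | /*
|      |  * Queue per-port demux work items that set up (or tear down) the tunnel
|      |  * QPs used to paravirtualize MADs for an active slave.  GFP_ATOMIC is
|      |  * used here, presumably because callers may not be able to sleep.
|      |  */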
| 3021 | static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init) |
| 3022 | { |
| 3023 | struct mlx4_ib_demux_work **dm; |
| 3024 | struct mlx4_dev *dev = ibdev->dev; |
| 3025 | int i; |
| 3026 | unsigned long flags; |
| 3027 | struct mlx4_active_ports actv_ports; |
| 3028 | unsigned int ports; |
| 3029 | unsigned int first_port; |
| 3030 | |
| 3031 | if (!mlx4_is_master(dev)) |
| 3032 | return; |
| 3033 | |
| 3034 | actv_ports = mlx4_get_active_ports(dev, slave); |
| 3035 | ports = bitmap_weight(actv_ports.ports, dev->caps.num_ports);
| 3036 | first_port = find_first_bit(actv_ports.ports, dev->caps.num_ports);
| 3037 | |
| 3038 | dm = kcalloc(ports, sizeof(*dm), GFP_ATOMIC); |
| 3039 | if (!dm) |
| 3040 | return; |
| 3041 | |
| 3042 | for (i = 0; i < ports; i++) { |
| 3043 | dm[i] = kmalloc(sizeof (struct mlx4_ib_demux_work), GFP_ATOMIC); |
| 3044 | if (!dm[i]) { |
| 3045 | while (--i >= 0) |
| 3046 | kfree(dm[i]);
| 3047 | goto out; |
| 3048 | } |
| 3049 | INIT_WORK(&dm[i]->work, mlx4_ib_tunnels_update_work); |
| 3050 | dm[i]->port = first_port + i + 1; |
| 3051 | dm[i]->slave = slave; |
| 3052 | dm[i]->do_init = do_init; |
| 3053 | dm[i]->dev = ibdev; |
| 3054 | } |
| 3055 | /* initialize or tear down tunnel QPs for the slave */ |
| 3056 | spin_lock_irqsave(&ibdev->sriov.going_down_lock, flags); |
| 3057 | if (!ibdev->sriov.is_going_down) { |
| 3058 | for (i = 0; i < ports; i++) |
| 3059 | queue_work(wq: ibdev->sriov.demux[i].ud_wq, work: &dm[i]->work); |
| 3060 | spin_unlock_irqrestore(lock: &ibdev->sriov.going_down_lock, flags); |
| 3061 | } else { |
| 3062 | spin_unlock_irqrestore(lock: &ibdev->sriov.going_down_lock, flags); |
| 3063 | for (i = 0; i < ports; i++) |
| 3064 | kfree(objp: dm[i]); |
| 3065 | } |
| 3066 | out: |
| 3067 | kfree(objp: dm); |
| 3068 | return; |
| 3069 | } |
| 3070 | |
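/*
 * Catastrophic error handler: walk every QP on this device and, for each
 * send or receive queue with work still outstanding, arm its CQ for a
 * reset notification; then invoke the completion handlers of all the
 * collected CQs so consumers observe the failure.
 */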
static void mlx4_ib_handle_catas_error(struct mlx4_ib_dev *ibdev)
{
        struct mlx4_ib_qp *mqp;
        unsigned long flags_qp;
        unsigned long flags_cq;
        struct mlx4_ib_cq *send_mcq, *recv_mcq;
        struct list_head cq_notify_list;
        struct mlx4_cq *mcq;
        unsigned long flags;

        pr_warn("mlx4_ib_handle_catas_error started\n");
        INIT_LIST_HEAD(&cq_notify_list);

        /* Go over the QP list of this ibdev, syncing with QP create/destroy. */
        spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags);

        list_for_each_entry(mqp, &ibdev->qp_list, qps_list) {
                spin_lock_irqsave(&mqp->sq.lock, flags_qp);
                if (mqp->sq.tail != mqp->sq.head) {
                        send_mcq = to_mcq(mqp->ibqp.send_cq);
                        spin_lock_irqsave(&send_mcq->lock, flags_cq);
                        if (send_mcq->mcq.comp &&
                            mqp->ibqp.send_cq->comp_handler) {
                                if (!send_mcq->mcq.reset_notify_added) {
                                        send_mcq->mcq.reset_notify_added = 1;
                                        list_add_tail(&send_mcq->mcq.reset_notify,
                                                      &cq_notify_list);
                                }
                        }
                        spin_unlock_irqrestore(&send_mcq->lock, flags_cq);
                }
                spin_unlock_irqrestore(&mqp->sq.lock, flags_qp);
                /* Now, handle the QP's receive queue */
                spin_lock_irqsave(&mqp->rq.lock, flags_qp);
                /* no handling is needed for SRQ */
                if (!mqp->ibqp.srq) {
                        if (mqp->rq.tail != mqp->rq.head) {
                                recv_mcq = to_mcq(mqp->ibqp.recv_cq);
                                spin_lock_irqsave(&recv_mcq->lock, flags_cq);
                                if (recv_mcq->mcq.comp &&
                                    mqp->ibqp.recv_cq->comp_handler) {
                                        if (!recv_mcq->mcq.reset_notify_added) {
                                                recv_mcq->mcq.reset_notify_added = 1;
                                                list_add_tail(&recv_mcq->mcq.reset_notify,
                                                              &cq_notify_list);
                                        }
                                }
                                spin_unlock_irqrestore(&recv_mcq->lock,
                                                       flags_cq);
                        }
                }
                spin_unlock_irqrestore(&mqp->rq.lock, flags_qp);
        }

        list_for_each_entry(mcq, &cq_notify_list, reset_notify) {
                mcq->comp(mcq);
        }
        spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags);
        pr_warn("mlx4_ib_handle_catas_error ended\n");
}

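/*
 * When the two ports are bonded they are exposed as a single IB port:
 * report ACTIVE if any slave netdev is running with carrier, otherwise
 * DOWN, and dispatch the aggregate state as an event on port 1.
 */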
static void handle_bonded_port_state_event(struct work_struct *work)
{
        struct ib_event_work *ew =
                container_of(work, struct ib_event_work, work);
        struct mlx4_ib_dev *ibdev = ew->ib_dev;
        enum ib_port_state bonded_port_state = IB_PORT_NOP;
        int i;
        struct ib_event ibev;

        kfree(ew);
        spin_lock_bh(&ibdev->iboe.lock);
        for (i = 0; i < MLX4_MAX_PORTS; ++i) {
                struct net_device *curr_netdev = ibdev->iboe.netdevs[i];
                enum ib_port_state curr_port_state;

                if (!curr_netdev)
                        continue;

                curr_port_state =
                        (netif_running(curr_netdev) &&
                         netif_carrier_ok(curr_netdev)) ?
                        IB_PORT_ACTIVE : IB_PORT_DOWN;

                bonded_port_state = (bonded_port_state != IB_PORT_ACTIVE) ?
                        curr_port_state : IB_PORT_ACTIVE;
        }
        spin_unlock_bh(&ibdev->iboe.lock);

        ibev.device = &ibdev->ib_dev;
        ibev.element.port_num = 1;
        ibev.event = (bonded_port_state == IB_PORT_ACTIVE) ?
                IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR;

        ib_dispatch_event(&ibev);
}

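/*
 * Re-query the SL-to-VL mapping for @port and cache it in mdev->sl2vl;
 * on failure fall back to an all-zero mapping.
 */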
void mlx4_ib_sl2vl_update(struct mlx4_ib_dev *mdev, int port)
{
        u64 sl2vl;
        int err;

        err = mlx4_ib_query_sl2vl(&mdev->ib_dev, port, &sl2vl);
        if (err) {
                pr_err("Unable to get current sl to vl mapping for port %d. Using all zeroes (%d)\n",
                       port, err);
                sl2vl = 0;
        }
        atomic64_set(&mdev->sl2vl[port - 1], sl2vl);
}

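/* Deferred-work wrapper around mlx4_ib_sl2vl_update(). */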
static void ib_sl2vl_update_work(struct work_struct *work)
{
        struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
        struct mlx4_ib_dev *mdev = ew->ib_dev;
        int port = ew->port;

        mlx4_ib_sl2vl_update(mdev, port);

        kfree(ew);
}

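/*
 * Queue an SL-to-VL refresh for @port on the driver workqueue. If the
 * work item cannot be allocated, the update is skipped and the cached
 * mapping stays stale until the next event.
 */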
void mlx4_sched_ib_sl2vl_update_work(struct mlx4_ib_dev *ibdev,
                                     int port)
{
        struct ib_event_work *ew;

        ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
        if (ew) {
                INIT_WORK(&ew->work, ib_sl2vl_update_work);
                ew->port = port;
                ew->ib_dev = ibdev;
                queue_work(wq, &ew->work);
        }
}

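/*
 * Core event notifier: translate mlx4 device events (port up/down,
 * catastrophic error, port management change, slave init/shutdown)
 * into IB events or deferred work. Port up/down on a bonded device
 * is aggregated by handle_bonded_port_state_event() instead.
 */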
static int mlx4_ib_event(struct notifier_block *this, unsigned long event,
                         void *param)
{
        struct mlx4_ib_dev *ibdev =
                container_of(this, struct mlx4_ib_dev, mlx_nb);
        struct mlx4_dev *dev = ibdev->dev;
        struct ib_event ibev;
        struct mlx4_eqe *eqe = NULL;
        struct ib_event_work *ew;
        int p = 0;

        if (mlx4_is_bonded(dev) &&
            ((event == MLX4_DEV_EVENT_PORT_UP) ||
             (event == MLX4_DEV_EVENT_PORT_DOWN))) {
                ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
                if (!ew)
                        return NOTIFY_DONE;
                INIT_WORK(&ew->work, handle_bonded_port_state_event);
                ew->ib_dev = ibdev;
                queue_work(wq, &ew->work);
                return NOTIFY_DONE;
        }

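        /*
         * Decode the event payload: an EQE for port management changes,
         * nothing for a catastrophic error, and a port or slave number
         * for everything else.
         */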
        switch (event) {
        case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
                break;
        case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
                eqe = (struct mlx4_eqe *)param;
                break;
        default:
                p = *(int *)param;
                break;
        }

        switch (event) {
        case MLX4_DEV_EVENT_PORT_UP:
                if (p > ibdev->num_ports)
                        return NOTIFY_DONE;
                if (!mlx4_is_slave(dev) &&
                    rdma_port_get_link_layer(&ibdev->ib_dev, p) ==
                    IB_LINK_LAYER_INFINIBAND) {
                        if (mlx4_is_master(dev))
                                mlx4_ib_invalidate_all_guid_record(ibdev, p);
                        if (ibdev->dev->flags & MLX4_FLAG_SECURE_HOST &&
                            !(ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT))
                                mlx4_sched_ib_sl2vl_update_work(ibdev, p);
                }
                ibev.event = IB_EVENT_PORT_ACTIVE;
                break;

        case MLX4_DEV_EVENT_PORT_DOWN:
                if (p > ibdev->num_ports)
                        return NOTIFY_DONE;
                ibev.event = IB_EVENT_PORT_ERR;
                break;

        case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
                ibdev->ib_active = false;
                ibev.event = IB_EVENT_DEVICE_FATAL;
                mlx4_ib_handle_catas_error(ibdev);
                break;

        case MLX4_DEV_EVENT_PORT_MGMT_CHANGE:
                ew = kmalloc(sizeof(*ew), GFP_ATOMIC);
                if (!ew)
                        return NOTIFY_DONE;

                INIT_WORK(&ew->work, handle_port_mgmt_change_event);
                memcpy(&ew->ib_eqe, eqe, sizeof(*eqe));
                ew->ib_dev = ibdev;
                /* need to queue only for port owner, which uses GEN_EQE */
                if (mlx4_is_master(dev))
                        queue_work(wq, &ew->work);
                else
                        handle_port_mgmt_change_event(&ew->work);
                return NOTIFY_DONE;

        case MLX4_DEV_EVENT_SLAVE_INIT:
                /* here, p is the slave id */
                do_slave_init(ibdev, p, 1);
                if (mlx4_is_master(dev)) {
                        int i;

                        for (i = 1; i <= ibdev->num_ports; i++) {
                                if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
                                    == IB_LINK_LAYER_INFINIBAND)
                                        mlx4_ib_slave_alias_guid_event(ibdev,
                                                                       p, i,
                                                                       1);
                        }
                }
                return NOTIFY_DONE;

        case MLX4_DEV_EVENT_SLAVE_SHUTDOWN:
                if (mlx4_is_master(dev)) {
                        int i;

                        for (i = 1; i <= ibdev->num_ports; i++) {
                                if (rdma_port_get_link_layer(&ibdev->ib_dev, i)
                                    == IB_LINK_LAYER_INFINIBAND)
                                        mlx4_ib_slave_alias_guid_event(ibdev,
                                                                       p, i,
                                                                       0);
                        }
                }
                /* here, p is the slave id */
                do_slave_init(ibdev, p, 0);
                return NOTIFY_DONE;

        default:
                return NOTIFY_DONE;
        }

        ibev.device = &ibdev->ib_dev;
        ibev.element.port_num = mlx4_is_bonded(ibdev->dev) ? 1 : (u8)p;

        ib_dispatch_event(&ibev);
        return NOTIFY_DONE;
}

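/* Match the ".ib" auxiliary device exposed by the mlx4 core driver. */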
static const struct auxiliary_device_id mlx4_ib_id_table[] = {
        { .name = MLX4_ADEV_NAME ".ib" },
        {},
};

MODULE_DEVICE_TABLE(auxiliary, mlx4_ib_id_table);

static struct mlx4_adrv mlx4_ib_adrv = {
        .adrv = {
                .name = "ib",
                .probe = mlx4_ib_probe,
                .remove = mlx4_ib_remove,
                .id_table = mlx4_ib_id_table,
        },
        .protocol = MLX4_PROT_IB_IPV6,
        .flags = MLX4_INTFF_BONDING
};

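/*
 * Module init: create the ordered driver workqueue, bring up the
 * QP-event, CM and multicast-group (MCG) subsystems, then register
 * the auxiliary driver; unwind in reverse order on any failure.
 */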
static int __init mlx4_ib_init(void)
{
        int err;

        wq = alloc_ordered_workqueue("mlx4_ib", WQ_MEM_RECLAIM);
        if (!wq)
                return -ENOMEM;

        err = mlx4_ib_qp_event_init();
        if (err)
                goto clean_qp_event;

        err = mlx4_ib_cm_init();
        if (err)
                goto clean_wq;

        err = mlx4_ib_mcg_init();
        if (err)
                goto clean_cm;

        err = mlx4_register_auxiliary_driver(&mlx4_ib_adrv);
        if (err)
                goto clean_mcg;

        return 0;

clean_mcg:
        mlx4_ib_mcg_destroy();

clean_cm:
        mlx4_ib_cm_destroy();

clean_wq:
        mlx4_ib_qp_event_cleanup();

clean_qp_event:
        destroy_workqueue(wq);
        return err;
}

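/* Module exit: tear everything down in the reverse order of init. */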
static void __exit mlx4_ib_cleanup(void)
{
        mlx4_unregister_auxiliary_driver(&mlx4_ib_adrv);
        mlx4_ib_mcg_destroy();
        mlx4_ib_cm_destroy();
        mlx4_ib_qp_event_cleanup();
        destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);