// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */

/* devlink port lifecycle for e-switch vports: attribute setup, registration
 * with the devlink core, and rate-leaf (QoS) object handling for PF/VF,
 * EC-VF and SF ports.
 */

#include <linux/mlx5/driver.h>
#include "eswitch.h"

/* Fill @ppid from the NIC system image GUID so every port of the same
 * physical device reports an identical parent (switch) id.
 */
static void
mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid)
{
	u64 parent_id;

	parent_id = mlx5_query_nic_system_image_guid(dev);
	ppid->id_len = sizeof(parent_id);
	memcpy(ppid->id, &parent_id, sizeof(parent_id));
}

/* A PF/VF devlink port is created only for: the PF vport when running on the
 * embedded CPU PF, regular VF vports, and EC VF vports.
 */
static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num)
{
	return (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) ||
	       mlx5_eswitch_is_vf_vport(esw, vport_num) ||
	       mlx5_core_is_ec_vf_vport(esw->dev, vport_num);
}

/* Set switch_id and PCI PF/VF attributes on @dl_port before registration.
 * When this device is the e-switch manager for an external host (ECPF case),
 * the port is marked external and given a non-zero controller number derived
 * from the host number. EC VFs are always reported with controller 0 and
 * external=false.
 */
static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
							   u16 vport_num,
							   struct devlink_port *dl_port)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct netdev_phys_item_id ppid = {};
	u32 controller_num = 0;
	bool external;
	u16 pfnum;

	mlx5_esw_get_port_parent_id(dev, &ppid);
	pfnum = mlx5_get_dev_index(dev);
	external = mlx5_core_is_ecpf_esw_manager(dev);
	if (external)
		/* +1 so external controllers are numbered starting at 1;
		 * 0 denotes the local controller.
		 */
		controller_num = dev->priv.eswitch->offloads.host_number + 1;

	if (vport_num == MLX5_VPORT_PF) {
		memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
		dl_port->attrs.switch_id.id_len = ppid.id_len;
		devlink_port_attrs_pci_pf_set(dl_port, controller_num, pfnum, external);
	} else if (mlx5_eswitch_is_vf_vport(esw, vport_num)) {
		memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
		dl_port->attrs.switch_id.id_len = ppid.id_len;
		/* VF number is vport_num - 1 (vport numbering is offset by
		 * the PF vport).
		 */
		devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum,
					      vport_num - 1, external);
	} else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) {
		memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len);
		dl_port->attrs.switch_id.id_len = ppid.id_len;
		/* EC VFs belong to the local (embedded) controller: always
		 * controller 0, never external.
		 */
		devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum,
					      vport_num - 1, false);
	}
}

/* Allocate and initialize the devlink port object for a PF/VF/EC-VF vport.
 *
 * Returns 0 for unsupported vports (no port is created) or on success,
 * -ENOMEM on allocation failure. On success ownership of the allocation is
 * held in vport->dl_port and released by
 * mlx5_esw_offloads_pf_vf_devlink_port_cleanup().
 */
int
mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw,
					  struct mlx5_vport *vport)
{
	struct mlx5_devlink_port *dl_port;
	u16 vport_num = vport->vport;

	if (!mlx5_esw_devlink_port_supported(esw, vport_num))
		return 0;

	dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL);
	if (!dl_port)
		return -ENOMEM;

	mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(esw, vport_num,
						       &dl_port->dl_port);

	vport->dl_port = dl_port;
	mlx5_devlink_port_init(dl_port, vport);
	return 0;
}

/* Free the port allocated by mlx5_esw_offloads_pf_vf_devlink_port_init().
 * A no-op for vports that never got a port (dl_port == NULL).
 */
void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw,
						  struct mlx5_vport *vport)
{
	if (!vport->dl_port)
		return;

	kfree(vport->dl_port);
	vport->dl_port = NULL;
}

/* devlink port function ops for PF/VF/EC-VF ports. */
static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = {
	.port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get,
	.port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set,
	.port_fn_roce_get = mlx5_devlink_port_fn_roce_get,
	.port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
	.port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get,
	.port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set,
#ifdef CONFIG_XFRM_OFFLOAD
	.port_fn_ipsec_crypto_get = mlx5_devlink_port_fn_ipsec_crypto_get,
	.port_fn_ipsec_crypto_set = mlx5_devlink_port_fn_ipsec_crypto_set,
	.port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get,
	.port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set,
#endif /* CONFIG_XFRM_OFFLOAD */
	.port_fn_max_io_eqs_get = mlx5_devlink_port_fn_max_io_eqs_get,
	.port_fn_max_io_eqs_set = mlx5_devlink_port_fn_max_io_eqs_set,
};

/* Set switch_id and PCI SF attributes on @dl_port. An SF is considered
 * external exactly when it belongs to a non-zero controller (!!controller).
 */
static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw,
							struct devlink_port *dl_port,
							u32 controller, u32 sfnum)
{
	struct mlx5_core_dev *dev = esw->dev;
	struct netdev_phys_item_id ppid = {};
	u16 pfnum;

	pfnum = mlx5_get_dev_index(dev);
	mlx5_esw_get_port_parent_id(dev, &ppid);

	memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len);
	dl_port->attrs.switch_id.id_len = ppid.id_len;
	devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller);
}

/* Initialize a caller-provided devlink port object for an SF vport.
 *
 * Unlike the PF/VF path, @dl_port is supplied (and owned) by the caller, so
 * the matching cleanup only detaches it from the vport. Always returns 0.
 */
int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport,
					   struct mlx5_devlink_port *dl_port,
					   u32 controller, u32 sfnum)
{
	mlx5_esw_offloads_sf_devlink_port_attrs_set(esw, &dl_port->dl_port, controller, sfnum);

	vport->dl_port = dl_port;
	mlx5_devlink_port_init(dl_port, vport);
	return 0;
}

/* Detach the SF port from the vport; the port memory is owned by the SF
 * layer, so nothing is freed here.
 */
void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	vport->dl_port = NULL;
}

/* devlink port function ops for SF ports; SF-manager-only callbacks are
 * compiled in conditionally.
 */
static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = {
#ifdef CONFIG_MLX5_SF_MANAGER
	.port_del = mlx5_devlink_sf_port_del,
#endif
	.port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get,
	.port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set,
	.port_fn_roce_get = mlx5_devlink_port_fn_roce_get,
	.port_fn_roce_set = mlx5_devlink_port_fn_roce_set,
#ifdef CONFIG_MLX5_SF_MANAGER
	.port_fn_state_get = mlx5_devlink_sf_port_fn_state_get,
	.port_fn_state_set = mlx5_devlink_sf_port_fn_state_set,
#endif
	.port_fn_max_io_eqs_get = mlx5_devlink_port_fn_max_io_eqs_get,
	.port_fn_max_io_eqs_set = mlx5_devlink_port_fn_max_io_eqs_set,
};

/* Register the vport's devlink port (if one was initialized) with the
 * devlink core and create its rate leaf object.
 *
 * The ops table is chosen by vport type; vports that are neither SF nor
 * PF/VF are registered without ops. Returns 0 if the vport has no port or on
 * success; on rate-leaf failure the just-registered port is unregistered
 * before returning the error.
 */
int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	struct mlx5_core_dev *dev = esw->dev;
	const struct devlink_port_ops *ops;
	struct mlx5_devlink_port *dl_port;
	u16 vport_num = vport->vport;
	unsigned int dl_port_index;
	struct devlink *devlink;
	int err;

	dl_port = vport->dl_port;
	if (!dl_port)
		return 0;

	if (mlx5_esw_is_sf_vport(esw, vport_num))
		ops = &mlx5_esw_dl_sf_port_ops;
	else if (mlx5_eswitch_is_pf_vf_vport(esw, vport_num))
		ops = &mlx5_esw_pf_vf_dl_port_ops;
	else
		ops = NULL;

	devlink = priv_to_devlink(dev);
	dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num);
	err = devl_port_register_with_ops(devlink, &dl_port->dl_port, dl_port_index, ops);
	if (err)
		return err;

	err = devl_rate_leaf_create(&dl_port->dl_port, vport, NULL);
	if (err)
		goto rate_err;

	return 0;

rate_err:
	devl_port_unregister(&dl_port->dl_port);
	return err;
}

/* Undo mlx5_esw_offloads_devlink_port_register(): detach the vport from its
 * QoS group, destroy the rate leaf, then unregister the port. No-op if the
 * vport has no devlink port.
 */
void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, struct mlx5_vport *vport)
{
	struct mlx5_devlink_port *dl_port;

	if (!vport->dl_port)
		return;
	dl_port = vport->dl_port;

	/* Drop QoS group membership before the rate leaf goes away. */
	mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL);
	devl_rate_leaf_destroy(&dl_port->dl_port);

	devl_port_unregister(&dl_port->dl_port);
}

/* Look up the devlink port for @vport_num, propagating the lookup error via
 * ERR_CAST. NOTE(review): vport->dl_port is not NULL-checked here — callers
 * appear expected to only ask for vports that have a port; confirm at call
 * sites.
 */
struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num)
{
	struct mlx5_vport *vport;

	vport = mlx5_eswitch_get_vport(esw, vport_num);
	return IS_ERR(vport) ? ERR_CAST(vport) : &vport->dl_port->dl_port;
}