en_netdev.c revision 292113
1/* 2 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 * 32 */ 33 34#include <linux/etherdevice.h> 35#include <linux/delay.h> 36#include <linux/slab.h> 37#ifdef CONFIG_NET_RX_BUSY_POLL 38#include <net/busy_poll.h> 39#endif 40 41#include <linux/list.h> 42#include <linux/if_ether.h> 43 44#include <linux/mlx4/driver.h> 45#include <linux/mlx4/device.h> 46#include <linux/mlx4/cmd.h> 47#include <linux/mlx4/cq.h> 48 49#include <sys/sockio.h> 50#include <sys/sysctl.h> 51 52#include "mlx4_en.h" 53#include "en_port.h" 54 55static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv); 56static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv); 57static int mlx4_en_unit; 58 59#ifdef CONFIG_NET_RX_BUSY_POLL 60/* must be called with local_bh_disable()d */ 61static int mlx4_en_low_latency_recv(struct napi_struct *napi) 62{ 63 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); 64 struct net_device *dev = cq->dev; 65 struct mlx4_en_priv *priv = netdev_priv(dev); 66 struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring]; 67 int done; 68 69 if (!priv->port_up) 70 return LL_FLUSH_FAILED; 71 72 if (!mlx4_en_cq_lock_poll(cq)) 73 return LL_FLUSH_BUSY; 74 75 done = mlx4_en_process_rx_cq(dev, cq, 4); 76#ifdef LL_EXTENDED_STATS 77 if (done) 78 rx_ring->cleaned += done; 79 else 80 rx_ring->misses++; 81#endif 82 83 mlx4_en_cq_unlock_poll(cq); 84 85 return done; 86} 87#endif /* CONFIG_NET_RX_BUSY_POLL */ 88 89#ifdef CONFIG_RFS_ACCEL 90 91struct mlx4_en_filter { 92 struct list_head next; 93 struct work_struct work; 94 95 u8 ip_proto; 96 __be32 src_ip; 97 __be32 dst_ip; 98 __be16 src_port; 99 __be16 dst_port; 100 101 int rxq_index; 102 struct mlx4_en_priv *priv; 103 u32 flow_id; /* RFS infrastructure id */ 104 int id; /* mlx4_en driver id */ 105 u64 reg_id; /* Flow steering API id */ 106 u8 activated; /* Used to prevent expiry before filter 107 * is attached 108 */ 109 struct hlist_node filter_chain; 110}; 111 112static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv); 113 114static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto) 115{ 116 switch (ip_proto) { 117 case IPPROTO_UDP: 118 return MLX4_NET_TRANS_RULE_ID_UDP; 119 case IPPROTO_TCP: 120 return MLX4_NET_TRANS_RULE_ID_TCP; 121 default: 122 return -EPROTONOSUPPORT; 123 } 124}; 125 126static void mlx4_en_filter_work(struct work_struct *work) 127{ 128 struct mlx4_en_filter *filter = container_of(work, 129 struct mlx4_en_filter, 130 work); 131 struct mlx4_en_priv *priv = filter->priv; 132 struct mlx4_spec_list spec_tcp_udp = { 133 .id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto), 134 { 135 .tcp_udp = { 136 .dst_port = filter->dst_port, 137 .dst_port_msk = (__force __be16)-1, 138 .src_port = filter->src_port, 139 .src_port_msk = (__force __be16)-1, 140 }, 141 }, 142 }; 143 struct mlx4_spec_list spec_ip = { 144 .id = MLX4_NET_TRANS_RULE_ID_IPV4, 145 { 146 .ipv4 = { 147 .dst_ip = filter->dst_ip, 148 .dst_ip_msk = (__force __be32)-1, 149 .src_ip = filter->src_ip, 150 .src_ip_msk = (__force __be32)-1, 151 }, 152 }, 153 }; 154 struct mlx4_spec_list spec_eth = { 155 .id = MLX4_NET_TRANS_RULE_ID_ETH, 156 }; 157 struct mlx4_net_trans_rule rule = { 158 .list = LIST_HEAD_INIT(rule.list), 159 .queue_mode = MLX4_NET_TRANS_Q_LIFO, 160 .exclusive = 1, 161 .allow_loopback = 1, 162 .promisc_mode = MLX4_FS_REGULAR, 163 .port = priv->port, 164 .priority = MLX4_DOMAIN_RFS, 165 }; 166 int rc; 167 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 168 169 if (spec_tcp_udp.id < 0) { 170 en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n", 171 filter->ip_proto); 172 goto ignore; 173 } 174 list_add_tail(&spec_eth.list, &rule.list); 175 list_add_tail(&spec_ip.list, &rule.list); 176 list_add_tail(&spec_tcp_udp.list, &rule.list); 177 178 rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn; 179 memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN); 180 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 181 182 filter->activated = 0; 183 184 if (filter->reg_id) { 185 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); 186 if (rc && rc != -ENOENT) 187 en_err(priv, "Error detaching flow. rc = %d\n", rc); 188 } 189 190 rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id); 191 if (rc) 192 en_err(priv, "Error attaching flow. err = %d\n", rc); 193 194ignore: 195 mlx4_en_filter_rfs_expire(priv); 196 197 filter->activated = 1; 198} 199 200static inline struct hlist_head * 201filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 202 __be16 src_port, __be16 dst_port) 203{ 204 unsigned long l; 205 int bucket_idx; 206 207 l = (__force unsigned long)src_port | 208 ((__force unsigned long)dst_port << 2); 209 l ^= (__force unsigned long)(src_ip ^ dst_ip); 210 211 bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT); 212 213 return &priv->filter_hash[bucket_idx]; 214} 215 216static struct mlx4_en_filter * 217mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip, 218 __be32 dst_ip, u8 ip_proto, __be16 src_port, 219 __be16 dst_port, u32 flow_id) 220{ 221 struct mlx4_en_filter *filter = NULL; 222 223 filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC); 224 if (!filter) 225 return NULL; 226 227 filter->priv = priv; 228 filter->rxq_index = rxq_index; 229 INIT_WORK(&filter->work, mlx4_en_filter_work); 230 231 filter->src_ip = src_ip; 232 filter->dst_ip = dst_ip; 233 filter->ip_proto = ip_proto; 234 filter->src_port = src_port; 235 filter->dst_port = dst_port; 236 237 filter->flow_id = flow_id; 238 239 filter->id = priv->last_filter_id++ % RPS_NO_FILTER; 240 241 list_add_tail(&filter->next, &priv->filters); 242 hlist_add_head(&filter->filter_chain, 243 filter_hash_bucket(priv, src_ip, dst_ip, src_port, 244 dst_port)); 245 246 return filter; 247} 248 249static void mlx4_en_filter_free(struct mlx4_en_filter *filter) 250{ 251 struct mlx4_en_priv *priv = filter->priv; 252 int rc; 253 254 list_del(&filter->next); 255 256 rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id); 257 if (rc && rc != -ENOENT) 258 en_err(priv, "Error detaching flow. rc = %d\n", rc); 259 260 kfree(filter); 261} 262 263static inline struct mlx4_en_filter * 264mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip, 265 u8 ip_proto, __be16 src_port, __be16 dst_port) 266{ 267 struct hlist_node *elem; 268 struct mlx4_en_filter *filter; 269 struct mlx4_en_filter *ret = NULL; 270 271 hlist_for_each_entry(filter, elem, 272 filter_hash_bucket(priv, src_ip, dst_ip, 273 src_port, dst_port), 274 filter_chain) { 275 if (filter->src_ip == src_ip && 276 filter->dst_ip == dst_ip && 277 filter->ip_proto == ip_proto && 278 filter->src_port == src_port && 279 filter->dst_port == dst_port) { 280 ret = filter; 281 break; 282 } 283 } 284 285 return ret; 286} 287 288static int 289mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, 290 u16 rxq_index, u32 flow_id) 291{ 292 struct mlx4_en_priv *priv = netdev_priv(net_dev); 293 struct mlx4_en_filter *filter; 294 const struct iphdr *ip; 295 const __be16 *ports; 296 u8 ip_proto; 297 __be32 src_ip; 298 __be32 dst_ip; 299 __be16 src_port; 300 __be16 dst_port; 301 int nhoff = skb_network_offset(skb); 302 int ret = 0; 303 304 if (skb->protocol != htons(ETH_P_IP)) 305 return -EPROTONOSUPPORT; 306 307 ip = (const struct iphdr *)(skb->data + nhoff); 308 if (ip_is_fragment(ip)) 309 return -EPROTONOSUPPORT; 310 311 if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP)) 312 return -EPROTONOSUPPORT; 313 ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl); 314 315 ip_proto = ip->protocol; 316 src_ip = ip->saddr; 317 dst_ip = ip->daddr; 318 src_port = ports[0]; 319 dst_port = ports[1]; 320 321 spin_lock_bh(&priv->filters_lock); 322 filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto, 323 src_port, dst_port); 324 if (filter) { 325 if (filter->rxq_index == rxq_index) 326 goto out; 327 328 filter->rxq_index = rxq_index; 329 } else { 330 filter = mlx4_en_filter_alloc(priv, rxq_index, 331 src_ip, dst_ip, ip_proto, 332 src_port, dst_port, flow_id); 333 if (!filter) { 334 ret = -ENOMEM; 335 goto err; 336 } 337 } 338 339 queue_work(priv->mdev->workqueue, &filter->work); 340 341out: 342 ret = filter->id; 343err: 344 spin_unlock_bh(&priv->filters_lock); 345 346 return ret; 347} 348 349void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv, 350 struct mlx4_en_rx_ring *rx_ring) 351{ 352 struct mlx4_en_filter *filter, *tmp; 353 LIST_HEAD(del_list); 354 355 spin_lock_bh(&priv->filters_lock); 356 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { 357 list_move(&filter->next, &del_list); 358 hlist_del(&filter->filter_chain); 359 } 360 spin_unlock_bh(&priv->filters_lock); 361 362 list_for_each_entry_safe(filter, tmp, &del_list, next) { 363 cancel_work_sync(&filter->work); 364 mlx4_en_filter_free(filter); 365 } 366} 367 368static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv) 369{ 370 struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL; 371 LIST_HEAD(del_list); 372 int i = 0; 373 374 spin_lock_bh(&priv->filters_lock); 375 list_for_each_entry_safe(filter, tmp, &priv->filters, next) { 376 if (i > MLX4_EN_FILTER_EXPIRY_QUOTA) 377 break; 378 379 if (filter->activated && 380 !work_pending(&filter->work) && 381 rps_may_expire_flow(priv->dev, 382 filter->rxq_index, filter->flow_id, 383 filter->id)) { 384 list_move(&filter->next, &del_list); 385 hlist_del(&filter->filter_chain); 386 } else 387 last_filter = filter; 388 389 i++; 390 } 391 392 if (last_filter && (&last_filter->next != priv->filters.next)) 393 list_move(&priv->filters, &last_filter->next); 394 395 spin_unlock_bh(&priv->filters_lock); 396 397 list_for_each_entry_safe(filter, tmp, &del_list, next) 398 mlx4_en_filter_free(filter); 399} 400#endif 401 402static void mlx4_en_vlan_rx_add_vid(void *arg, struct net_device *dev, u16 vid) 403{ 404 struct mlx4_en_priv *priv = netdev_priv(dev); 405 struct mlx4_en_dev *mdev = priv->mdev; 406 int err; 407 int idx; 408 409 if (arg != priv) 410 return; 411 412 en_dbg(HW, priv, "adding VLAN:%d\n", vid); 413 414 set_bit(vid, priv->active_vlans); 415 416 /* Add VID to port VLAN filter */ 417 mutex_lock(&mdev->state_lock); 418 if (mdev->device_up && priv->port_up) { 419 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 420 if (err) 421 en_err(priv, "Failed configuring VLAN filter\n"); 422 } 423 if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx)) 424 en_dbg(HW, priv, "failed adding vlan %d\n", vid); 425 mutex_unlock(&mdev->state_lock); 426 427} 428 429static void mlx4_en_vlan_rx_kill_vid(void *arg, struct net_device *dev, u16 vid) 430{ 431 struct mlx4_en_priv *priv = netdev_priv(dev); 432 struct mlx4_en_dev *mdev = priv->mdev; 433 int err; 434 435 if (arg != priv) 436 return; 437 438 en_dbg(HW, priv, "Killing VID:%d\n", vid); 439 440 clear_bit(vid, priv->active_vlans); 441 442 /* Remove VID from port VLAN filter */ 443 mutex_lock(&mdev->state_lock); 444 mlx4_unregister_vlan(mdev->dev, priv->port, vid); 445 446 if (mdev->device_up && priv->port_up) { 447 err = mlx4_SET_VLAN_FLTR(mdev->dev, priv); 448 if (err) 449 en_err(priv, "Failed configuring VLAN filter\n"); 450 } 451 mutex_unlock(&mdev->state_lock); 452 453} 454 455static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv, 456 unsigned char *mac, int *qpn, u64 *reg_id) 457{ 458 struct mlx4_en_dev *mdev = priv->mdev; 459 struct mlx4_dev *dev = mdev->dev; 460 int err; 461 462 switch (dev->caps.steering_mode) { 463 case MLX4_STEERING_MODE_B0: { 464 struct mlx4_qp qp; 465 u8 gid[16] = {0}; 466 467 qp.qpn = *qpn; 468 memcpy(&gid[10], mac, ETH_ALEN); 469 gid[5] = priv->port; 470 471 err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH); 472 break; 473 } 474 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 475 struct mlx4_spec_list spec_eth = { {NULL} }; 476 __be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16); 477 478 struct mlx4_net_trans_rule rule = { 479 .queue_mode = MLX4_NET_TRANS_Q_FIFO, 480 .exclusive = 0, 481 .allow_loopback = 1, 482 .promisc_mode = MLX4_FS_REGULAR, 483 .priority = MLX4_DOMAIN_NIC, 484 }; 485 486 rule.port = priv->port; 487 rule.qpn = *qpn; 488 INIT_LIST_HEAD(&rule.list); 489 490 spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH; 491 memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN); 492 memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN); 493 list_add_tail(&spec_eth.list, &rule.list); 494 495 err = mlx4_flow_attach(dev, &rule, reg_id); 496 break; 497 } 498 default: 499 return -EINVAL; 500 } 501 if (err) 502 en_warn(priv, "Failed Attaching Unicast\n"); 503 504 return err; 505} 506 507static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv, 508 unsigned char *mac, int qpn, u64 reg_id) 509{ 510 struct mlx4_en_dev *mdev = priv->mdev; 511 struct mlx4_dev *dev = mdev->dev; 512 513 switch (dev->caps.steering_mode) { 514 case MLX4_STEERING_MODE_B0: { 515 struct mlx4_qp qp; 516 u8 gid[16] = {0}; 517 518 qp.qpn = qpn; 519 memcpy(&gid[10], mac, ETH_ALEN); 520 gid[5] = priv->port; 521 522 mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH); 523 break; 524 } 525 case MLX4_STEERING_MODE_DEVICE_MANAGED: { 526 mlx4_flow_detach(dev, reg_id); 527 break; 528 } 529 default: 530 en_err(priv, "Invalid steering mode.\n"); 531 } 532} 533 534static int mlx4_en_get_qp(struct mlx4_en_priv *priv) 535{ 536 struct mlx4_en_dev *mdev = priv->mdev; 537 struct mlx4_dev *dev = mdev->dev; 538 struct mlx4_mac_entry *entry; 539 int index = 0; 540 int err = 0; 541 u64 reg_id; 542 int *qpn = &priv->base_qpn; 543 u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev)); 544 545 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", 546 IF_LLADDR(priv->dev)); 547 index = mlx4_register_mac(dev, priv->port, mac); 548 if (index < 0) { 549 err = index; 550 en_err(priv, "Failed adding MAC: %pM\n", 551 IF_LLADDR(priv->dev)); 552 return err; 553 } 554 555 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 556 int base_qpn = mlx4_get_base_qpn(dev, priv->port); 557 *qpn = base_qpn + index; 558 return 0; 559 } 560 561 err = mlx4_qp_reserve_range(dev, 1, 1, qpn, 0); 562 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); 563 if (err) { 564 en_err(priv, "Failed to reserve qp for mac registration\n"); 565 goto qp_err; 566 } 567 568 err = mlx4_en_uc_steer_add(priv, IF_LLADDR(priv->dev), qpn, ®_id); 569 if (err) 570 goto steer_err; 571 572 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 573 if (!entry) { 574 err = -ENOMEM; 575 goto alloc_err; 576 } 577 memcpy(entry->mac, IF_LLADDR(priv->dev), sizeof(entry->mac)); 578 entry->reg_id = reg_id; 579 580 hlist_add_head(&entry->hlist, 581 &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]); 582 583 return 0; 584 585alloc_err: 586 mlx4_en_uc_steer_release(priv, IF_LLADDR(priv->dev), *qpn, reg_id); 587 588steer_err: 589 mlx4_qp_release_range(dev, *qpn, 1); 590 591qp_err: 592 mlx4_unregister_mac(dev, priv->port, mac); 593 return err; 594} 595 596static void mlx4_en_put_qp(struct mlx4_en_priv *priv) 597{ 598 struct mlx4_en_dev *mdev = priv->mdev; 599 struct mlx4_dev *dev = mdev->dev; 600 int qpn = priv->base_qpn; 601 u64 mac; 602 603 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 604 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev)); 605 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", 606 IF_LLADDR(priv->dev)); 607 mlx4_unregister_mac(dev, priv->port, mac); 608 } else { 609 struct mlx4_mac_entry *entry; 610 struct hlist_node *n, *tmp; 611 struct hlist_head *bucket; 612 unsigned int i; 613 614 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 615 bucket = &priv->mac_hash[i]; 616 hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) { 617 mac = mlx4_mac_to_u64(entry->mac); 618 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", 619 entry->mac); 620 mlx4_en_uc_steer_release(priv, entry->mac, 621 qpn, entry->reg_id); 622 623 mlx4_unregister_mac(dev, priv->port, mac); 624 hlist_del(&entry->hlist); 625 kfree(entry); 626 } 627 } 628 629 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", 630 priv->port, qpn); 631 mlx4_qp_release_range(dev, qpn, 1); 632 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; 633 } 634} 635 636static void mlx4_en_clear_list(struct net_device *dev) 637{ 638 struct mlx4_en_priv *priv = netdev_priv(dev); 639 struct mlx4_en_mc_list *tmp, *mc_to_del; 640 641 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) { 642 list_del(&mc_to_del->list); 643 kfree(mc_to_del); 644 } 645} 646 647static void mlx4_en_cache_mclist(struct net_device *dev) 648{ 649 struct ifmultiaddr *ifma; 650 struct mlx4_en_mc_list *tmp; 651 struct mlx4_en_priv *priv = netdev_priv(dev); 652 653 if_maddr_rlock(dev); 654 TAILQ_FOREACH(ifma, &dev->if_multiaddrs, ifma_link) { 655 if (ifma->ifma_addr->sa_family != AF_LINK) 656 continue; 657 if (((struct sockaddr_dl *)ifma->ifma_addr)->sdl_alen != 658 ETHER_ADDR_LEN) 659 continue; 660 /* Make sure the list didn't grow. */ 661 tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC); 662 if (tmp == NULL) { 663 en_err(priv, "Failed to allocate multicast list\n"); 664 break; 665 } 666 memcpy(tmp->addr, 667 LLADDR((struct sockaddr_dl *)ifma->ifma_addr), ETH_ALEN); 668 list_add_tail(&tmp->list, &priv->mc_list); 669 } 670 if_maddr_runlock(dev); 671} 672 673static void update_mclist_flags(struct mlx4_en_priv *priv, 674 struct list_head *dst, 675 struct list_head *src) 676{ 677 struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc; 678 bool found; 679 680 /* Find all the entries that should be removed from dst, 681 * These are the entries that are not found in src 682 */ 683 list_for_each_entry(dst_tmp, dst, list) { 684 found = false; 685 list_for_each_entry(src_tmp, src, list) { 686 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) { 687 found = true; 688 break; 689 } 690 } 691 if (!found) 692 dst_tmp->action = MCLIST_REM; 693 } 694 695 /* Add entries that exist in src but not in dst 696 * mark them as need to add 697 */ 698 list_for_each_entry(src_tmp, src, list) { 699 found = false; 700 list_for_each_entry(dst_tmp, dst, list) { 701 if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) { 702 dst_tmp->action = MCLIST_NONE; 703 found = true; 704 break; 705 } 706 } 707 if (!found) { 708 new_mc = kmalloc(sizeof(struct mlx4_en_mc_list), 709 GFP_KERNEL); 710 if (!new_mc) { 711 en_err(priv, "Failed to allocate current multicast list\n"); 712 return; 713 } 714 memcpy(new_mc, src_tmp, 715 sizeof(struct mlx4_en_mc_list)); 716 new_mc->action = MCLIST_ADD; 717 list_add_tail(&new_mc->list, dst); 718 } 719 } 720} 721 722static void mlx4_en_set_rx_mode(struct net_device *dev) 723{ 724 struct mlx4_en_priv *priv = netdev_priv(dev); 725 726 if (!priv->port_up) 727 return; 728 729 queue_work(priv->mdev->workqueue, &priv->rx_mode_task); 730} 731 732static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv, 733 struct mlx4_en_dev *mdev) 734{ 735 int err = 0; 736 if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) { 737 priv->flags |= MLX4_EN_FLAG_PROMISC; 738 739 /* Enable promiscouos mode */ 740 switch (mdev->dev->caps.steering_mode) { 741 case MLX4_STEERING_MODE_DEVICE_MANAGED: 742 err = mlx4_flow_steer_promisc_add(mdev->dev, 743 priv->port, 744 priv->base_qpn, 745 MLX4_FS_ALL_DEFAULT); 746 if (err) 747 en_err(priv, "Failed enabling promiscuous mode\n"); 748 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 749 break; 750 751 case MLX4_STEERING_MODE_B0: 752 err = mlx4_unicast_promisc_add(mdev->dev, 753 priv->base_qpn, 754 priv->port); 755 if (err) 756 en_err(priv, "Failed enabling unicast promiscuous mode\n"); 757 758 /* Add the default qp number as multicast 759 * promisc 760 */ 761 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 762 err = mlx4_multicast_promisc_add(mdev->dev, 763 priv->base_qpn, 764 priv->port); 765 if (err) 766 en_err(priv, "Failed enabling multicast promiscuous mode\n"); 767 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 768 } 769 break; 770 771 case MLX4_STEERING_MODE_A0: 772 err = mlx4_SET_PORT_qpn_calc(mdev->dev, 773 priv->port, 774 priv->base_qpn, 775 1); 776 if (err) 777 en_err(priv, "Failed enabling promiscuous mode\n"); 778 break; 779 } 780 781 /* Disable port multicast filter (unconditionally) */ 782 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 783 0, MLX4_MCAST_DISABLE); 784 if (err) 785 en_err(priv, "Failed disabling multicast filter\n"); 786 } 787} 788 789static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv, 790 struct mlx4_en_dev *mdev) 791{ 792 int err = 0; 793 794 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 795 796 /* Disable promiscouos mode */ 797 switch (mdev->dev->caps.steering_mode) { 798 case MLX4_STEERING_MODE_DEVICE_MANAGED: 799 err = mlx4_flow_steer_promisc_remove(mdev->dev, 800 priv->port, 801 MLX4_FS_ALL_DEFAULT); 802 if (err) 803 en_err(priv, "Failed disabling promiscuous mode\n"); 804 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 805 break; 806 807 case MLX4_STEERING_MODE_B0: 808 err = mlx4_unicast_promisc_remove(mdev->dev, 809 priv->base_qpn, 810 priv->port); 811 if (err) 812 en_err(priv, "Failed disabling unicast promiscuous mode\n"); 813 /* Disable Multicast promisc */ 814 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 815 err = mlx4_multicast_promisc_remove(mdev->dev, 816 priv->base_qpn, 817 priv->port); 818 if (err) 819 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 820 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 821 } 822 break; 823 824 case MLX4_STEERING_MODE_A0: 825 err = mlx4_SET_PORT_qpn_calc(mdev->dev, 826 priv->port, 827 priv->base_qpn, 0); 828 if (err) 829 en_err(priv, "Failed disabling promiscuous mode\n"); 830 break; 831 } 832} 833 834static void mlx4_en_do_multicast(struct mlx4_en_priv *priv, 835 struct net_device *dev, 836 struct mlx4_en_dev *mdev) 837{ 838 struct mlx4_en_mc_list *mclist, *tmp; 839 u8 mc_list[16] = {0}; 840 int err = 0; 841 u64 mcast_addr = 0; 842 843 844 /* Enable/disable the multicast filter according to IFF_ALLMULTI */ 845 if (dev->if_flags & IFF_ALLMULTI) { 846 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 847 0, MLX4_MCAST_DISABLE); 848 if (err) 849 en_err(priv, "Failed disabling multicast filter\n"); 850 851 /* Add the default qp number as multicast promisc */ 852 if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) { 853 switch (mdev->dev->caps.steering_mode) { 854 case MLX4_STEERING_MODE_DEVICE_MANAGED: 855 err = mlx4_flow_steer_promisc_add(mdev->dev, 856 priv->port, 857 priv->base_qpn, 858 MLX4_FS_MC_DEFAULT); 859 break; 860 861 case MLX4_STEERING_MODE_B0: 862 err = mlx4_multicast_promisc_add(mdev->dev, 863 priv->base_qpn, 864 priv->port); 865 break; 866 867 case MLX4_STEERING_MODE_A0: 868 break; 869 } 870 if (err) 871 en_err(priv, "Failed entering multicast promisc mode\n"); 872 priv->flags |= MLX4_EN_FLAG_MC_PROMISC; 873 } 874 } else { 875 /* Disable Multicast promisc */ 876 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 877 switch (mdev->dev->caps.steering_mode) { 878 case MLX4_STEERING_MODE_DEVICE_MANAGED: 879 err = mlx4_flow_steer_promisc_remove(mdev->dev, 880 priv->port, 881 MLX4_FS_MC_DEFAULT); 882 break; 883 884 case MLX4_STEERING_MODE_B0: 885 err = mlx4_multicast_promisc_remove(mdev->dev, 886 priv->base_qpn, 887 priv->port); 888 break; 889 890 case MLX4_STEERING_MODE_A0: 891 break; 892 } 893 if (err) 894 en_err(priv, "Failed disabling multicast promiscuous mode\n"); 895 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 896 } 897 898 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 899 0, MLX4_MCAST_DISABLE); 900 if (err) 901 en_err(priv, "Failed disabling multicast filter\n"); 902 903 /* Flush mcast filter and init it with broadcast address */ 904 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST, 905 1, MLX4_MCAST_CONFIG); 906 907 /* Update multicast list - we cache all addresses so they won't 908 * change while HW is updated holding the command semaphor */ 909 mlx4_en_cache_mclist(dev); 910 list_for_each_entry(mclist, &priv->mc_list, list) { 911 mcast_addr = mlx4_mac_to_u64(mclist->addr); 912 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 913 mcast_addr, 0, MLX4_MCAST_CONFIG); 914 } 915 err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 916 0, MLX4_MCAST_ENABLE); 917 if (err) 918 en_err(priv, "Failed enabling multicast filter\n"); 919 920 update_mclist_flags(priv, &priv->curr_list, &priv->mc_list); 921 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 922 if (mclist->action == MCLIST_REM) { 923 /* detach this address and delete from list */ 924 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 925 mc_list[5] = priv->port; 926 err = mlx4_multicast_detach(mdev->dev, 927 &priv->rss_map.indir_qp, 928 mc_list, 929 MLX4_PROT_ETH, 930 mclist->reg_id); 931 if (err) 932 en_err(priv, "Fail to detach multicast address\n"); 933 934 /* remove from list */ 935 list_del(&mclist->list); 936 kfree(mclist); 937 } else if (mclist->action == MCLIST_ADD) { 938 /* attach the address */ 939 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 940 /* needed for B0 steering support */ 941 mc_list[5] = priv->port; 942 err = mlx4_multicast_attach(mdev->dev, 943 &priv->rss_map.indir_qp, 944 mc_list, 945 priv->port, 0, 946 MLX4_PROT_ETH, 947 &mclist->reg_id); 948 if (err) 949 en_err(priv, "Fail to attach multicast address\n"); 950 951 } 952 } 953 } 954} 955 956static void mlx4_en_do_set_rx_mode(struct work_struct *work) 957{ 958 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 959 rx_mode_task); 960 struct mlx4_en_dev *mdev = priv->mdev; 961 struct net_device *dev = priv->dev; 962 963 964 mutex_lock(&mdev->state_lock); 965 if (!mdev->device_up) { 966 en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n"); 967 goto out; 968 } 969 if (!priv->port_up) { 970 en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n"); 971 goto out; 972 } 973 if (!mlx4_en_QUERY_PORT(mdev, priv->port)) { 974 if (priv->port_state.link_state) { 975 priv->last_link_state = MLX4_DEV_EVENT_PORT_UP; 976 /* update netif baudrate */ 977 priv->dev->if_baudrate = 978 IF_Mbps(priv->port_state.link_speed); 979 /* Important note: the following call for if_link_state_change 980 * is needed for interface up scenario (start port, link state 981 * change) */ 982 if_link_state_change(priv->dev, LINK_STATE_UP); 983 en_dbg(HW, priv, "Link Up\n"); 984 } 985 } 986 987 /* Promsicuous mode: disable all filters */ 988 if ((dev->if_flags & IFF_PROMISC) || 989 (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) { 990 mlx4_en_set_promisc_mode(priv, mdev); 991 goto out; 992 } 993 994 /* Not in promiscuous mode */ 995 if (priv->flags & MLX4_EN_FLAG_PROMISC) 996 mlx4_en_clear_promisc_mode(priv, mdev); 997 998 mlx4_en_do_multicast(priv, dev, mdev); 999out: 1000 mutex_unlock(&mdev->state_lock); 1001} 1002 1003#ifdef CONFIG_NET_POLL_CONTROLLER 1004static void mlx4_en_netpoll(struct net_device *dev) 1005{ 1006 struct mlx4_en_priv *priv = netdev_priv(dev); 1007 struct mlx4_en_cq *cq; 1008 unsigned long flags; 1009 int i; 1010 1011 for (i = 0; i < priv->rx_ring_num; i++) { 1012 cq = priv->rx_cq[i]; 1013 spin_lock_irqsave(&cq->lock, flags); 1014 napi_synchronize(&cq->napi); 1015 mlx4_en_process_rx_cq(dev, cq, 0); 1016 spin_unlock_irqrestore(&cq->lock, flags); 1017 } 1018} 1019#endif 1020 1021static void mlx4_en_watchdog_timeout(void *arg) 1022{ 1023 struct mlx4_en_priv *priv = arg; 1024 struct mlx4_en_dev *mdev = priv->mdev; 1025 1026 en_dbg(DRV, priv, "Scheduling watchdog\n"); 1027 queue_work(mdev->workqueue, &priv->watchdog_task); 1028 if (priv->port_up) 1029 callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT, 1030 mlx4_en_watchdog_timeout, priv); 1031} 1032 1033 1034 1035static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv) 1036{ 1037 struct mlx4_en_cq *cq; 1038 int i; 1039 1040 /* If we haven't received a specific coalescing setting 1041 * (module param), we set the moderation parameters as follows: 1042 * - moder_cnt is set to the number of mtu sized packets to 1043 * satisfy our coelsing target. 1044 * - moder_time is set to a fixed value. 1045 */ 1046 priv->rx_frames = MLX4_EN_RX_COAL_TARGET / priv->dev->if_mtu + 1; 1047 priv->rx_usecs = MLX4_EN_RX_COAL_TIME; 1048 priv->tx_frames = MLX4_EN_TX_COAL_PKTS; 1049 priv->tx_usecs = MLX4_EN_TX_COAL_TIME; 1050 en_dbg(INTR, priv, "Default coalesing params for mtu: %u - " 1051 "rx_frames:%d rx_usecs:%d\n", 1052 (unsigned)priv->dev->if_mtu, priv->rx_frames, priv->rx_usecs); 1053 1054 /* Setup cq moderation params */ 1055 for (i = 0; i < priv->rx_ring_num; i++) { 1056 cq = priv->rx_cq[i]; 1057 cq->moder_cnt = priv->rx_frames; 1058 cq->moder_time = priv->rx_usecs; 1059 priv->last_moder_time[i] = MLX4_EN_AUTO_CONF; 1060 priv->last_moder_packets[i] = 0; 1061 priv->last_moder_bytes[i] = 0; 1062 } 1063 1064 for (i = 0; i < priv->tx_ring_num; i++) { 1065 cq = priv->tx_cq[i]; 1066 cq->moder_cnt = priv->tx_frames; 1067 cq->moder_time = priv->tx_usecs; 1068 } 1069 1070 /* Reset auto-moderation params */ 1071 priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW; 1072 priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW; 1073 priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH; 1074 priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH; 1075 priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL; 1076 priv->adaptive_rx_coal = 1; 1077 priv->last_moder_jiffies = 0; 1078 priv->last_moder_tx_packets = 0; 1079} 1080 1081static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv) 1082{ 1083 unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies); 1084 struct mlx4_en_cq *cq; 1085 unsigned long packets; 1086 unsigned long rate; 1087 unsigned long avg_pkt_size; 1088 unsigned long rx_packets; 1089 unsigned long rx_bytes; 1090 unsigned long rx_pkt_diff; 1091 int moder_time; 1092 int ring, err; 1093 1094 if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ) 1095 return; 1096 1097 for (ring = 0; ring < priv->rx_ring_num; ring++) { 1098 spin_lock(&priv->stats_lock); 1099 rx_packets = priv->rx_ring[ring]->packets; 1100 rx_bytes = priv->rx_ring[ring]->bytes; 1101 spin_unlock(&priv->stats_lock); 1102 1103 rx_pkt_diff = ((unsigned long) (rx_packets - 1104 priv->last_moder_packets[ring])); 1105 packets = rx_pkt_diff; 1106 rate = packets * HZ / period; 1107 avg_pkt_size = packets ? ((unsigned long) (rx_bytes - 1108 priv->last_moder_bytes[ring])) / packets : 0; 1109 1110 /* Apply auto-moderation only when packet rate 1111 * exceeds a rate that it matters */ 1112 if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) && 1113 avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) { 1114 if (rate < priv->pkt_rate_low) 1115 moder_time = priv->rx_usecs_low; 1116 else if (rate > priv->pkt_rate_high) 1117 moder_time = priv->rx_usecs_high; 1118 else 1119 moder_time = (rate - priv->pkt_rate_low) * 1120 (priv->rx_usecs_high - priv->rx_usecs_low) / 1121 (priv->pkt_rate_high - priv->pkt_rate_low) + 1122 priv->rx_usecs_low; 1123 } else { 1124 moder_time = priv->rx_usecs_low; 1125 } 1126 1127 if (moder_time != priv->last_moder_time[ring]) { 1128 priv->last_moder_time[ring] = moder_time; 1129 cq = priv->rx_cq[ring]; 1130 cq->moder_time = moder_time; 1131 err = mlx4_en_set_cq_moder(priv, cq); 1132 if (err) 1133 en_err(priv, "Failed modifying moderation for cq:%d\n", 1134 ring); 1135 } 1136 priv->last_moder_packets[ring] = rx_packets; 1137 priv->last_moder_bytes[ring] = rx_bytes; 1138 } 1139 1140 priv->last_moder_jiffies = jiffies; 1141} 1142 1143static void mlx4_en_do_get_stats(struct work_struct *work) 1144{ 1145 struct delayed_work *delay = to_delayed_work(work); 1146 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1147 stats_task); 1148 struct mlx4_en_dev *mdev = priv->mdev; 1149 int err; 1150 1151 mutex_lock(&mdev->state_lock); 1152 if (mdev->device_up) { 1153 if (priv->port_up) { 1154 err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0); 1155 if (err) 1156 en_dbg(HW, priv, "Could not update stats\n"); 1157 1158 mlx4_en_auto_moderation(priv); 1159 } 1160 1161 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 1162 } 1163 mutex_unlock(&mdev->state_lock); 1164} 1165 1166/* mlx4_en_service_task - Run service task for tasks that needed to be done 1167 * periodically 1168 */ 1169static void mlx4_en_service_task(struct work_struct *work) 1170{ 1171 struct delayed_work *delay = to_delayed_work(work); 1172 struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv, 1173 service_task); 1174 struct mlx4_en_dev *mdev = priv->mdev; 1175 1176 mutex_lock(&mdev->state_lock); 1177 if (mdev->device_up) { 1178 queue_delayed_work(mdev->workqueue, &priv->service_task, 1179 SERVICE_TASK_DELAY); 1180 } 1181 mutex_unlock(&mdev->state_lock); 1182} 1183 1184static void mlx4_en_linkstate(struct work_struct *work) 1185{ 1186 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1187 linkstate_task); 1188 struct mlx4_en_dev *mdev = priv->mdev; 1189 int linkstate = priv->link_state; 1190 1191 mutex_lock(&mdev->state_lock); 1192 /* If observable port state changed set carrier state and 1193 * report to system log */ 1194 if (priv->last_link_state != linkstate) { 1195 if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) { 1196 en_info(priv, "Link Down\n"); 1197 if_link_state_change(priv->dev, LINK_STATE_DOWN); 1198 /* update netif baudrate */ 1199 priv->dev->if_baudrate = 0; 1200 1201 /* make sure the port is up before notifying the OS. 1202 * This is tricky since we get here on INIT_PORT and 1203 * in such case we can't tell the OS the port is up. 1204 * To solve this there is a call to if_link_state_change 1205 * in set_rx_mode. 1206 * */ 1207 } else if (priv->port_up && (linkstate == MLX4_DEV_EVENT_PORT_UP)){ 1208 if (mlx4_en_QUERY_PORT(priv->mdev, priv->port)) 1209 en_info(priv, "Query port failed\n"); 1210 priv->dev->if_baudrate = 1211 IF_Mbps(priv->port_state.link_speed); 1212 en_info(priv, "Link Up\n"); 1213 if_link_state_change(priv->dev, LINK_STATE_UP); 1214 } 1215 } 1216 priv->last_link_state = linkstate; 1217 mutex_unlock(&mdev->state_lock); 1218} 1219 1220 1221int mlx4_en_start_port(struct net_device *dev) 1222{ 1223 struct mlx4_en_priv *priv = netdev_priv(dev); 1224 struct mlx4_en_dev *mdev = priv->mdev; 1225 struct mlx4_en_cq *cq; 1226 struct mlx4_en_tx_ring *tx_ring; 1227 int rx_index = 0; 1228 int tx_index = 0; 1229 int err = 0; 1230 int i; 1231 int j; 1232 u8 mc_list[16] = {0}; 1233 1234 1235 if (priv->port_up) { 1236 en_dbg(DRV, priv, "start port called while port already up\n"); 1237 return 0; 1238 } 1239 1240 INIT_LIST_HEAD(&priv->mc_list); 1241 INIT_LIST_HEAD(&priv->curr_list); 1242 INIT_LIST_HEAD(&priv->ethtool_list); 1243 1244 /* Calculate Rx buf size */ 1245 dev->if_mtu = min(dev->if_mtu, priv->max_mtu); 1246 mlx4_en_calc_rx_buf(dev); 1247 priv->rx_alloc_size = max_t(int, 2 * roundup_pow_of_two(priv->rx_mb_size), 1248 PAGE_SIZE); 1249 priv->rx_alloc_order = get_order(priv->rx_alloc_size); 1250 priv->rx_buf_size = roundup_pow_of_two(priv->rx_mb_size); 1251 en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_mb_size); 1252 1253 /* Configure rx cq's and rings */ 1254 err = mlx4_en_activate_rx_rings(priv); 1255 if (err) { 1256 en_err(priv, "Failed to activate RX rings\n"); 1257 return err; 1258 } 1259 for (i = 0; i < priv->rx_ring_num; i++) { 1260 cq = priv->rx_cq[i]; 1261 1262 mlx4_en_cq_init_lock(cq); 1263 err = mlx4_en_activate_cq(priv, cq, i); 1264 if (err) { 1265 en_err(priv, "Failed activating Rx CQ\n"); 1266 goto cq_err; 1267 } 1268 for (j = 0; j < cq->size; j++) 1269 cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK; 1270 err = mlx4_en_set_cq_moder(priv, cq); 1271 if (err) { 1272 en_err(priv, "Failed setting cq moderation parameters"); 1273 mlx4_en_deactivate_cq(priv, cq); 1274 goto cq_err; 1275 } 1276 mlx4_en_arm_cq(priv, cq); 1277 priv->rx_ring[i]->cqn = cq->mcq.cqn; 1278 ++rx_index; 1279 } 1280 1281 /* Set qp number */ 1282 en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port); 1283 err = mlx4_en_get_qp(priv); 1284 if (err) { 1285 en_err(priv, "Failed getting eth qp\n"); 1286 goto cq_err; 1287 } 1288 mdev->mac_removed[priv->port] = 0; 1289 1290 /* gets default allocated counter index from func cap */ 1291 /* or sink counter index if no resources */ 1292 priv->counter_index = mdev->dev->caps.def_counter_index[priv->port - 1]; 1293 1294 en_dbg(DRV, priv, "%s: default counter index %d for port %d\n", 1295 __func__, priv->counter_index, priv->port); 1296 1297 err = mlx4_en_config_rss_steer(priv); 1298 if (err) { 1299 en_err(priv, "Failed configuring rss steering\n"); 1300 goto mac_err; 1301 } 1302 1303 err = mlx4_en_create_drop_qp(priv); 1304 if (err) 1305 goto rss_err; 1306 1307 /* Configure tx cq's and rings */ 1308 for (i = 0; i < priv->tx_ring_num; i++) { 1309 /* Configure cq */ 1310 cq = priv->tx_cq[i]; 1311 err = mlx4_en_activate_cq(priv, cq, i); 1312 if (err) { 1313 en_err(priv, "Failed activating Tx CQ\n"); 1314 goto tx_err; 1315 } 1316 err = mlx4_en_set_cq_moder(priv, cq); 1317 if (err) { 1318 en_err(priv, "Failed setting cq moderation parameters"); 1319 mlx4_en_deactivate_cq(priv, cq); 1320 goto tx_err; 1321 } 1322 en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i); 1323 cq->buf->wqe_index = cpu_to_be16(0xffff); 1324 1325 /* Configure ring */ 1326 tx_ring = priv->tx_ring[i]; 1327 1328 err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn, 1329 i / priv->num_tx_rings_p_up); 1330 if (err) { 1331 en_err(priv, "Failed activating Tx ring %d\n", i); 1332 mlx4_en_deactivate_cq(priv, cq); 1333 goto tx_err; 1334 } 1335 1336 /* Arm CQ for TX completions */ 1337 mlx4_en_arm_cq(priv, cq); 1338 1339 /* Set initial ownership of all Tx TXBBs to SW (1) */ 1340 for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE) 1341 *((u32 *) (tx_ring->buf + j)) = 0xffffffff; 1342 ++tx_index; 1343 } 1344 1345 /* Configure port */ 1346 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 1347 priv->rx_mb_size, 1348 priv->prof->tx_pause, 1349 priv->prof->tx_ppp, 1350 priv->prof->rx_pause, 1351 priv->prof->rx_ppp); 1352 if (err) { 1353 en_err(priv, "Failed setting port general configurations for port %d, with error %d\n", 1354 priv->port, err); 1355 goto tx_err; 1356 } 1357 /* Set default qp number */ 1358 err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0); 1359 if (err) { 1360 en_err(priv, "Failed setting default qp numbers\n"); 1361 goto tx_err; 1362 } 1363 1364 /* Init port */ 1365 en_dbg(HW, priv, "Initializing port\n"); 1366 err = mlx4_INIT_PORT(mdev->dev, priv->port); 1367 if (err) { 1368 en_err(priv, "Failed Initializing port\n"); 1369 goto tx_err; 1370 } 1371 1372 /* Attach rx QP to bradcast address */ 1373 memset(&mc_list[10], 0xff, ETH_ALEN); 1374 mc_list[5] = priv->port; /* needed for B0 steering support */ 1375 if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 1376 priv->port, 0, MLX4_PROT_ETH, 1377 &priv->broadcast_id)) 1378 mlx4_warn(mdev, "Failed Attaching Broadcast\n"); 1379 1380 /* Must redo promiscuous mode setup. */ 1381 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC); 1382 1383 /* Schedule multicast task to populate multicast list */ 1384 queue_work(mdev->workqueue, &priv->rx_mode_task); 1385 1386 mlx4_set_stats_bitmap(mdev->dev, priv->stats_bitmap); 1387 1388 priv->port_up = true; 1389 1390 /* Enable the queues. */ 1391 dev->if_drv_flags &= ~IFF_DRV_OACTIVE; 1392 dev->if_drv_flags |= IFF_DRV_RUNNING; 1393#ifdef CONFIG_DEBUG_FS 1394 mlx4_en_create_debug_files(priv); 1395#endif 1396 callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT, 1397 mlx4_en_watchdog_timeout, priv); 1398 1399 1400 return 0; 1401 1402tx_err: 1403 while (tx_index--) { 1404 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]); 1405 mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]); 1406 } 1407 mlx4_en_destroy_drop_qp(priv); 1408rss_err: 1409 mlx4_en_release_rss_steer(priv); 1410mac_err: 1411 mlx4_en_put_qp(priv); 1412cq_err: 1413 while (rx_index--) 1414 mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]); 1415 for (i = 0; i < priv->rx_ring_num; i++) 1416 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1417 1418 return err; /* need to close devices */ 1419} 1420 1421 1422void mlx4_en_stop_port(struct net_device *dev) 1423{ 1424 struct mlx4_en_priv *priv = netdev_priv(dev); 1425 struct mlx4_en_dev *mdev = priv->mdev; 1426 struct mlx4_en_mc_list *mclist, *tmp; 1427 int i; 1428 u8 mc_list[16] = {0}; 1429 1430 if (!priv->port_up) { 1431 en_dbg(DRV, priv, "stop port called while port already down\n"); 1432 return; 1433 } 1434 1435#ifdef CONFIG_DEBUG_FS 1436 mlx4_en_delete_debug_files(priv); 1437#endif 1438 1439 /* close port*/ 1440 mlx4_CLOSE_PORT(mdev->dev, priv->port); 1441 1442 /* Set port as not active */ 1443 priv->port_up = false; 1444 if (priv->counter_index != 0xff) { 1445 mlx4_counter_free(mdev->dev, priv->port, priv->counter_index); 1446 priv->counter_index = 0xff; 1447 } 1448 1449 /* Promsicuous mode */ 1450 if (mdev->dev->caps.steering_mode == 1451 MLX4_STEERING_MODE_DEVICE_MANAGED) { 1452 priv->flags &= ~(MLX4_EN_FLAG_PROMISC | 1453 MLX4_EN_FLAG_MC_PROMISC); 1454 mlx4_flow_steer_promisc_remove(mdev->dev, 1455 priv->port, 1456 MLX4_FS_ALL_DEFAULT); 1457 mlx4_flow_steer_promisc_remove(mdev->dev, 1458 priv->port, 1459 MLX4_FS_MC_DEFAULT); 1460 } else if (priv->flags & MLX4_EN_FLAG_PROMISC) { 1461 priv->flags &= ~MLX4_EN_FLAG_PROMISC; 1462 1463 /* Disable promiscouos mode */ 1464 mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn, 1465 priv->port); 1466 1467 /* Disable Multicast promisc */ 1468 if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) { 1469 mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn, 1470 priv->port); 1471 priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC; 1472 } 1473 } 1474 1475 /* Detach All multicasts */ 1476 memset(&mc_list[10], 0xff, ETH_ALEN); 1477 mc_list[5] = priv->port; /* needed for B0 steering support */ 1478 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, 1479 MLX4_PROT_ETH, priv->broadcast_id); 1480 list_for_each_entry(mclist, &priv->curr_list, list) { 1481 memcpy(&mc_list[10], mclist->addr, ETH_ALEN); 1482 mc_list[5] = priv->port; 1483 mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, 1484 mc_list, MLX4_PROT_ETH, mclist->reg_id); 1485 } 1486 mlx4_en_clear_list(dev); 1487 list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { 1488 list_del(&mclist->list); 1489 kfree(mclist); 1490 } 1491 1492 /* Flush multicast filter */ 1493 mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG); 1494 mlx4_en_destroy_drop_qp(priv); 1495 1496 /* Free TX Rings */ 1497 for (i = 0; i < priv->tx_ring_num; i++) { 1498 mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]); 1499 mlx4_en_deactivate_cq(priv, priv->tx_cq[i]); 1500 } 1501 msleep(10); 1502 1503 for (i = 0; i < priv->tx_ring_num; i++) 1504 mlx4_en_free_tx_buf(dev, priv->tx_ring[i]); 1505 1506 /* Free RSS qps */ 1507 mlx4_en_release_rss_steer(priv); 1508 1509 /* Unregister Mac address for the port */ 1510 mlx4_en_put_qp(priv); 1511 mdev->mac_removed[priv->port] = 1; 1512 1513 /* Free RX Rings */ 1514 for (i = 0; i < priv->rx_ring_num; i++) { 1515 struct mlx4_en_cq *cq = priv->rx_cq[i]; 1516 mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]); 1517 mlx4_en_deactivate_cq(priv, cq); 1518 } 1519 1520 callout_stop(&priv->watchdog_timer); 1521 1522 dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); 1523} 1524 1525static void mlx4_en_restart(struct work_struct *work) 1526{ 1527 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 1528 watchdog_task); 1529 struct mlx4_en_dev *mdev = priv->mdev; 1530 struct net_device *dev = priv->dev; 1531 struct mlx4_en_tx_ring *ring; 1532 int i; 1533 1534 1535 if (priv->blocked == 0 || priv->port_up == 0) 1536 return; 1537 for (i = 0; i < priv->tx_ring_num; i++) { 1538 ring = priv->tx_ring[i]; 1539 if (ring->blocked && 1540 ring->watchdog_time + MLX4_EN_WATCHDOG_TIMEOUT < ticks) 1541 goto reset; 1542 } 1543 return; 1544 1545reset: 1546 priv->port_stats.tx_timeout++; 1547 en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port); 1548 1549 mutex_lock(&mdev->state_lock); 1550 if (priv->port_up) { 1551 mlx4_en_stop_port(dev); 1552 //for (i = 0; i < priv->tx_ring_num; i++) 1553 // netdev_tx_reset_queue(priv->tx_ring[i]->tx_queue); 1554 if (mlx4_en_start_port(dev)) 1555 en_err(priv, "Failed restarting port %d\n", priv->port); 1556 } 1557 mutex_unlock(&mdev->state_lock); 1558} 1559 1560static void mlx4_en_clear_stats(struct net_device *dev) 1561{ 1562 struct mlx4_en_priv *priv = netdev_priv(dev); 1563 struct mlx4_en_dev *mdev = priv->mdev; 1564 int i; 1565 1566 if (!mlx4_is_slave(mdev->dev)) 1567 if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1)) 1568 en_dbg(HW, priv, "Failed dumping statistics\n"); 1569 1570 memset(&priv->pstats, 0, sizeof(priv->pstats)); 1571 memset(&priv->pkstats, 0, sizeof(priv->pkstats)); 1572 memset(&priv->port_stats, 0, sizeof(priv->port_stats)); 1573 memset(&priv->vport_stats, 0, sizeof(priv->vport_stats)); 1574 1575 for (i = 0; i < priv->tx_ring_num; i++) { 1576 priv->tx_ring[i]->bytes = 0; 1577 priv->tx_ring[i]->packets = 0; 1578 priv->tx_ring[i]->tx_csum = 0; 1579 priv->tx_ring[i]->oversized_packets = 0; 1580 } 1581 for (i = 0; i < priv->rx_ring_num; i++) { 1582 priv->rx_ring[i]->bytes = 0; 1583 priv->rx_ring[i]->packets = 0; 1584 priv->rx_ring[i]->csum_ok = 0; 1585 priv->rx_ring[i]->csum_none = 0; 1586 } 1587} 1588 1589static void mlx4_en_open(void* arg) 1590{ 1591 1592 struct mlx4_en_priv *priv; 1593 struct mlx4_en_dev *mdev; 1594 struct net_device *dev; 1595 int err = 0; 1596 1597 priv = arg; 1598 mdev = priv->mdev; 1599 dev = priv->dev; 1600 1601 1602 mutex_lock(&mdev->state_lock); 1603 1604 if (!mdev->device_up) { 1605 en_err(priv, "Cannot open - device down/disabled\n"); 1606 goto out; 1607 } 1608 1609 /* Reset HW statistics and SW counters */ 1610 mlx4_en_clear_stats(dev); 1611 1612 err = mlx4_en_start_port(dev); 1613 if (err) 1614 en_err(priv, "Failed starting port:%d\n", priv->port); 1615 1616out: 1617 mutex_unlock(&mdev->state_lock); 1618 return; 1619} 1620 1621void mlx4_en_free_resources(struct mlx4_en_priv *priv) 1622{ 1623 int i; 1624 1625#ifdef CONFIG_RFS_ACCEL 1626 if (priv->dev->rx_cpu_rmap) { 1627 free_irq_cpu_rmap(priv->dev->rx_cpu_rmap); 1628 priv->dev->rx_cpu_rmap = NULL; 1629 } 1630#endif 1631 1632 for (i = 0; i < priv->tx_ring_num; i++) { 1633 if (priv->tx_ring && priv->tx_ring[i]) 1634 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 1635 if (priv->tx_cq && priv->tx_cq[i]) 1636 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); 1637 } 1638 1639 for (i = 0; i < priv->rx_ring_num; i++) { 1640 if (priv->rx_ring[i]) 1641 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 1642 priv->prof->rx_ring_size, priv->stride); 1643 if (priv->rx_cq[i]) 1644 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 1645 } 1646 1647 if (priv->sysctl) 1648 sysctl_ctx_free(&priv->stat_ctx); 1649} 1650 1651int mlx4_en_alloc_resources(struct mlx4_en_priv *priv) 1652{ 1653 struct mlx4_en_port_profile *prof = priv->prof; 1654 int i; 1655 int node = 0; 1656 1657 /* Create rx Rings */ 1658 for (i = 0; i < priv->rx_ring_num; i++) { 1659 if (mlx4_en_create_cq(priv, &priv->rx_cq[i], 1660 prof->rx_ring_size, i, RX, node)) 1661 goto err; 1662 1663 if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i], 1664 prof->rx_ring_size, node)) 1665 goto err; 1666 } 1667 1668 /* Create tx Rings */ 1669 for (i = 0; i < priv->tx_ring_num; i++) { 1670 if (mlx4_en_create_cq(priv, &priv->tx_cq[i], 1671 prof->tx_ring_size, i, TX, node)) 1672 goto err; 1673 1674 if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i], 1675 prof->tx_ring_size, TXBB_SIZE, node, i)) 1676 goto err; 1677 } 1678 1679#ifdef CONFIG_RFS_ACCEL 1680 priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num); 1681 if (!priv->dev->rx_cpu_rmap) 1682 goto err; 1683#endif 1684 /* Re-create stat sysctls in case the number of rings changed. */ 1685 mlx4_en_sysctl_stat(priv); 1686 return 0; 1687 1688err: 1689 en_err(priv, "Failed to allocate NIC resources\n"); 1690 for (i = 0; i < priv->rx_ring_num; i++) { 1691 if (priv->rx_ring[i]) 1692 mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i], 1693 prof->rx_ring_size, 1694 priv->stride); 1695 if (priv->rx_cq[i]) 1696 mlx4_en_destroy_cq(priv, &priv->rx_cq[i]); 1697 } 1698 for (i = 0; i < priv->tx_ring_num; i++) { 1699 if (priv->tx_ring[i]) 1700 mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]); 1701 if (priv->tx_cq[i]) 1702 mlx4_en_destroy_cq(priv, &priv->tx_cq[i]); 1703 } 1704 priv->port_up = false; 1705 return -ENOMEM; 1706} 1707 1708struct en_port_attribute { 1709 struct attribute attr; 1710 ssize_t (*show)(struct en_port *, struct en_port_attribute *, char *buf); 1711 ssize_t (*store)(struct en_port *, struct en_port_attribute *, char *buf, size_t count); 1712}; 1713 1714#define PORT_ATTR_RO(_name) \ 1715struct en_port_attribute en_port_attr_##_name = __ATTR_RO(_name) 1716 1717#define EN_PORT_ATTR(_name, _mode, _show, _store) \ 1718struct en_port_attribute en_port_attr_##_name = __ATTR(_name, _mode, _show, _store) 1719 1720void mlx4_en_destroy_netdev(struct net_device *dev) 1721{ 1722 struct mlx4_en_priv *priv = netdev_priv(dev); 1723 struct mlx4_en_dev *mdev = priv->mdev; 1724 1725 en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port); 1726 1727 if (priv->vlan_attach != NULL) 1728 EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach); 1729 if (priv->vlan_detach != NULL) 1730 EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach); 1731 1732 /* Unregister device - this will close the port if it was up */ 1733 if (priv->registered) { 1734 mutex_lock(&mdev->state_lock); 1735 ether_ifdetach(dev); 1736 mutex_unlock(&mdev->state_lock); 1737 } 1738 1739 if (priv->allocated) 1740 mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE); 1741 1742 mutex_lock(&mdev->state_lock); 1743 mlx4_en_stop_port(dev); 1744 mutex_unlock(&mdev->state_lock); 1745 1746 1747 cancel_delayed_work(&priv->stats_task); 1748 cancel_delayed_work(&priv->service_task); 1749 /* flush any pending task for this netdev */ 1750 flush_workqueue(mdev->workqueue); 1751 callout_drain(&priv->watchdog_timer); 1752 1753 /* Detach the netdev so tasks would not attempt to access it */ 1754 mutex_lock(&mdev->state_lock); 1755 mdev->pndev[priv->port] = NULL; 1756 mutex_unlock(&mdev->state_lock); 1757 1758 1759 mlx4_en_free_resources(priv); 1760 1761 /* freeing the sysctl conf cannot be called from within mlx4_en_free_resources */ 1762 if (priv->sysctl) 1763 sysctl_ctx_free(&priv->conf_ctx); 1764 1765 kfree(priv->tx_ring); 1766 kfree(priv->tx_cq); 1767 1768 kfree(priv); 1769 if_free(dev); 1770 1771} 1772 1773static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu) 1774{ 1775 struct mlx4_en_priv *priv = netdev_priv(dev); 1776 struct mlx4_en_dev *mdev = priv->mdev; 1777 int err = 0; 1778 1779 en_dbg(DRV, priv, "Change MTU called - current:%u new:%u\n", 1780 (unsigned)dev->if_mtu, (unsigned)new_mtu); 1781 1782 if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) { 1783 en_err(priv, "Bad MTU size:%d.\n", new_mtu); 1784 return -EPERM; 1785 } 1786 mutex_lock(&mdev->state_lock); 1787 dev->if_mtu = new_mtu; 1788 if (dev->if_drv_flags & IFF_DRV_RUNNING) { 1789 if (!mdev->device_up) { 1790 /* NIC is probably restarting - let watchdog task reset 1791 * * the port */ 1792 en_dbg(DRV, priv, "Change MTU called with card down!?\n"); 1793 } else { 1794 mlx4_en_stop_port(dev); 1795 err = mlx4_en_start_port(dev); 1796 if (err) { 1797 en_err(priv, "Failed restarting port:%d\n", 1798 priv->port); 1799 queue_work(mdev->workqueue, &priv->watchdog_task); 1800 } 1801 } 1802 } 1803 mutex_unlock(&mdev->state_lock); 1804 return 0; 1805} 1806 1807static int mlx4_en_calc_media(struct mlx4_en_priv *priv) 1808{ 1809 int trans_type; 1810 int active; 1811 1812 active = IFM_ETHER; 1813 if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN) 1814 return (active); 1815 active |= IFM_FDX; 1816 trans_type = priv->port_state.transciver; 1817 /* XXX I don't know all of the transceiver values. */ 1818 switch (priv->port_state.link_speed) { 1819 case 1000: 1820 active |= IFM_1000_T; 1821 break; 1822 case 10000: 1823 if (trans_type > 0 && trans_type <= 0xC) 1824 active |= IFM_10G_SR; 1825 else if (trans_type == 0x80 || trans_type == 0) 1826 active |= IFM_10G_CX4; 1827 break; 1828 case 40000: 1829 active |= IFM_40G_CR4; 1830 break; 1831 } 1832 if (priv->prof->tx_pause) 1833 active |= IFM_ETH_TXPAUSE; 1834 if (priv->prof->rx_pause) 1835 active |= IFM_ETH_RXPAUSE; 1836 1837 return (active); 1838} 1839 1840static void mlx4_en_media_status(struct ifnet *dev, struct ifmediareq *ifmr) 1841{ 1842 struct mlx4_en_priv *priv; 1843 1844 priv = dev->if_softc; 1845 ifmr->ifm_status = IFM_AVALID; 1846 if (priv->last_link_state != MLX4_DEV_EVENT_PORT_DOWN) 1847 ifmr->ifm_status |= IFM_ACTIVE; 1848 ifmr->ifm_active = mlx4_en_calc_media(priv); 1849 1850 return; 1851} 1852 1853static int mlx4_en_media_change(struct ifnet *dev) 1854{ 1855 struct mlx4_en_priv *priv; 1856 struct ifmedia *ifm; 1857 int rxpause; 1858 int txpause; 1859 int error; 1860 1861 priv = dev->if_softc; 1862 ifm = &priv->media; 1863 rxpause = txpause = 0; 1864 error = 0; 1865 1866 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1867 return (EINVAL); 1868 switch (IFM_SUBTYPE(ifm->ifm_media)) { 1869 case IFM_AUTO: 1870 break; 1871 case IFM_10G_SR: 1872 case IFM_10G_CX4: 1873 case IFM_1000_T: 1874 case IFM_40G_CR4: 1875 if ((IFM_SUBTYPE(ifm->ifm_media) 1876 == IFM_SUBTYPE(mlx4_en_calc_media(priv))) 1877 && (ifm->ifm_media & IFM_FDX)) 1878 break; 1879 /* Fallthrough */ 1880 default: 1881 printf("%s: Only auto media type\n", if_name(dev)); 1882 return (EINVAL); 1883 } 1884 /* Allow user to set/clear pause */ 1885 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE) 1886 rxpause = 1; 1887 if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE) 1888 txpause = 1; 1889 if (priv->prof->tx_pause != txpause || priv->prof->rx_pause != rxpause) { 1890 priv->prof->tx_pause = txpause; 1891 priv->prof->rx_pause = rxpause; 1892 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port, 1893 priv->rx_mb_size + ETHER_CRC_LEN, priv->prof->tx_pause, 1894 priv->prof->tx_ppp, priv->prof->rx_pause, 1895 priv->prof->rx_ppp); 1896 } 1897 return (error); 1898} 1899 1900static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data) 1901{ 1902 struct mlx4_en_priv *priv; 1903 struct mlx4_en_dev *mdev; 1904 struct ifreq *ifr; 1905 int error; 1906 int mask; 1907 1908 error = 0; 1909 mask = 0; 1910 priv = dev->if_softc; 1911 mdev = priv->mdev; 1912 ifr = (struct ifreq *) data; 1913 switch (command) { 1914 1915 case SIOCSIFMTU: 1916 error = -mlx4_en_change_mtu(dev, ifr->ifr_mtu); 1917 break; 1918 case SIOCSIFFLAGS: 1919 if (dev->if_flags & IFF_UP) { 1920 if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0) { 1921 mutex_lock(&mdev->state_lock); 1922 mlx4_en_start_port(dev); 1923 mutex_unlock(&mdev->state_lock); 1924 } else { 1925 mlx4_en_set_rx_mode(dev); 1926 } 1927 } else { 1928 mutex_lock(&mdev->state_lock); 1929 if (dev->if_drv_flags & IFF_DRV_RUNNING) { 1930 mlx4_en_stop_port(dev); 1931 if_link_state_change(dev, LINK_STATE_DOWN); 1932 } 1933 mutex_unlock(&mdev->state_lock); 1934 } 1935 break; 1936 case SIOCADDMULTI: 1937 case SIOCDELMULTI: 1938 mlx4_en_set_rx_mode(dev); 1939 break; 1940 case SIOCSIFMEDIA: 1941 case SIOCGIFMEDIA: 1942 error = ifmedia_ioctl(dev, ifr, &priv->media, command); 1943 break; 1944 case SIOCSIFCAP: 1945 mutex_lock(&mdev->state_lock); 1946 mask = ifr->ifr_reqcap ^ dev->if_capenable; 1947 if (mask & IFCAP_TXCSUM) { 1948 dev->if_capenable ^= IFCAP_TXCSUM; 1949 dev->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP); 1950 1951 if (IFCAP_TSO4 & dev->if_capenable && 1952 !(IFCAP_TXCSUM & dev->if_capenable)) { 1953 dev->if_capenable &= ~IFCAP_TSO4; 1954 dev->if_hwassist &= ~CSUM_IP_TSO; 1955 if_printf(dev, 1956 "tso4 disabled due to -txcsum.\n"); 1957 } 1958 } 1959 if (mask & IFCAP_TXCSUM_IPV6) { 1960 dev->if_capenable ^= IFCAP_TXCSUM_IPV6; 1961 dev->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 1962 1963 if (IFCAP_TSO6 & dev->if_capenable && 1964 !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) { 1965 dev->if_capenable &= ~IFCAP_TSO6; 1966 dev->if_hwassist &= ~CSUM_IP6_TSO; 1967 if_printf(dev, 1968 "tso6 disabled due to -txcsum6.\n"); 1969 } 1970 } 1971 if (mask & IFCAP_RXCSUM) 1972 dev->if_capenable ^= IFCAP_RXCSUM; 1973 if (mask & IFCAP_RXCSUM_IPV6) 1974 dev->if_capenable ^= IFCAP_RXCSUM_IPV6; 1975 1976 if (mask & IFCAP_TSO4) { 1977 if (!(IFCAP_TSO4 & dev->if_capenable) && 1978 !(IFCAP_TXCSUM & dev->if_capenable)) { 1979 if_printf(dev, "enable txcsum first.\n"); 1980 error = EAGAIN; 1981 goto out; 1982 } 1983 dev->if_capenable ^= IFCAP_TSO4; 1984 dev->if_hwassist ^= CSUM_IP_TSO; 1985 } 1986 if (mask & IFCAP_TSO6) { 1987 if (!(IFCAP_TSO6 & dev->if_capenable) && 1988 !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) { 1989 if_printf(dev, "enable txcsum6 first.\n"); 1990 error = EAGAIN; 1991 goto out; 1992 } 1993 dev->if_capenable ^= IFCAP_TSO6; 1994 dev->if_hwassist ^= CSUM_IP6_TSO; 1995 } 1996 if (mask & IFCAP_LRO) 1997 dev->if_capenable ^= IFCAP_LRO; 1998 if (mask & IFCAP_VLAN_HWTAGGING) 1999 dev->if_capenable ^= IFCAP_VLAN_HWTAGGING; 2000 if (mask & IFCAP_VLAN_HWFILTER) 2001 dev->if_capenable ^= IFCAP_VLAN_HWFILTER; 2002 if (mask & IFCAP_WOL_MAGIC) 2003 dev->if_capenable ^= IFCAP_WOL_MAGIC; 2004 if (dev->if_drv_flags & IFF_DRV_RUNNING) 2005 mlx4_en_start_port(dev); 2006out: 2007 mutex_unlock(&mdev->state_lock); 2008 VLAN_CAPABILITIES(dev); 2009 break; 2010#if __FreeBSD_version >= 1100036 2011 case SIOCGI2C: { 2012 struct ifi2creq i2c; 2013 2014 error = copyin(ifr->ifr_data, &i2c, sizeof(i2c)); 2015 if (error) 2016 break; 2017 if (i2c.len > sizeof(i2c.data)) { 2018 error = EINVAL; 2019 break; 2020 } 2021 /* 2022 * Note that we ignore i2c.addr here. The driver hardcodes 2023 * the address to 0x50, while standard expects it to be 0xA0. 2024 */ 2025 error = mlx4_get_module_info(mdev->dev, priv->port, 2026 i2c.offset, i2c.len, i2c.data); 2027 if (error < 0) { 2028 error = -error; 2029 break; 2030 } 2031 error = copyout(&i2c, ifr->ifr_data, sizeof(i2c)); 2032 break; 2033 } 2034#endif 2035 default: 2036 error = ether_ioctl(dev, command, data); 2037 break; 2038 } 2039 2040 return (error); 2041} 2042 2043 2044int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 2045 struct mlx4_en_port_profile *prof) 2046{ 2047 struct net_device *dev; 2048 struct mlx4_en_priv *priv; 2049 uint8_t dev_addr[ETHER_ADDR_LEN]; 2050 int err; 2051 int i; 2052 2053 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 2054 dev = priv->dev = if_alloc(IFT_ETHER); 2055 if (dev == NULL) { 2056 en_err(priv, "Net device allocation failed\n"); 2057 kfree(priv); 2058 return -ENOMEM; 2059 } 2060 dev->if_softc = priv; 2061 if_initname(dev, "mlxen", atomic_fetchadd_int(&mlx4_en_unit, 1)); 2062 dev->if_mtu = ETHERMTU; 2063 dev->if_init = mlx4_en_open; 2064 dev->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 2065 dev->if_ioctl = mlx4_en_ioctl; 2066 dev->if_transmit = mlx4_en_transmit; 2067 dev->if_qflush = mlx4_en_qflush; 2068 dev->if_snd.ifq_maxlen = prof->tx_ring_size; 2069 2070 /* 2071 * Initialize driver private data 2072 */ 2073 priv->counter_index = 0xff; 2074 spin_lock_init(&priv->stats_lock); 2075 INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode); 2076 INIT_WORK(&priv->watchdog_task, mlx4_en_restart); 2077 INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate); 2078 INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats); 2079 INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task); 2080 callout_init(&priv->watchdog_timer, 1); 2081#ifdef CONFIG_RFS_ACCEL 2082 INIT_LIST_HEAD(&priv->filters); 2083 spin_lock_init(&priv->filters_lock); 2084#endif 2085 2086 priv->msg_enable = MLX4_EN_MSG_LEVEL; 2087 priv->dev = dev; 2088 priv->mdev = mdev; 2089 priv->ddev = &mdev->pdev->dev; 2090 priv->prof = prof; 2091 priv->port = port; 2092 priv->port_up = false; 2093 priv->flags = prof->flags; 2094 2095 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; 2096 priv->tx_ring_num = prof->tx_ring_num; 2097 priv->tx_ring = kcalloc(MAX_TX_RINGS, 2098 sizeof(struct mlx4_en_tx_ring *), GFP_KERNEL); 2099 if (!priv->tx_ring) { 2100 err = -ENOMEM; 2101 goto out; 2102 } 2103 priv->tx_cq = kcalloc(sizeof(struct mlx4_en_cq *), MAX_TX_RINGS, 2104 GFP_KERNEL); 2105 if (!priv->tx_cq) { 2106 err = -ENOMEM; 2107 goto out; 2108 } 2109 2110 priv->rx_ring_num = prof->rx_ring_num; 2111 priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0; 2112 priv->mac_index = -1; 2113 priv->last_ifq_jiffies = 0; 2114 priv->if_counters_rx_errors = 0; 2115 priv->if_counters_rx_no_buffer = 0; 2116#ifdef CONFIG_MLX4_EN_DCB 2117 if (!mlx4_is_slave(priv->mdev->dev)) { 2118 priv->dcbx_cap = DCB_CAP_DCBX_HOST; 2119 priv->flags |= MLX4_EN_FLAG_DCB_ENABLED; 2120 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) { 2121 dev->dcbnl_ops = &mlx4_en_dcbnl_ops; 2122 } else { 2123 en_info(priv, "QoS disabled - no HW support\n"); 2124 dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops; 2125 } 2126 } 2127#endif 2128 2129 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) 2130 INIT_HLIST_HEAD(&priv->mac_hash[i]); 2131 2132 /* Query for default mac and max mtu */ 2133 priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port]; 2134 priv->mac = mdev->dev->caps.def_mac[priv->port]; 2135 if (ILLEGAL_MAC(priv->mac)) { 2136#if BITS_PER_LONG == 64 2137 en_err(priv, "Port: %d, invalid mac burned: 0x%lx, quiting\n", 2138 priv->port, priv->mac); 2139#elif BITS_PER_LONG == 32 2140 en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quiting\n", 2141 priv->port, priv->mac); 2142#endif 2143 err = -EINVAL; 2144 goto out; 2145 } 2146 2147 priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) + 2148 DS_SIZE); 2149 2150 mlx4_en_sysctl_conf(priv); 2151 2152 err = mlx4_en_alloc_resources(priv); 2153 if (err) 2154 goto out; 2155 2156 /* Allocate page for receive rings */ 2157 err = mlx4_alloc_hwq_res(mdev->dev, &priv->res, 2158 MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE); 2159 if (err) { 2160 en_err(priv, "Failed to allocate page for rx qps\n"); 2161 goto out; 2162 } 2163 priv->allocated = 1; 2164 2165 /* 2166 * Set driver features 2167 */ 2168 dev->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6; 2169 dev->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING; 2170 dev->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER; 2171 dev->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU; 2172 dev->if_capabilities |= IFCAP_LRO; 2173 2174 if (mdev->LSO_support) 2175 dev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO; 2176 2177 /* set TSO limits so that we don't have to drop TX packets */ 2178 dev->if_hw_tsomax = MLX4_EN_TX_MAX_PAYLOAD_SIZE - (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) /* hdr */; 2179 dev->if_hw_tsomaxsegcount = MLX4_EN_TX_MAX_MBUF_FRAGS - 1 /* hdr */; 2180 dev->if_hw_tsomaxsegsize = MLX4_EN_TX_MAX_MBUF_SIZE; 2181 2182 dev->if_capenable = dev->if_capabilities; 2183 2184 dev->if_hwassist = 0; 2185 if (dev->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6)) 2186 dev->if_hwassist |= CSUM_TSO; 2187 if (dev->if_capenable & IFCAP_TXCSUM) 2188 dev->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP); 2189 if (dev->if_capenable & IFCAP_TXCSUM_IPV6) 2190 dev->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6); 2191 2192 2193 /* Register for VLAN events */ 2194 priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, 2195 mlx4_en_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST); 2196 priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, 2197 mlx4_en_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST); 2198 2199 mdev->pndev[priv->port] = dev; 2200 2201 priv->last_link_state = MLX4_DEV_EVENT_PORT_DOWN; 2202 mlx4_en_set_default_moderation(priv); 2203 2204 /* Set default MAC */ 2205 for (i = 0; i < ETHER_ADDR_LEN; i++) 2206 dev_addr[ETHER_ADDR_LEN - 1 - i] = (u8) (priv->mac >> (8 * i)); 2207 2208 2209 ether_ifattach(dev, dev_addr); 2210 if_link_state_change(dev, LINK_STATE_DOWN); 2211 ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK, 2212 mlx4_en_media_change, mlx4_en_media_status); 2213 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_1000_T, 0, NULL); 2214 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_SR, 0, NULL); 2215 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_CX4, 0, NULL); 2216 ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_40G_CR4, 0, NULL); 2217 ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL); 2218 ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO); 2219 2220 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); 2221 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 2222 2223 priv->registered = 1; 2224 2225 en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num); 2226 en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num); 2227 2228 2229 priv->rx_mb_size = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN; 2230 err = mlx4_SET_PORT_general(mdev->dev, priv->port, 2231 priv->rx_mb_size, 2232 prof->tx_pause, prof->tx_ppp, 2233 prof->rx_pause, prof->rx_ppp); 2234 if (err) { 2235 en_err(priv, "Failed setting port general configurations " 2236 "for port %d, with error %d\n", priv->port, err); 2237 goto out; 2238 } 2239 2240 /* Init port */ 2241 en_warn(priv, "Initializing port\n"); 2242 err = mlx4_INIT_PORT(mdev->dev, priv->port); 2243 if (err) { 2244 en_err(priv, "Failed Initializing port\n"); 2245 goto out; 2246 } 2247 2248 queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY); 2249 2250 if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS) 2251 queue_delayed_work(mdev->workqueue, &priv->service_task, SERVICE_TASK_DELAY); 2252 2253 return 0; 2254 2255out: 2256 mlx4_en_destroy_netdev(dev); 2257 return err; 2258} 2259 2260static int mlx4_en_set_ring_size(struct net_device *dev, 2261 int rx_size, int tx_size) 2262{ 2263 struct mlx4_en_priv *priv = netdev_priv(dev); 2264 struct mlx4_en_dev *mdev = priv->mdev; 2265 int port_up = 0; 2266 int err = 0; 2267 2268 rx_size = roundup_pow_of_two(rx_size); 2269 rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE); 2270 rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE); 2271 tx_size = roundup_pow_of_two(tx_size); 2272 tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE); 2273 tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE); 2274 2275 if (rx_size == (priv->port_up ? 2276 priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) && 2277 tx_size == priv->tx_ring[0]->size) 2278 return 0; 2279 mutex_lock(&mdev->state_lock); 2280 if (priv->port_up) { 2281 port_up = 1; 2282 mlx4_en_stop_port(dev); 2283 } 2284 mlx4_en_free_resources(priv); 2285 priv->prof->tx_ring_size = tx_size; 2286 priv->prof->rx_ring_size = rx_size; 2287 err = mlx4_en_alloc_resources(priv); 2288 if (err) { 2289 en_err(priv, "Failed reallocating port resources\n"); 2290 goto out; 2291 } 2292 if (port_up) { 2293 err = mlx4_en_start_port(dev); 2294 if (err) 2295 en_err(priv, "Failed starting port\n"); 2296 } 2297out: 2298 mutex_unlock(&mdev->state_lock); 2299 return err; 2300} 2301static int mlx4_en_set_rx_ring_size(SYSCTL_HANDLER_ARGS) 2302{ 2303 struct mlx4_en_priv *priv; 2304 int size; 2305 int error; 2306 2307 priv = arg1; 2308 size = priv->prof->rx_ring_size; 2309 error = sysctl_handle_int(oidp, &size, 0, req); 2310 if (error || !req->newptr) 2311 return (error); 2312 error = -mlx4_en_set_ring_size(priv->dev, size, 2313 priv->prof->tx_ring_size); 2314 return (error); 2315} 2316 2317static int mlx4_en_set_tx_ring_size(SYSCTL_HANDLER_ARGS) 2318{ 2319 struct mlx4_en_priv *priv; 2320 int size; 2321 int error; 2322 2323 priv = arg1; 2324 size = priv->prof->tx_ring_size; 2325 error = sysctl_handle_int(oidp, &size, 0, req); 2326 if (error || !req->newptr) 2327 return (error); 2328 error = -mlx4_en_set_ring_size(priv->dev, priv->prof->rx_ring_size, 2329 size); 2330 2331 return (error); 2332} 2333 2334static int mlx4_en_get_module_info(struct net_device *dev, 2335 struct ethtool_modinfo *modinfo) 2336{ 2337 struct mlx4_en_priv *priv = netdev_priv(dev); 2338 struct mlx4_en_dev *mdev = priv->mdev; 2339 int ret; 2340 u8 data[4]; 2341 2342 /* Read first 2 bytes to get Module & REV ID */ 2343 ret = mlx4_get_module_info(mdev->dev, priv->port, 2344 0/*offset*/, 2/*size*/, data); 2345 2346 if (ret < 2) { 2347 en_err(priv, "Failed to read eeprom module first two bytes, error: 0x%x\n", -ret); 2348 return -EIO; 2349 } 2350 2351 switch (data[0] /* identifier */) { 2352 case MLX4_MODULE_ID_QSFP: 2353 modinfo->type = ETH_MODULE_SFF_8436; 2354 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2355 break; 2356 case MLX4_MODULE_ID_QSFP_PLUS: 2357 if (data[1] >= 0x3) { /* revision id */ 2358 modinfo->type = ETH_MODULE_SFF_8636; 2359 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2360 } else { 2361 modinfo->type = ETH_MODULE_SFF_8436; 2362 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; 2363 } 2364 break; 2365 case MLX4_MODULE_ID_QSFP28: 2366 modinfo->type = ETH_MODULE_SFF_8636; 2367 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; 2368 break; 2369 case MLX4_MODULE_ID_SFP: 2370 modinfo->type = ETH_MODULE_SFF_8472; 2371 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN; 2372 break; 2373 default: 2374 en_err(priv, "mlx4_en_get_module_info : Not recognized cable type\n"); 2375 return -EINVAL; 2376 } 2377 2378 return 0; 2379} 2380 2381static int mlx4_en_get_module_eeprom(struct net_device *dev, 2382 struct ethtool_eeprom *ee, 2383 u8 *data) 2384{ 2385 struct mlx4_en_priv *priv = netdev_priv(dev); 2386 struct mlx4_en_dev *mdev = priv->mdev; 2387 int offset = ee->offset; 2388 int i = 0, ret; 2389 2390 if (ee->len == 0) 2391 return -EINVAL; 2392 2393 memset(data, 0, ee->len); 2394 2395 while (i < ee->len) { 2396 en_dbg(DRV, priv, 2397 "mlx4_get_module_info i(%d) offset(%d) len(%d)\n", 2398 i, offset, ee->len - i); 2399 2400 ret = mlx4_get_module_info(mdev->dev, priv->port, 2401 offset, ee->len - i, data + i); 2402 2403 if (!ret) /* Done reading */ 2404 return 0; 2405 2406 if (ret < 0) { 2407 en_err(priv, 2408 "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n", 2409 i, offset, ee->len - i, ret); 2410 return -1; 2411 } 2412 2413 i += ret; 2414 offset += ret; 2415 } 2416 return 0; 2417} 2418 2419static void mlx4_en_print_eeprom(u8 *data, __u32 len) 2420{ 2421 int i; 2422 int j = 0; 2423 int row = 0; 2424 const int NUM_OF_BYTES = 16; 2425 2426 printf("\nOffset\t\tValues\n"); 2427 printf("------\t\t------\n"); 2428 while(row < len){ 2429 printf("0x%04x\t\t",row); 2430 for(i=0; i < NUM_OF_BYTES; i++){ 2431 printf("%02x ", data[j]); 2432 row++; 2433 j++; 2434 } 2435 printf("\n"); 2436 } 2437} 2438 2439/* Read cable EEPROM module information by first inspecting the first 2440 * two bytes to get the length and then read the rest of the information. 2441 * The information is printed to dmesg. */ 2442static int mlx4_en_read_eeprom(SYSCTL_HANDLER_ARGS) 2443{ 2444 2445 u8* data; 2446 int error; 2447 int result = 0; 2448 struct mlx4_en_priv *priv; 2449 struct net_device *dev; 2450 struct ethtool_modinfo modinfo; 2451 struct ethtool_eeprom ee; 2452 2453 error = sysctl_handle_int(oidp, &result, 0, req); 2454 if (error || !req->newptr) 2455 return (error); 2456 2457 if (result == 1) { 2458 priv = arg1; 2459 dev = priv->dev; 2460 data = kmalloc(PAGE_SIZE, GFP_KERNEL); 2461 2462 error = mlx4_en_get_module_info(dev, &modinfo); 2463 if (error) { 2464 en_err(priv, 2465 "mlx4_en_get_module_info returned with error - FAILED (0x%x)\n", 2466 -error); 2467 goto out; 2468 } 2469 2470 ee.len = modinfo.eeprom_len; 2471 ee.offset = 0; 2472 2473 error = mlx4_en_get_module_eeprom(dev, &ee, data); 2474 if (error) { 2475 en_err(priv, 2476 "mlx4_en_get_module_eeprom returned with error - FAILED (0x%x)\n", 2477 -error); 2478 /* Continue printing partial information in case of an error */ 2479 } 2480 2481 /* EEPROM information will be printed in dmesg */ 2482 mlx4_en_print_eeprom(data, ee.len); 2483out: 2484 kfree(data); 2485 } 2486 /* Return zero to prevent sysctl failure. */ 2487 return (0); 2488} 2489 2490static int mlx4_en_set_tx_ppp(SYSCTL_HANDLER_ARGS) 2491{ 2492 struct mlx4_en_priv *priv; 2493 int ppp; 2494 int error; 2495 2496 priv = arg1; 2497 ppp = priv->prof->tx_ppp; 2498 error = sysctl_handle_int(oidp, &ppp, 0, req); 2499 if (error || !req->newptr) 2500 return (error); 2501 if (ppp > 0xff || ppp < 0) 2502 return (-EINVAL); 2503 priv->prof->tx_ppp = ppp; 2504 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port, 2505 priv->rx_mb_size + ETHER_CRC_LEN, 2506 priv->prof->tx_pause, 2507 priv->prof->tx_ppp, 2508 priv->prof->rx_pause, 2509 priv->prof->rx_ppp); 2510 2511 return (error); 2512} 2513 2514static int mlx4_en_set_rx_ppp(SYSCTL_HANDLER_ARGS) 2515{ 2516 struct mlx4_en_priv *priv; 2517 struct mlx4_en_dev *mdev; 2518 int ppp; 2519 int error; 2520 int port_up; 2521 2522 port_up = 0; 2523 priv = arg1; 2524 mdev = priv->mdev; 2525 ppp = priv->prof->rx_ppp; 2526 error = sysctl_handle_int(oidp, &ppp, 0, req); 2527 if (error || !req->newptr) 2528 return (error); 2529 if (ppp > 0xff || ppp < 0) 2530 return (-EINVAL); 2531 /* See if we have to change the number of tx queues. */ 2532 if (!ppp != !priv->prof->rx_ppp) { 2533 mutex_lock(&mdev->state_lock); 2534 if (priv->port_up) { 2535 port_up = 1; 2536 mlx4_en_stop_port(priv->dev); 2537 } 2538 mlx4_en_free_resources(priv); 2539 priv->prof->rx_ppp = ppp; 2540 error = -mlx4_en_alloc_resources(priv); 2541 if (error) 2542 en_err(priv, "Failed reallocating port resources\n"); 2543 if (error == 0 && port_up) { 2544 error = -mlx4_en_start_port(priv->dev); 2545 if (error) 2546 en_err(priv, "Failed starting port\n"); 2547 } 2548 mutex_unlock(&mdev->state_lock); 2549 return (error); 2550 2551 } 2552 priv->prof->rx_ppp = ppp; 2553 error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port, 2554 priv->rx_mb_size + ETHER_CRC_LEN, 2555 priv->prof->tx_pause, 2556 priv->prof->tx_ppp, 2557 priv->prof->rx_pause, 2558 priv->prof->rx_ppp); 2559 2560 return (error); 2561} 2562 2563static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv) 2564{ 2565 struct net_device *dev; 2566 struct sysctl_ctx_list *ctx; 2567 struct sysctl_oid *node; 2568 struct sysctl_oid_list *node_list; 2569 struct sysctl_oid *coal; 2570 struct sysctl_oid_list *coal_list; 2571 const char *pnameunit; 2572 2573 dev = priv->dev; 2574 ctx = &priv->conf_ctx; 2575 pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev); 2576 2577 sysctl_ctx_init(ctx); 2578 priv->sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw), 2579 OID_AUTO, dev->if_xname, CTLFLAG_RD, 0, "mlx4 10gig ethernet"); 2580 node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->sysctl), OID_AUTO, 2581 "conf", CTLFLAG_RD, NULL, "Configuration"); 2582 node_list = SYSCTL_CHILDREN(node); 2583 2584 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "msg_enable", 2585 CTLFLAG_RW, &priv->msg_enable, 0, 2586 "Driver message enable bitfield"); 2587 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_rings", 2588 CTLFLAG_RD, &priv->rx_ring_num, 0, 2589 "Number of receive rings"); 2590 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_rings", 2591 CTLFLAG_RD, &priv->tx_ring_num, 0, 2592 "Number of transmit rings"); 2593 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_size", 2594 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2595 mlx4_en_set_rx_ring_size, "I", "Receive ring size"); 2596 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_size", 2597 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2598 mlx4_en_set_tx_ring_size, "I", "Transmit ring size"); 2599 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_ppp", 2600 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2601 mlx4_en_set_tx_ppp, "I", "TX Per-priority pause"); 2602 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_ppp", 2603 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2604 mlx4_en_set_rx_ppp, "I", "RX Per-priority pause"); 2605 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "port_num", 2606 CTLFLAG_RD, &priv->port, 0, 2607 "Port Number"); 2608 SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, "device_name", 2609 CTLFLAG_RD, __DECONST(void *, pnameunit), 0, 2610 "PCI device name"); 2611 2612 /* Add coalescer configuration. */ 2613 coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, 2614 "coalesce", CTLFLAG_RD, NULL, "Interrupt coalesce configuration"); 2615 coal_list = SYSCTL_CHILDREN(coal); 2616 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_low", 2617 CTLFLAG_RW, &priv->pkt_rate_low, 0, 2618 "Packets per-second for minimum delay"); 2619 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_low", 2620 CTLFLAG_RW, &priv->rx_usecs_low, 0, 2621 "Minimum RX delay in micro-seconds"); 2622 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_high", 2623 CTLFLAG_RW, &priv->pkt_rate_high, 0, 2624 "Packets per-second for maximum delay"); 2625 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_high", 2626 CTLFLAG_RW, &priv->rx_usecs_high, 0, 2627 "Maximum RX delay in micro-seconds"); 2628 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "sample_interval", 2629 CTLFLAG_RW, &priv->sample_interval, 0, 2630 "adaptive frequency in units of HZ ticks"); 2631 SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "adaptive_rx_coal", 2632 CTLFLAG_RW, &priv->adaptive_rx_coal, 0, 2633 "Enable adaptive rx coalescing"); 2634 /* EEPROM support */ 2635 SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "eeprom_info", 2636 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0, 2637 mlx4_en_read_eeprom, "I", "EEPROM information"); 2638} 2639 2640static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv) 2641{ 2642 struct sysctl_ctx_list *ctx; 2643 struct sysctl_oid *node; 2644 struct sysctl_oid_list *node_list; 2645 struct sysctl_oid *ring_node; 2646 struct sysctl_oid_list *ring_list; 2647 struct mlx4_en_tx_ring *tx_ring; 2648 struct mlx4_en_rx_ring *rx_ring; 2649 char namebuf[128]; 2650 int i; 2651 2652 ctx = &priv->stat_ctx; 2653 sysctl_ctx_init(ctx); 2654 node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->sysctl), OID_AUTO, 2655 "stat", CTLFLAG_RD, NULL, "Statistics"); 2656 node_list = SYSCTL_CHILDREN(node); 2657 2658#ifdef MLX4_EN_PERF_STAT 2659 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_poll", CTLFLAG_RD, 2660 &priv->pstats.tx_poll, "TX Poll calls"); 2661 SYSCTL_ADD_QUAD(ctx, node_list, OID_AUTO, "tx_pktsz_avg", CTLFLAG_RD, 2662 &priv->pstats.tx_pktsz_avg, "TX average packet size"); 2663 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "inflight_avg", CTLFLAG_RD, 2664 &priv->pstats.inflight_avg, "TX average packets in-flight"); 2665 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_coal_avg", CTLFLAG_RD, 2666 &priv->pstats.tx_coal_avg, "TX average coalesced completions"); 2667 SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_coal_avg", CTLFLAG_RD, 2668 &priv->pstats.rx_coal_avg, "RX average coalesced completions"); 2669#endif 2670 2671 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tso_packets", CTLFLAG_RD, 2672 &priv->port_stats.tso_packets, "TSO packets sent"); 2673 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "queue_stopped", CTLFLAG_RD, 2674 &priv->port_stats.queue_stopped, "Queue full"); 2675 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "wake_queue", CTLFLAG_RD, 2676 &priv->port_stats.wake_queue, "Queue resumed after full"); 2677 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD, 2678 &priv->port_stats.tx_timeout, "Transmit timeouts"); 2679 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_oversized_packets", CTLFLAG_RD, 2680 &priv->port_stats.oversized_packets, "TX oversized packets, m_defrag failed"); 2681 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD, 2682 &priv->port_stats.rx_alloc_failed, "RX failed to allocate mbuf"); 2683 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD, 2684 &priv->port_stats.rx_chksum_good, "RX checksum offload success"); 2685 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_chksum_none", CTLFLAG_RD, 2686 &priv->port_stats.rx_chksum_none, "RX without checksum offload"); 2687 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_chksum_offload", 2688 CTLFLAG_RD, &priv->port_stats.tx_chksum_offload, 2689 "TX checksum offloads"); 2690 2691 /* Could strdup the names and add in a loop. This is simpler. */ 2692 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD, 2693 &priv->pkstats.rx_bytes, "RX Bytes"); 2694 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_packets", CTLFLAG_RD, 2695 &priv->pkstats.rx_packets, "RX packets"); 2696 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_multicast_packets", CTLFLAG_RD, 2697 &priv->pkstats.rx_multicast_packets, "RX Multicast Packets"); 2698 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_broadcast_packets", CTLFLAG_RD, 2699 &priv->pkstats.rx_broadcast_packets, "RX Broadcast Packets"); 2700 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_errors", CTLFLAG_RD, 2701 &priv->pkstats.rx_errors, "RX Errors"); 2702 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_dropped", CTLFLAG_RD, 2703 &priv->pkstats.rx_dropped, "RX Dropped"); 2704 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_length_errors", CTLFLAG_RD, 2705 &priv->pkstats.rx_length_errors, "RX Length Errors"); 2706 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_over_errors", CTLFLAG_RD, 2707 &priv->pkstats.rx_over_errors, "RX Over Errors"); 2708 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_crc_errors", CTLFLAG_RD, 2709 &priv->pkstats.rx_crc_errors, "RX CRC Errors"); 2710 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_jabbers", CTLFLAG_RD, 2711 &priv->pkstats.rx_jabbers, "RX Jabbers"); 2712 2713 2714 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_in_range_length_error", CTLFLAG_RD, 2715 &priv->pkstats.rx_in_range_length_error, "RX IN_Range Length Error"); 2716 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_out_range_length_error", 2717 CTLFLAG_RD, &priv->pkstats.rx_out_range_length_error, 2718 "RX Out Range Length Error"); 2719 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_lt_64_bytes_packets", CTLFLAG_RD, 2720 &priv->pkstats.rx_lt_64_bytes_packets, "RX Lt 64 Bytes Packets"); 2721 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_127_bytes_packets", CTLFLAG_RD, 2722 &priv->pkstats.rx_127_bytes_packets, "RX 127 bytes Packets"); 2723 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_255_bytes_packets", CTLFLAG_RD, 2724 &priv->pkstats.rx_255_bytes_packets, "RX 255 bytes Packets"); 2725 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_511_bytes_packets", CTLFLAG_RD, 2726 &priv->pkstats.rx_511_bytes_packets, "RX 511 bytes Packets"); 2727 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1023_bytes_packets", CTLFLAG_RD, 2728 &priv->pkstats.rx_1023_bytes_packets, "RX 1023 bytes Packets"); 2729 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1518_bytes_packets", CTLFLAG_RD, 2730 &priv->pkstats.rx_1518_bytes_packets, "RX 1518 bytes Packets"); 2731 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1522_bytes_packets", CTLFLAG_RD, 2732 &priv->pkstats.rx_1522_bytes_packets, "RX 1522 bytes Packets"); 2733 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1548_bytes_packets", CTLFLAG_RD, 2734 &priv->pkstats.rx_1548_bytes_packets, "RX 1548 bytes Packets"); 2735 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_gt_1548_bytes_packets", CTLFLAG_RD, 2736 &priv->pkstats.rx_gt_1548_bytes_packets, 2737 "RX Greater Then 1548 bytes Packets"); 2738 2739struct mlx4_en_pkt_stats { 2740 unsigned long tx_packets; 2741 unsigned long tx_bytes; 2742 unsigned long tx_multicast_packets; 2743 unsigned long tx_broadcast_packets; 2744 unsigned long tx_errors; 2745 unsigned long tx_dropped; 2746 unsigned long tx_lt_64_bytes_packets; 2747 unsigned long tx_127_bytes_packets; 2748 unsigned long tx_255_bytes_packets; 2749 unsigned long tx_511_bytes_packets; 2750 unsigned long tx_1023_bytes_packets; 2751 unsigned long tx_1518_bytes_packets; 2752 unsigned long tx_1522_bytes_packets; 2753 unsigned long tx_1548_bytes_packets; 2754 unsigned long tx_gt_1548_bytes_packets; 2755 unsigned long rx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS]; 2756 unsigned long tx_prio[NUM_PRIORITIES][NUM_PRIORITY_STATS]; 2757#define NUM_PKT_STATS 72 2758}; 2759 2760 2761 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD, 2762 &priv->pkstats.tx_packets, "TX packets"); 2763 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD, 2764 &priv->pkstats.tx_bytes, "TX Bytes"); 2765 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD, 2766 &priv->pkstats.tx_multicast_packets, "TX Multicast Packets"); 2767 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD, 2768 &priv->pkstats.tx_broadcast_packets, "TX Broadcast Packets"); 2769 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_errors", CTLFLAG_RD, 2770 &priv->pkstats.tx_errors, "TX Errors"); 2771 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_dropped", CTLFLAG_RD, 2772 &priv->pkstats.tx_dropped, "TX Dropped"); 2773 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_lt_64_bytes_packets", CTLFLAG_RD, 2774 &priv->pkstats.tx_lt_64_bytes_packets, "TX Less Then 64 Bytes Packets"); 2775 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_127_bytes_packets", CTLFLAG_RD, 2776 &priv->pkstats.tx_127_bytes_packets, "TX 127 Bytes Packets"); 2777 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_255_bytes_packets", CTLFLAG_RD, 2778 &priv->pkstats.tx_255_bytes_packets, "TX 255 Bytes Packets"); 2779 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_511_bytes_packets", CTLFLAG_RD, 2780 &priv->pkstats.tx_511_bytes_packets, "TX 511 Bytes Packets"); 2781 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1023_bytes_packets", CTLFLAG_RD, 2782 &priv->pkstats.tx_1023_bytes_packets, "TX 1023 Bytes Packets"); 2783 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1518_bytes_packets", CTLFLAG_RD, 2784 &priv->pkstats.tx_1518_bytes_packets, "TX 1518 Bytes Packets"); 2785 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1522_bytes_packets", CTLFLAG_RD, 2786 &priv->pkstats.tx_1522_bytes_packets, "TX 1522 Bytes Packets"); 2787 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1548_bytes_packets", CTLFLAG_RD, 2788 &priv->pkstats.tx_1548_bytes_packets, "TX 1548 Bytes Packets"); 2789 SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_gt_1548_bytes_packets", CTLFLAG_RD, 2790 &priv->pkstats.tx_gt_1548_bytes_packets, 2791 "TX Greater Then 1548 Bytes Packets"); 2792 2793 2794 2795 for (i = 0; i < priv->tx_ring_num; i++) { 2796 tx_ring = priv->tx_ring[i]; 2797 snprintf(namebuf, sizeof(namebuf), "tx_ring%d", i); 2798 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf, 2799 CTLFLAG_RD, NULL, "TX Ring"); 2800 ring_list = SYSCTL_CHILDREN(ring_node); 2801 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "packets", 2802 CTLFLAG_RD, &tx_ring->packets, "TX packets"); 2803 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "bytes", 2804 CTLFLAG_RD, &tx_ring->bytes, "TX bytes"); 2805 } 2806 2807 for (i = 0; i < priv->rx_ring_num; i++) { 2808 rx_ring = priv->rx_ring[i]; 2809 snprintf(namebuf, sizeof(namebuf), "rx_ring%d", i); 2810 ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf, 2811 CTLFLAG_RD, NULL, "RX Ring"); 2812 ring_list = SYSCTL_CHILDREN(ring_node); 2813 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "packets", 2814 CTLFLAG_RD, &rx_ring->packets, "RX packets"); 2815 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "bytes", 2816 CTLFLAG_RD, &rx_ring->bytes, "RX bytes"); 2817 SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "error", 2818 CTLFLAG_RD, &rx_ring->errors, "RX soft errors"); 2819 } 2820} 2821