/* en_netdev.c, revision 298778 */

/*
 * Copyright (c) 2007, 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */

#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/slab.h>
#ifdef CONFIG_NET_RX_BUSY_POLL
#include <net/busy_poll.h>
#endif

#include <linux/list.h>
#include <linux/if_ether.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/cq.h>

#include <sys/sockio.h>
#include <sys/sysctl.h>

#include "mlx4_en.h"
#include "en_port.h"

static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv);
static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv);
static int mlx4_en_unit;

#ifdef CONFIG_NET_RX_BUSY_POLL
/* must be called with local_bh_disable()d */
static int mlx4_en_low_latency_recv(struct napi_struct *napi)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct net_device *dev = cq->dev;
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_rx_ring *rx_ring = priv->rx_ring[cq->ring];
	int done;

	if (!priv->port_up)
		return LL_FLUSH_FAILED;

	if (!mlx4_en_cq_lock_poll(cq))
		return LL_FLUSH_BUSY;

	done = mlx4_en_process_rx_cq(dev, cq, 4);
#ifdef LL_EXTENDED_STATS
	if (done)
		rx_ring->cleaned += done;
	else
		rx_ring->misses++;
#endif

	mlx4_en_cq_unlock_poll(cq);

	return done;
}
#endif	/* CONFIG_NET_RX_BUSY_POLL */

#ifdef CONFIG_RFS_ACCEL

struct mlx4_en_filter {
	struct list_head next;
	struct work_struct work;

	u8     ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;

	int rxq_index;
	struct mlx4_en_priv *priv;
	u32 flow_id;			/* RFS infrastructure id */
	int id;				/* mlx4_en driver id */
	u64 reg_id;			/* Flow steering API id */
	u8 activated;			/* Used to prevent expiry before filter
					 * is attached
					 */
	struct hlist_node filter_chain;
};

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv);

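/*
 * Map an IP protocol number to the firmware's flow-steering rule id.
 * Only TCP and UDP flows can be steered individually; anything else is
 * reported as unsupported so the caller can skip rule installation.
 */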
static enum mlx4_net_trans_rule_id mlx4_ip_proto_to_trans_rule_id(u8 ip_proto)
{
	switch (ip_proto) {
	case IPPROTO_UDP:
		return MLX4_NET_TRANS_RULE_ID_UDP;
	case IPPROTO_TCP:
		return MLX4_NET_TRANS_RULE_ID_TCP;
	default:
		return -EPROTONOSUPPORT;
	}
}

static void mlx4_en_filter_work(struct work_struct *work)
{
	struct mlx4_en_filter *filter = container_of(work,
	    struct mlx4_en_filter, work);
	struct mlx4_en_priv *priv = filter->priv;
	struct mlx4_spec_list spec_tcp_udp = {
		.id = mlx4_ip_proto_to_trans_rule_id(filter->ip_proto),
		{
			.tcp_udp = {
				.dst_port = filter->dst_port,
				.dst_port_msk = (__force __be16)-1,
				.src_port = filter->src_port,
				.src_port_msk = (__force __be16)-1,
			},
		},
	};
	struct mlx4_spec_list spec_ip = {
		.id = MLX4_NET_TRANS_RULE_ID_IPV4,
		{
			.ipv4 = {
				.dst_ip = filter->dst_ip,
				.dst_ip_msk = (__force __be32)-1,
				.src_ip = filter->src_ip,
				.src_ip_msk = (__force __be32)-1,
			},
		},
	};
	struct mlx4_spec_list spec_eth = {
		.id = MLX4_NET_TRANS_RULE_ID_ETH,
	};
	struct mlx4_net_trans_rule rule = {
		.list = LIST_HEAD_INIT(rule.list),
		.queue_mode = MLX4_NET_TRANS_Q_LIFO,
		.exclusive = 1,
		.allow_loopback = 1,
		.promisc_mode = MLX4_FS_REGULAR,
		.port = priv->port,
		.priority = MLX4_DOMAIN_RFS,
	};
	int rc;
	__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

	if (spec_tcp_udp.id < 0) {
		en_warn(priv, "RFS: ignoring unsupported ip protocol (%d)\n",
		    filter->ip_proto);
		goto ignore;
	}
	list_add_tail(&spec_eth.list, &rule.list);
	list_add_tail(&spec_ip.list, &rule.list);
	list_add_tail(&spec_tcp_udp.list, &rule.list);

	rule.qpn = priv->rss_map.qps[filter->rxq_index].qpn;
	memcpy(spec_eth.eth.dst_mac, priv->dev->dev_addr, ETH_ALEN);
	memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);

	filter->activated = 0;

	if (filter->reg_id) {
		rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
		if (rc && rc != -ENOENT)
			en_err(priv, "Error detaching flow. rc = %d\n", rc);
	}

	rc = mlx4_flow_attach(priv->mdev->dev, &rule, &filter->reg_id);
	if (rc)
		en_err(priv, "Error attaching flow. err = %d\n", rc);

ignore:
	mlx4_en_filter_rfs_expire(priv);

	filter->activated = 1;
}

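/*
 * Pick the filter_hash bucket for a 5-tuple.  The two ports and the XOR
 * of the two addresses are folded into a single long, which hash_long()
 * then spreads over 1 << MLX4_EN_FILTER_HASH_SHIFT buckets.
 */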
static inline struct hlist_head *
filter_hash_bucket(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
    __be16 src_port, __be16 dst_port)
{
	unsigned long l;
	int bucket_idx;

	l = (__force unsigned long)src_port |
	    ((__force unsigned long)dst_port << 2);
	l ^= (__force unsigned long)(src_ip ^ dst_ip);

	bucket_idx = hash_long(l, MLX4_EN_FILTER_HASH_SHIFT);

	return &priv->filter_hash[bucket_idx];
}

static struct mlx4_en_filter *
mlx4_en_filter_alloc(struct mlx4_en_priv *priv, int rxq_index, __be32 src_ip,
    __be32 dst_ip, u8 ip_proto, __be16 src_port, __be16 dst_port, u32 flow_id)
{
	struct mlx4_en_filter *filter = NULL;

	filter = kzalloc(sizeof(struct mlx4_en_filter), GFP_ATOMIC);
	if (!filter)
		return NULL;

	filter->priv = priv;
	filter->rxq_index = rxq_index;
	INIT_WORK(&filter->work, mlx4_en_filter_work);

	filter->src_ip = src_ip;
	filter->dst_ip = dst_ip;
	filter->ip_proto = ip_proto;
	filter->src_port = src_port;
	filter->dst_port = dst_port;

	filter->flow_id = flow_id;

	filter->id = priv->last_filter_id++ % RPS_NO_FILTER;

	list_add_tail(&filter->next, &priv->filters);
	hlist_add_head(&filter->filter_chain,
	    filter_hash_bucket(priv, src_ip, dst_ip, src_port, dst_port));

	return filter;
}

static void mlx4_en_filter_free(struct mlx4_en_filter *filter)
{
	struct mlx4_en_priv *priv = filter->priv;
	int rc;

	list_del(&filter->next);

	rc = mlx4_flow_detach(priv->mdev->dev, filter->reg_id);
	if (rc && rc != -ENOENT)
		en_err(priv, "Error detaching flow. rc = %d\n", rc);

	kfree(filter);
}

static inline struct mlx4_en_filter *
mlx4_en_filter_find(struct mlx4_en_priv *priv, __be32 src_ip, __be32 dst_ip,
    u8 ip_proto, __be16 src_port, __be16 dst_port)
{
	struct hlist_node *elem;
	struct mlx4_en_filter *filter;
	struct mlx4_en_filter *ret = NULL;

	hlist_for_each_entry(filter, elem,
	    filter_hash_bucket(priv, src_ip, dst_ip, src_port, dst_port),
	    filter_chain) {
		if (filter->src_ip == src_ip &&
		    filter->dst_ip == dst_ip &&
		    filter->ip_proto == ip_proto &&
		    filter->src_port == src_port &&
		    filter->dst_port == dst_port) {
			ret = filter;
			break;
		}
	}

	return ret;
}

static int
mlx4_en_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb,
    u16 rxq_index, u32 flow_id)
{
	struct mlx4_en_priv *priv = netdev_priv(net_dev);
	struct mlx4_en_filter *filter;
	const struct iphdr *ip;
	const __be16 *ports;
	u8 ip_proto;
	__be32 src_ip;
	__be32 dst_ip;
	__be16 src_port;
	__be16 dst_port;
	int nhoff = skb_network_offset(skb);
	int ret = 0;

	if (skb->protocol != htons(ETH_P_IP))
		return -EPROTONOSUPPORT;

	ip = (const struct iphdr *)(skb->data + nhoff);
	if (ip_is_fragment(ip))
		return -EPROTONOSUPPORT;

	if ((ip->protocol != IPPROTO_TCP) && (ip->protocol != IPPROTO_UDP))
		return -EPROTONOSUPPORT;
	ports = (const __be16 *)(skb->data + nhoff + 4 * ip->ihl);

	ip_proto = ip->protocol;
	src_ip = ip->saddr;
	dst_ip = ip->daddr;
	src_port = ports[0];
	dst_port = ports[1];

	spin_lock_bh(&priv->filters_lock);
	filter = mlx4_en_filter_find(priv, src_ip, dst_ip, ip_proto,
	    src_port, dst_port);
	if (filter) {
		if (filter->rxq_index == rxq_index)
			goto out;

		filter->rxq_index = rxq_index;
	} else {
		filter = mlx4_en_filter_alloc(priv, rxq_index,
		    src_ip, dst_ip, ip_proto, src_port, dst_port, flow_id);
		if (!filter) {
			ret = -ENOMEM;
			goto err;
		}
	}

	queue_work(priv->mdev->workqueue, &filter->work);

out:
	ret = filter->id;
err:
	spin_unlock_bh(&priv->filters_lock);

	return ret;
}

void mlx4_en_cleanup_filters(struct mlx4_en_priv *priv,
    struct mlx4_en_rx_ring *rx_ring)
{
	struct mlx4_en_filter *filter, *tmp;
	LIST_HEAD(del_list);

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		list_move(&filter->next, &del_list);
		hlist_del(&filter->filter_chain);
	}
	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next) {
		cancel_work_sync(&filter->work);
		mlx4_en_filter_free(filter);
	}
}

static void mlx4_en_filter_rfs_expire(struct mlx4_en_priv *priv)
{
	struct mlx4_en_filter *filter = NULL, *tmp, *last_filter = NULL;
	LIST_HEAD(del_list);
	int i = 0;

	spin_lock_bh(&priv->filters_lock);
	list_for_each_entry_safe(filter, tmp, &priv->filters, next) {
		if (i > MLX4_EN_FILTER_EXPIRY_QUOTA)
			break;

		if (filter->activated &&
		    !work_pending(&filter->work) &&
		    rps_may_expire_flow(priv->dev,
		    filter->rxq_index, filter->flow_id, filter->id)) {
			list_move(&filter->next, &del_list);
			hlist_del(&filter->filter_chain);
		} else
			last_filter = filter;

		i++;
	}

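	/*
	 * Rotate the list head past the last filter that survived this
	 * pass, so the next expiry scan resumes where this one stopped
	 * instead of re-checking the same entries every time.
	 */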
	if (last_filter && (&last_filter->next != priv->filters.next))
		list_move(&priv->filters, &last_filter->next);

	spin_unlock_bh(&priv->filters_lock);

	list_for_each_entry_safe(filter, tmp, &del_list, next)
		mlx4_en_filter_free(filter);
}
#endif

static void mlx4_en_vlan_rx_add_vid(void *arg, struct net_device *dev, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;
	int idx;

	if (arg != priv)
		return;

	en_dbg(HW, priv, "adding VLAN:%d\n", vid);

	set_bit(vid, priv->active_vlans);

	/* Add VID to port VLAN filter */
	mutex_lock(&mdev->state_lock);
	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	if (mlx4_register_vlan(mdev->dev, priv->port, vid, &idx))
		en_dbg(HW, priv, "failed adding vlan %d\n", vid);
	mutex_unlock(&mdev->state_lock);

}

static void mlx4_en_vlan_rx_kill_vid(void *arg, struct net_device *dev, u16 vid)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	if (arg != priv)
		return;

	en_dbg(HW, priv, "Killing VID:%d\n", vid);

	clear_bit(vid, priv->active_vlans);

	/* Remove VID from port VLAN filter */
	mutex_lock(&mdev->state_lock);
	mlx4_unregister_vlan(mdev->dev, priv->port, vid);

	if (mdev->device_up && priv->port_up) {
		err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
		if (err)
			en_err(priv, "Failed configuring VLAN filter\n");
	}
	mutex_unlock(&mdev->state_lock);

}

static int mlx4_en_uc_steer_add(struct mlx4_en_priv *priv,
    unsigned char *mac, int *qpn, u64 *reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;
	int err;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = *qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		err = mlx4_unicast_attach(dev, &qp, gid, 0, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		struct mlx4_spec_list spec_eth = { {NULL} };
		__be64 mac_mask = cpu_to_be64(MLX4_MAC_MASK << 16);

		struct mlx4_net_trans_rule rule = {
			.queue_mode = MLX4_NET_TRANS_Q_FIFO,
			.exclusive = 0,
			.allow_loopback = 1,
			.promisc_mode = MLX4_FS_REGULAR,
			.priority = MLX4_DOMAIN_NIC,
		};

		rule.port = priv->port;
		rule.qpn = *qpn;
		INIT_LIST_HEAD(&rule.list);

		spec_eth.id = MLX4_NET_TRANS_RULE_ID_ETH;
		memcpy(spec_eth.eth.dst_mac, mac, ETH_ALEN);
		memcpy(spec_eth.eth.dst_mac_msk, &mac_mask, ETH_ALEN);
		list_add_tail(&spec_eth.list, &rule.list);

		err = mlx4_flow_attach(dev, &rule, reg_id);
		break;
	}
	default:
		return -EINVAL;
	}
	if (err)
		en_warn(priv, "Failed Attaching Unicast\n");

	return err;
}

static void mlx4_en_uc_steer_release(struct mlx4_en_priv *priv,
    unsigned char *mac, int qpn, u64 reg_id)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_dev *dev = mdev->dev;

	switch (dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_B0: {
		struct mlx4_qp qp;
		u8 gid[16] = {0};

		qp.qpn = qpn;
		memcpy(&gid[10], mac, ETH_ALEN);
		gid[5] = priv->port;

		mlx4_unicast_detach(dev, &qp, gid, MLX4_PROT_ETH);
		break;
	}
	case MLX4_STEERING_MODE_DEVICE_MANAGED: {
		mlx4_flow_detach(dev, reg_id);
		break;
	}
	default:
		en_err(priv, "Invalid steering mode.\n");
	}
}

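/*
 * Register the station MAC with the port and obtain the base RX QP
 * (priv->base_qpn).  With A0 steering the QP number is derived directly
 * from the MAC table index; otherwise a QP range is reserved and a
 * unicast steering rule is attached, and the registration id is kept in
 * the mac_hash table so mlx4_en_put_qp() can release it later.
 */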
mode.\n"); 531 } 532} 533 534static int mlx4_en_get_qp(struct mlx4_en_priv *priv) 535{ 536 struct mlx4_en_dev *mdev = priv->mdev; 537 struct mlx4_dev *dev = mdev->dev; 538 struct mlx4_mac_entry *entry; 539 int index = 0; 540 int err = 0; 541 u64 reg_id; 542 int *qpn = &priv->base_qpn; 543 u64 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev)); 544 545 en_dbg(DRV, priv, "Registering MAC: %pM for adding\n", 546 IF_LLADDR(priv->dev)); 547 index = mlx4_register_mac(dev, priv->port, mac); 548 if (index < 0) { 549 err = index; 550 en_err(priv, "Failed adding MAC: %pM\n", 551 IF_LLADDR(priv->dev)); 552 return err; 553 } 554 555 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 556 int base_qpn = mlx4_get_base_qpn(dev, priv->port); 557 *qpn = base_qpn + index; 558 return 0; 559 } 560 561 err = mlx4_qp_reserve_range(dev, 1, 1, qpn, 0); 562 en_dbg(DRV, priv, "Reserved qp %d\n", *qpn); 563 if (err) { 564 en_err(priv, "Failed to reserve qp for mac registration\n"); 565 goto qp_err; 566 } 567 568 err = mlx4_en_uc_steer_add(priv, IF_LLADDR(priv->dev), qpn, ®_id); 569 if (err) 570 goto steer_err; 571 572 entry = kmalloc(sizeof(*entry), GFP_KERNEL); 573 if (!entry) { 574 err = -ENOMEM; 575 goto alloc_err; 576 } 577 memcpy(entry->mac, IF_LLADDR(priv->dev), sizeof(entry->mac)); 578 entry->reg_id = reg_id; 579 580 hlist_add_head(&entry->hlist, 581 &priv->mac_hash[entry->mac[MLX4_EN_MAC_HASH_IDX]]); 582 583 return 0; 584 585alloc_err: 586 mlx4_en_uc_steer_release(priv, IF_LLADDR(priv->dev), *qpn, reg_id); 587 588steer_err: 589 mlx4_qp_release_range(dev, *qpn, 1); 590 591qp_err: 592 mlx4_unregister_mac(dev, priv->port, mac); 593 return err; 594} 595 596static void mlx4_en_put_qp(struct mlx4_en_priv *priv) 597{ 598 struct mlx4_en_dev *mdev = priv->mdev; 599 struct mlx4_dev *dev = mdev->dev; 600 int qpn = priv->base_qpn; 601 u64 mac; 602 603 if (dev->caps.steering_mode == MLX4_STEERING_MODE_A0) { 604 mac = mlx4_mac_to_u64(IF_LLADDR(priv->dev)); 605 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", 606 IF_LLADDR(priv->dev)); 607 mlx4_unregister_mac(dev, priv->port, mac); 608 } else { 609 struct mlx4_mac_entry *entry; 610 struct hlist_node *n, *tmp; 611 struct hlist_head *bucket; 612 unsigned int i; 613 614 for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i) { 615 bucket = &priv->mac_hash[i]; 616 hlist_for_each_entry_safe(entry, n, tmp, bucket, hlist) { 617 mac = mlx4_mac_to_u64(entry->mac); 618 en_dbg(DRV, priv, "Registering MAC: %pM for deleting\n", 619 entry->mac); 620 mlx4_en_uc_steer_release(priv, entry->mac, 621 qpn, entry->reg_id); 622 623 mlx4_unregister_mac(dev, priv->port, mac); 624 hlist_del(&entry->hlist); 625 kfree(entry); 626 } 627 } 628 629 en_dbg(DRV, priv, "Releasing qp: port %d, qpn %d\n", 630 priv->port, qpn); 631 mlx4_qp_release_range(dev, qpn, 1); 632 priv->flags &= ~MLX4_EN_FLAG_FORCE_PROMISC; 633 } 634} 635 636static void mlx4_en_clear_list(struct net_device *dev) 637{ 638 struct mlx4_en_priv *priv = netdev_priv(dev); 639 struct mlx4_en_mc_list *tmp, *mc_to_del; 640 641 list_for_each_entry_safe(mc_to_del, tmp, &priv->mc_list, list) { 642 list_del(&mc_to_del->list); 643 kfree(mc_to_del); 644 } 645} 646 647static void mlx4_en_cache_mclist(struct net_device *dev) 648{ 649 struct ifmultiaddr *ifma; 650 struct mlx4_en_mc_list *tmp; 651 struct mlx4_en_priv *priv = netdev_priv(dev); 652 653 if_maddr_rlock(dev); 654 TAILQ_FOREACH(ifma, &dev->if_multiaddrs, ifma_link) { 655 if (ifma->ifma_addr->sa_family != AF_LINK) 656 continue; 657 if (((struct sockaddr_dl *)ifma->ifma_addr)->sdl_alen != 658 
static void mlx4_en_cache_mclist(struct net_device *dev)
{
	struct ifmultiaddr *ifma;
	struct mlx4_en_mc_list *tmp;
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if_maddr_rlock(dev);
	TAILQ_FOREACH(ifma, &dev->if_multiaddrs, ifma_link) {
		if (ifma->ifma_addr->sa_family != AF_LINK)
			continue;
		if (((struct sockaddr_dl *)ifma->ifma_addr)->sdl_alen !=
		    ETHER_ADDR_LEN)
			continue;
		/* Make sure the list didn't grow. */
		tmp = kzalloc(sizeof(struct mlx4_en_mc_list), GFP_ATOMIC);
		if (tmp == NULL) {
			en_err(priv, "Failed to allocate multicast list\n");
			break;
		}
		memcpy(tmp->addr,
		    LLADDR((struct sockaddr_dl *)ifma->ifma_addr), ETH_ALEN);
		list_add_tail(&tmp->list, &priv->mc_list);
	}
	if_maddr_runlock(dev);
}

static void update_mclist_flags(struct mlx4_en_priv *priv,
    struct list_head *dst, struct list_head *src)
{
	struct mlx4_en_mc_list *dst_tmp, *src_tmp, *new_mc;
	bool found;

	/* Find all the entries that should be removed from dst;
	 * these are the entries that are not found in src.
	 */
	list_for_each_entry(dst_tmp, dst, list) {
		found = false;
		list_for_each_entry(src_tmp, src, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				found = true;
				break;
			}
		}
		if (!found)
			dst_tmp->action = MCLIST_REM;
	}

	/* Add entries that exist in src but not in dst,
	 * marking them as needing to be added.
	 */
	list_for_each_entry(src_tmp, src, list) {
		found = false;
		list_for_each_entry(dst_tmp, dst, list) {
			if (!memcmp(dst_tmp->addr, src_tmp->addr, ETH_ALEN)) {
				dst_tmp->action = MCLIST_NONE;
				found = true;
				break;
			}
		}
		if (!found) {
			new_mc = kmalloc(sizeof(struct mlx4_en_mc_list),
			    GFP_KERNEL);
			if (!new_mc) {
				en_err(priv, "Failed to allocate current multicast list\n");
				return;
			}
			memcpy(new_mc, src_tmp,
			    sizeof(struct mlx4_en_mc_list));
			new_mc->action = MCLIST_ADD;
			list_add_tail(&new_mc->list, dst);
		}
	}
}

static void mlx4_en_set_rx_mode(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);

	if (!priv->port_up)
		return;

	queue_work(priv->mdev->workqueue, &priv->rx_mode_task);
}

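/*
 * Enter promiscuous mode.  The mechanism depends on the steering mode:
 * device-managed flow steering installs a catch-all rule, B0 steering
 * adds the base QP to the unicast/multicast promiscuous lists, and A0
 * steering flips the port's qpn_calc promiscuous bit.  In all cases the
 * port multicast filter is disabled afterwards.
 */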
static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
    struct mlx4_en_dev *mdev)
{
	int err = 0;

	if (!(priv->flags & MLX4_EN_FLAG_PROMISC)) {
		priv->flags |= MLX4_EN_FLAG_PROMISC;

		/* Enable promiscuous mode */
		switch (mdev->dev->caps.steering_mode) {
		case MLX4_STEERING_MODE_DEVICE_MANAGED:
			err = mlx4_flow_steer_promisc_add(mdev->dev,
			    priv->port, priv->base_qpn, MLX4_FS_ALL_DEFAULT);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			break;

		case MLX4_STEERING_MODE_B0:
			err = mlx4_unicast_promisc_add(mdev->dev,
			    priv->base_qpn, priv->port);
			if (err)
				en_err(priv, "Failed enabling unicast promiscuous mode\n");

			/* Add the default qp number as multicast
			 * promisc
			 */
			if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
				err = mlx4_multicast_promisc_add(mdev->dev,
				    priv->base_qpn, priv->port);
				if (err)
					en_err(priv, "Failed enabling multicast promiscuous mode\n");
				priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
			}
			break;

		case MLX4_STEERING_MODE_A0:
			err = mlx4_SET_PORT_qpn_calc(mdev->dev,
			    priv->port, priv->base_qpn, 1);
			if (err)
				en_err(priv, "Failed enabling promiscuous mode\n");
			break;
		}

		/* Disable port multicast filter (unconditionally) */
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
		    0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");
	}
}

static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
    struct mlx4_en_dev *mdev)
{
	int err = 0;

	priv->flags &= ~MLX4_EN_FLAG_PROMISC;

	/* Disable promiscuous mode */
	switch (mdev->dev->caps.steering_mode) {
	case MLX4_STEERING_MODE_DEVICE_MANAGED:
		err = mlx4_flow_steer_promisc_remove(mdev->dev,
		    priv->port, MLX4_FS_ALL_DEFAULT);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		break;

	case MLX4_STEERING_MODE_B0:
		err = mlx4_unicast_promisc_remove(mdev->dev,
		    priv->base_qpn, priv->port);
		if (err)
			en_err(priv, "Failed disabling unicast promiscuous mode\n");
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			err = mlx4_multicast_promisc_remove(mdev->dev,
			    priv->base_qpn, priv->port);
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
		break;

	case MLX4_STEERING_MODE_A0:
		err = mlx4_SET_PORT_qpn_calc(mdev->dev,
		    priv->port, priv->base_qpn, 0);
		if (err)
			en_err(priv, "Failed disabling promiscuous mode\n");
		break;
	}
}

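/*
 * Bring the hardware multicast state in line with the stack.  With
 * IFF_ALLMULTI the default QP is made multicast-promiscuous; otherwise
 * the cached address list is programmed into the port filter and
 * update_mclist_flags() decides which addresses must be attached to or
 * detached from the RSS indirection QP.
 */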
static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
    struct net_device *dev, struct mlx4_en_dev *mdev)
{
	struct mlx4_en_mc_list *mclist, *tmp;
	u8 mc_list[16] = {0};
	int err = 0;
	u64 mcast_addr = 0;


	/* Enable/disable the multicast filter according to IFF_ALLMULTI */
	if (dev->if_flags & IFF_ALLMULTI) {
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
		    0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Add the default qp number as multicast promisc */
		if (!(priv->flags & MLX4_EN_FLAG_MC_PROMISC)) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_add(mdev->dev,
				    priv->port, priv->base_qpn,
				    MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_add(mdev->dev,
				    priv->base_qpn, priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed entering multicast promisc mode\n");
			priv->flags |= MLX4_EN_FLAG_MC_PROMISC;
		}
	} else {
		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			switch (mdev->dev->caps.steering_mode) {
			case MLX4_STEERING_MODE_DEVICE_MANAGED:
				err = mlx4_flow_steer_promisc_remove(mdev->dev,
				    priv->port, MLX4_FS_MC_DEFAULT);
				break;

			case MLX4_STEERING_MODE_B0:
				err = mlx4_multicast_promisc_remove(mdev->dev,
				    priv->base_qpn, priv->port);
				break;

			case MLX4_STEERING_MODE_A0:
				break;
			}
			if (err)
				en_err(priv, "Failed disabling multicast promiscuous mode\n");
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}

		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
		    0, MLX4_MCAST_DISABLE);
		if (err)
			en_err(priv, "Failed disabling multicast filter\n");

		/* Flush mcast filter and init it with broadcast address */
		mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, ETH_BCAST,
		    1, MLX4_MCAST_CONFIG);

		/* Update multicast list - we cache all addresses so they won't
		 * change while HW is updated holding the command semaphore */
		mlx4_en_cache_mclist(dev);
		list_for_each_entry(mclist, &priv->mc_list, list) {
			mcast_addr = mlx4_mac_to_u64(mclist->addr);
			mlx4_SET_MCAST_FLTR(mdev->dev, priv->port,
			    mcast_addr, 0, MLX4_MCAST_CONFIG);
		}
		err = mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0,
		    0, MLX4_MCAST_ENABLE);
		if (err)
			en_err(priv, "Failed enabling multicast filter\n");

		update_mclist_flags(priv, &priv->curr_list, &priv->mc_list);
		list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
			if (mclist->action == MCLIST_REM) {
				/* detach this address and delete from list */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				mc_list[5] = priv->port;
				err = mlx4_multicast_detach(mdev->dev,
				    &priv->rss_map.indir_qp, mc_list,
				    MLX4_PROT_ETH, mclist->reg_id);
				if (err)
					en_err(priv, "Fail to detach multicast address\n");

				/* remove from list */
				list_del(&mclist->list);
				kfree(mclist);
			} else if (mclist->action == MCLIST_ADD) {
				/* attach the address */
				memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
				/* needed for B0 steering support */
				mc_list[5] = priv->port;
				err = mlx4_multicast_attach(mdev->dev,
				    &priv->rss_map.indir_qp, mc_list,
				    priv->port, 0, MLX4_PROT_ETH,
				    &mclist->reg_id);
				if (err)
					en_err(priv, "Fail to attach multicast address\n");

			}
		}
	}
}

static void mlx4_en_do_set_rx_mode(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
	    rx_mode_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;


	mutex_lock(&mdev->state_lock);
	if (!mdev->device_up) {
		en_dbg(HW, priv, "Card is not up, ignoring rx mode change.\n");
		goto out;
	}
	if (!priv->port_up) {
		en_dbg(HW, priv, "Port is down, ignoring rx mode change.\n");
		goto out;
	}
	if (!mlx4_en_QUERY_PORT(mdev, priv->port)) {
		if (priv->port_state.link_state) {
			priv->last_link_state = MLX4_DEV_EVENT_PORT_UP;
			/* update netif baudrate */
			priv->dev->if_baudrate =
			    IF_Mbps(priv->port_state.link_speed);
			/* Important note: the following call for if_link_state_change
			 * is needed for interface up scenario (start port, link state
			 * change) */
			if_link_state_change(priv->dev, LINK_STATE_UP);
			en_dbg(HW, priv, "Link Up\n");
		}
	}

	/* Promiscuous mode: disable all filters */
	if ((dev->if_flags & IFF_PROMISC) ||
	    (priv->flags & MLX4_EN_FLAG_FORCE_PROMISC)) {
		mlx4_en_set_promisc_mode(priv, mdev);
		goto out;
	}

	/* Not in promiscuous mode */
	if (priv->flags & MLX4_EN_FLAG_PROMISC)
		mlx4_en_clear_promisc_mode(priv, mdev);

	mlx4_en_do_multicast(priv, dev, mdev);
out:
	mutex_unlock(&mdev->state_lock);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void mlx4_en_netpoll(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_cq *cq;
	unsigned long flags;
	int i;

	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		spin_lock_irqsave(&cq->lock, flags);
		napi_synchronize(&cq->napi);
		mlx4_en_process_rx_cq(dev, cq, 0);
		spin_unlock_irqrestore(&cq->lock, flags);
	}
}
#endif

static void mlx4_en_watchdog_timeout(void *arg)
{
	struct mlx4_en_priv *priv = arg;
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Scheduling watchdog\n");
	queue_work(mdev->workqueue, &priv->watchdog_task);
	if (priv->port_up)
		callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
		    mlx4_en_watchdog_timeout, priv);
}


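/*
 * Initial interrupt-moderation setup.  The RX frame budget is derived
 * from MLX4_EN_RX_COAL_TARGET bytes worth of MTU-sized packets, the
 * timeouts come from fixed defaults, and the auto-moderation sampling
 * state is reset so mlx4_en_auto_moderation() starts from scratch.
 */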
static void mlx4_en_set_default_moderation(struct mlx4_en_priv *priv)
{
	struct mlx4_en_cq *cq;
	int i;

	/* If we haven't received a specific coalescing setting
	 * (module param), we set the moderation parameters as follows:
	 * - moder_cnt is set to the number of mtu sized packets to
	 *   satisfy our coalescing target.
	 * - moder_time is set to a fixed value.
	 */
	priv->rx_frames = MLX4_EN_RX_COAL_TARGET / priv->dev->if_mtu + 1;
	priv->rx_usecs = MLX4_EN_RX_COAL_TIME;
	priv->tx_frames = MLX4_EN_TX_COAL_PKTS;
	priv->tx_usecs = MLX4_EN_TX_COAL_TIME;
	en_dbg(INTR, priv, "Default coalescing params for mtu: %u - "
	    "rx_frames:%d rx_usecs:%d\n",
	    (unsigned)priv->dev->if_mtu, priv->rx_frames, priv->rx_usecs);

	/* Setup cq moderation params */
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];
		cq->moder_cnt = priv->rx_frames;
		cq->moder_time = priv->rx_usecs;
		priv->last_moder_time[i] = MLX4_EN_AUTO_CONF;
		priv->last_moder_packets[i] = 0;
		priv->last_moder_bytes[i] = 0;
	}

	for (i = 0; i < priv->tx_ring_num; i++) {
		cq = priv->tx_cq[i];
		cq->moder_cnt = priv->tx_frames;
		cq->moder_time = priv->tx_usecs;
	}

	/* Reset auto-moderation params */
	priv->pkt_rate_low = MLX4_EN_RX_RATE_LOW;
	priv->rx_usecs_low = MLX4_EN_RX_COAL_TIME_LOW;
	priv->pkt_rate_high = MLX4_EN_RX_RATE_HIGH;
	priv->rx_usecs_high = MLX4_EN_RX_COAL_TIME_HIGH;
	priv->sample_interval = MLX4_EN_SAMPLE_INTERVAL;
	priv->adaptive_rx_coal = 1;
	priv->last_moder_jiffies = 0;
	priv->last_moder_tx_packets = 0;
}

static void mlx4_en_auto_moderation(struct mlx4_en_priv *priv)
{
	unsigned long period = (unsigned long) (jiffies - priv->last_moder_jiffies);
	struct mlx4_en_cq *cq;
	unsigned long packets;
	unsigned long rate;
	unsigned long avg_pkt_size;
	unsigned long rx_packets;
	unsigned long rx_bytes;
	unsigned long rx_pkt_diff;
	int moder_time;
	int ring, err;

	if (!priv->adaptive_rx_coal || period < priv->sample_interval * HZ)
		return;

	for (ring = 0; ring < priv->rx_ring_num; ring++) {
		spin_lock(&priv->stats_lock);
		rx_packets = priv->rx_ring[ring]->packets;
		rx_bytes = priv->rx_ring[ring]->bytes;
		spin_unlock(&priv->stats_lock);

		rx_pkt_diff = ((unsigned long) (rx_packets -
		    priv->last_moder_packets[ring]));
		packets = rx_pkt_diff;
		rate = packets * HZ / period;
		avg_pkt_size = packets ? ((unsigned long) (rx_bytes -
		    priv->last_moder_bytes[ring])) / packets : 0;

		/* Apply auto-moderation only when packet rate
		 * exceeds a rate where it matters */
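		/*
		 * Between the low and high rate thresholds the moderation
		 * time is interpolated linearly:
		 *   moder_time = usecs_low + (rate - rate_low) *
		 *       (usecs_high - usecs_low) / (rate_high - rate_low)
		 */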
		if (rate > (MLX4_EN_RX_RATE_THRESH / priv->rx_ring_num) &&
		    avg_pkt_size > MLX4_EN_AVG_PKT_SMALL) {
			if (rate < priv->pkt_rate_low)
				moder_time = priv->rx_usecs_low;
			else if (rate > priv->pkt_rate_high)
				moder_time = priv->rx_usecs_high;
			else
				moder_time = (rate - priv->pkt_rate_low) *
				    (priv->rx_usecs_high - priv->rx_usecs_low) /
				    (priv->pkt_rate_high - priv->pkt_rate_low) +
				    priv->rx_usecs_low;
		} else {
			moder_time = priv->rx_usecs_low;
		}

		if (moder_time != priv->last_moder_time[ring]) {
			priv->last_moder_time[ring] = moder_time;
			cq = priv->rx_cq[ring];
			cq->moder_time = moder_time;
			err = mlx4_en_set_cq_moder(priv, cq);
			if (err)
				en_err(priv, "Failed modifying moderation for cq:%d\n",
				    ring);
		}
		priv->last_moder_packets[ring] = rx_packets;
		priv->last_moder_bytes[ring] = rx_bytes;
	}

	priv->last_moder_jiffies = jiffies;
}

static void mlx4_en_do_get_stats(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
	    stats_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		if (priv->port_up) {
			err = mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 0);
			if (err)
				en_dbg(HW, priv, "Could not update stats\n");

			mlx4_en_auto_moderation(priv);
		}

		queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

/*
 * mlx4_en_service_task - run tasks that need to be done periodically.
 */
static void mlx4_en_service_task(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_en_priv *priv = container_of(delay, struct mlx4_en_priv,
	    service_task);
	struct mlx4_en_dev *mdev = priv->mdev;

	mutex_lock(&mdev->state_lock);
	if (mdev->device_up) {
		queue_delayed_work(mdev->workqueue, &priv->service_task,
		    SERVICE_TASK_DELAY);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_linkstate(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
	    linkstate_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	int linkstate = priv->link_state;

	mutex_lock(&mdev->state_lock);
	/* If observable port state changed set carrier state and
	 * report to system log */
	if (priv->last_link_state != linkstate) {
		if (linkstate == MLX4_DEV_EVENT_PORT_DOWN) {
			en_info(priv, "Link Down\n");
			if_link_state_change(priv->dev, LINK_STATE_DOWN);
			/* update netif baudrate */
			priv->dev->if_baudrate = 0;

			/* make sure the port is up before notifying the OS.
			 * This is tricky since we get here on INIT_PORT and
			 * in such case we can't tell the OS the port is up.
			 * To solve this there is a call to if_link_state_change
			 * in set_rx_mode.
			 */
		} else if (priv->port_up && (linkstate == MLX4_DEV_EVENT_PORT_UP)) {
			if (mlx4_en_QUERY_PORT(priv->mdev, priv->port))
				en_info(priv, "Query port failed\n");
			priv->dev->if_baudrate =
			    IF_Mbps(priv->port_state.link_speed);
			en_info(priv, "Link Up\n");
			if_link_state_change(priv->dev, LINK_STATE_UP);
		}
	}
	priv->last_link_state = linkstate;
	mutex_unlock(&mdev->state_lock);
}


int mlx4_en_start_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq;
	struct mlx4_en_tx_ring *tx_ring;
	int rx_index = 0;
	int tx_index = 0;
	int err = 0;
	int i;
	int j;
	u8 mc_list[16] = {0};


	if (priv->port_up) {
		en_dbg(DRV, priv, "start port called while port already up\n");
		return 0;
	}

	INIT_LIST_HEAD(&priv->mc_list);
	INIT_LIST_HEAD(&priv->curr_list);
	INIT_LIST_HEAD(&priv->ethtool_list);

	/* Calculate Rx buf size */
	dev->if_mtu = min(dev->if_mtu, priv->max_mtu);
	mlx4_en_calc_rx_buf(dev);
	en_dbg(DRV, priv, "Rx buf size:%d\n", priv->rx_mb_size);

	/* Configure rx cq's and rings */
	err = mlx4_en_activate_rx_rings(priv);
	if (err) {
		en_err(priv, "Failed to activate RX rings\n");
		return err;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		cq = priv->rx_cq[i];

		mlx4_en_cq_init_lock(cq);
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Rx CQ\n");
			goto cq_err;
		}
		for (j = 0; j < cq->size; j++)
			cq->buf[j].owner_sr_opcode = MLX4_CQE_OWNER_MASK;
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto cq_err;
		}
		mlx4_en_arm_cq(priv, cq);
		priv->rx_ring[i]->cqn = cq->mcq.cqn;
		++rx_index;
	}

	/* Set qp number */
	en_dbg(DRV, priv, "Getting qp number for port %d\n", priv->port);
	err = mlx4_en_get_qp(priv);
	if (err) {
		en_err(priv, "Failed getting eth qp\n");
		goto cq_err;
	}
	mdev->mac_removed[priv->port] = 0;

	/* gets default allocated counter index from func cap */
	/* or sink counter index if no resources */
	priv->counter_index = mdev->dev->caps.def_counter_index[priv->port - 1];

	en_dbg(DRV, priv, "%s: default counter index %d for port %d\n",
	    __func__, priv->counter_index, priv->port);

	err = mlx4_en_config_rss_steer(priv);
	if (err) {
		en_err(priv, "Failed configuring rss steering\n");
		goto mac_err;
	}

	err = mlx4_en_create_drop_qp(priv);
	if (err)
		goto rss_err;

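	/*
	 * Bring up the TX side: for each ring the CQ is activated, the
	 * collapsed-CQ index is reset, the ring is attached to its CQ and
	 * armed, and every TXBB is stamped back to software ownership.
	 */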
	/* Configure tx cq's and rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		/* Configure cq */
		cq = priv->tx_cq[i];
		err = mlx4_en_activate_cq(priv, cq, i);
		if (err) {
			en_err(priv, "Failed activating Tx CQ\n");
			goto tx_err;
		}
		err = mlx4_en_set_cq_moder(priv, cq);
		if (err) {
			en_err(priv, "Failed setting cq moderation parameters");
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}
		en_dbg(DRV, priv, "Resetting index of collapsed CQ:%d to -1\n", i);
		cq->buf->wqe_index = cpu_to_be16(0xffff);

		/* Configure ring */
		tx_ring = priv->tx_ring[i];

		err = mlx4_en_activate_tx_ring(priv, tx_ring, cq->mcq.cqn,
		    i / priv->num_tx_rings_p_up);
		if (err) {
			en_err(priv, "Failed activating Tx ring %d\n", i);
			mlx4_en_deactivate_cq(priv, cq);
			goto tx_err;
		}

		/* Arm CQ for TX completions */
		mlx4_en_arm_cq(priv, cq);

		/* Set initial ownership of all Tx TXBBs to SW (1) */
		for (j = 0; j < tx_ring->buf_size; j += STAMP_STRIDE)
			*((u32 *) (tx_ring->buf + j)) = 0xffffffff;
		++tx_index;
	}

	/* Configure port */
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
	    priv->rx_mb_size,
	    priv->prof->tx_pause,
	    priv->prof->tx_ppp,
	    priv->prof->rx_pause,
	    priv->prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations for port %d, with error %d\n",
		    priv->port, err);
		goto tx_err;
	}
	/* Set default qp number */
	err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
	if (err) {
		en_err(priv, "Failed setting default qp numbers\n");
		goto tx_err;
	}

	/* Init port */
	en_dbg(HW, priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed Initializing port\n");
		goto tx_err;
	}

	/* Attach rx QP to broadcast address */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	if (mlx4_multicast_attach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
	    priv->port, 0, MLX4_PROT_ETH, &priv->broadcast_id))
		mlx4_warn(mdev, "Failed Attaching Broadcast\n");

	/* Must redo promiscuous mode setup. */
	priv->flags &= ~(MLX4_EN_FLAG_PROMISC | MLX4_EN_FLAG_MC_PROMISC);

	/* Schedule multicast task to populate multicast list */
	queue_work(mdev->workqueue, &priv->rx_mode_task);

	mlx4_set_stats_bitmap(mdev->dev, priv->stats_bitmap);

	priv->port_up = true;

	/* Enable the queues. */
	dev->if_drv_flags &= ~IFF_DRV_OACTIVE;
	dev->if_drv_flags |= IFF_DRV_RUNNING;
#ifdef CONFIG_DEBUG_FS
	mlx4_en_create_debug_files(priv);
#endif
	callout_reset(&priv->watchdog_timer, MLX4_EN_WATCHDOG_TIMEOUT,
	    mlx4_en_watchdog_timeout, priv);


	return 0;

tx_err:
	while (tx_index--) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[tx_index]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[tx_index]);
	}
	mlx4_en_destroy_drop_qp(priv);
rss_err:
	mlx4_en_release_rss_steer(priv);
mac_err:
	mlx4_en_put_qp(priv);
cq_err:
	while (rx_index--)
		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
	for (i = 0; i < priv->rx_ring_num; i++)
		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);

	return err; /* need to close devices */
}

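/*
 * Tear the port down in roughly the reverse order of
 * mlx4_en_start_port(): close the port, drop promiscuous state and
 * multicast attachments, drain and free the TX rings, release the RSS
 * QPs and the station MAC, and finally quiesce the RX rings and the
 * watchdog.
 */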
void mlx4_en_stop_port(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_mc_list *mclist, *tmp;
	int i;
	u8 mc_list[16] = {0};

	if (!priv->port_up) {
		en_dbg(DRV, priv, "stop port called while port already down\n");
		return;
	}

#ifdef CONFIG_DEBUG_FS
	mlx4_en_delete_debug_files(priv);
#endif

	/* close port */
	mlx4_CLOSE_PORT(mdev->dev, priv->port);

	/* Set port as not active */
	priv->port_up = false;
	if (priv->counter_index != 0xff) {
		mlx4_counter_free(mdev->dev, priv->port, priv->counter_index);
		priv->counter_index = 0xff;
	}

	/* Promiscuous mode */
	if (mdev->dev->caps.steering_mode ==
	    MLX4_STEERING_MODE_DEVICE_MANAGED) {
		priv->flags &= ~(MLX4_EN_FLAG_PROMISC |
		    MLX4_EN_FLAG_MC_PROMISC);
		mlx4_flow_steer_promisc_remove(mdev->dev,
		    priv->port, MLX4_FS_ALL_DEFAULT);
		mlx4_flow_steer_promisc_remove(mdev->dev,
		    priv->port, MLX4_FS_MC_DEFAULT);
	} else if (priv->flags & MLX4_EN_FLAG_PROMISC) {
		priv->flags &= ~MLX4_EN_FLAG_PROMISC;

		/* Disable promiscuous mode */
		mlx4_unicast_promisc_remove(mdev->dev, priv->base_qpn,
		    priv->port);

		/* Disable Multicast promisc */
		if (priv->flags & MLX4_EN_FLAG_MC_PROMISC) {
			mlx4_multicast_promisc_remove(mdev->dev, priv->base_qpn,
			    priv->port);
			priv->flags &= ~MLX4_EN_FLAG_MC_PROMISC;
		}
	}

	/* Detach All multicasts */
	memset(&mc_list[10], 0xff, ETH_ALEN);
	mc_list[5] = priv->port; /* needed for B0 steering support */
	mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list,
	    MLX4_PROT_ETH, priv->broadcast_id);
	list_for_each_entry(mclist, &priv->curr_list, list) {
		memcpy(&mc_list[10], mclist->addr, ETH_ALEN);
		mc_list[5] = priv->port;
		mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
		    mc_list, MLX4_PROT_ETH, mclist->reg_id);
	}
	mlx4_en_clear_list(dev);
	list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
		list_del(&mclist->list);
		kfree(mclist);
	}

	/* Flush multicast filter */
	mlx4_SET_MCAST_FLTR(mdev->dev, priv->port, 0, 1, MLX4_MCAST_CONFIG);
	mlx4_en_destroy_drop_qp(priv);

	/* Free TX Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		mlx4_en_deactivate_tx_ring(priv, priv->tx_ring[i]);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[i]);
	}
	msleep(10);

	for (i = 0; i < priv->tx_ring_num; i++)
		mlx4_en_free_tx_buf(dev, priv->tx_ring[i]);

	/* Free RSS qps */
	mlx4_en_release_rss_steer(priv);

	/* Unregister Mac address for the port */
	mlx4_en_put_qp(priv);
	mdev->mac_removed[priv->port] = 1;

	/* Free RX Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		struct mlx4_en_cq *cq = priv->rx_cq[i];

		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
		mlx4_en_deactivate_cq(priv, cq);
	}

	callout_stop(&priv->watchdog_timer);

	dev->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
}

static void mlx4_en_restart(struct work_struct *work)
{
	struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
	    watchdog_task);
	struct mlx4_en_dev *mdev = priv->mdev;
	struct net_device *dev = priv->dev;
	struct mlx4_en_tx_ring *ring;
	int i;


	if (priv->blocked == 0 || priv->port_up == 0)
		return;
	for (i = 0; i < priv->tx_ring_num; i++) {
		ring = priv->tx_ring[i];
		if (ring->blocked &&
		    ring->watchdog_time + MLX4_EN_WATCHDOG_TIMEOUT < ticks)
			goto reset;
	}
	return;

reset:
	priv->port_stats.tx_timeout++;
	en_dbg(DRV, priv, "Watchdog task called for port %d\n", priv->port);

	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		mlx4_en_stop_port(dev);
		//for (i = 0; i < priv->tx_ring_num; i++)
		//	netdev_tx_reset_queue(priv->tx_ring[i]->tx_queue);
		if (mlx4_en_start_port(dev))
			en_err(priv, "Failed restarting port %d\n", priv->port);
	}
	mutex_unlock(&mdev->state_lock);
}

static void mlx4_en_clear_stats(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int i;

	if (!mlx4_is_slave(mdev->dev))
		if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
			en_dbg(HW, priv, "Failed dumping statistics\n");

	memset(&priv->pstats, 0, sizeof(priv->pstats));
	memset(&priv->pkstats, 0, sizeof(priv->pkstats));
	memset(&priv->port_stats, 0, sizeof(priv->port_stats));
	memset(&priv->vport_stats, 0, sizeof(priv->vport_stats));

	for (i = 0; i < priv->tx_ring_num; i++) {
		priv->tx_ring[i]->bytes = 0;
		priv->tx_ring[i]->packets = 0;
		priv->tx_ring[i]->tx_csum = 0;
		priv->tx_ring[i]->oversized_packets = 0;
	}
	for (i = 0; i < priv->rx_ring_num; i++) {
		priv->rx_ring[i]->bytes = 0;
		priv->rx_ring[i]->packets = 0;
		priv->rx_ring[i]->csum_ok = 0;
		priv->rx_ring[i]->csum_none = 0;
	}
}

static void mlx4_en_open(void* arg)
{

	struct mlx4_en_priv *priv;
	struct mlx4_en_dev *mdev;
	struct net_device *dev;
	int err = 0;

	priv = arg;
	mdev = priv->mdev;
	dev = priv->dev;


	mutex_lock(&mdev->state_lock);

	if (!mdev->device_up) {
		en_err(priv, "Cannot open - device down/disabled\n");
		goto out;
	}

	/* Reset HW statistics and SW counters */
	mlx4_en_clear_stats(dev);

	err = mlx4_en_start_port(dev);
	if (err)
		en_err(priv, "Failed starting port:%d\n", priv->port);

out:
	mutex_unlock(&mdev->state_lock);
	return;
}

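/*
 * Release everything mlx4_en_alloc_resources() created: the RFS IRQ
 * cpu_rmap if one was allocated, all TX/RX rings with their CQs, and
 * the per-ring statistics sysctl context.
 */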
void mlx4_en_free_resources(struct mlx4_en_priv *priv)
{
	int i;

#ifdef CONFIG_RFS_ACCEL
	if (priv->dev->rx_cpu_rmap) {
		free_irq_cpu_rmap(priv->dev->rx_cpu_rmap);
		priv->dev->rx_cpu_rmap = NULL;
	}
#endif

	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring && priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq && priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}

	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
			    priv->prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}

	if (priv->sysctl)
		sysctl_ctx_free(&priv->stat_ctx);
}

int mlx4_en_alloc_resources(struct mlx4_en_priv *priv)
{
	struct mlx4_en_port_profile *prof = priv->prof;
	int i;
	int node = 0;

	/* Create rx Rings */
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->rx_cq[i],
		    prof->rx_ring_size, i, RX, node))
			goto err;

		if (mlx4_en_create_rx_ring(priv, &priv->rx_ring[i],
		    prof->rx_ring_size, node))
			goto err;
	}

	/* Create tx Rings */
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (mlx4_en_create_cq(priv, &priv->tx_cq[i],
		    prof->tx_ring_size, i, TX, node))
			goto err;

		if (mlx4_en_create_tx_ring(priv, &priv->tx_ring[i],
		    prof->tx_ring_size, TXBB_SIZE, node, i))
			goto err;
	}

#ifdef CONFIG_RFS_ACCEL
	priv->dev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->rx_ring_num);
	if (!priv->dev->rx_cpu_rmap)
		goto err;
#endif
	/* Re-create stat sysctls in case the number of rings changed. */
	mlx4_en_sysctl_stat(priv);
	return 0;

err:
	en_err(priv, "Failed to allocate NIC resources\n");
	for (i = 0; i < priv->rx_ring_num; i++) {
		if (priv->rx_ring[i])
			mlx4_en_destroy_rx_ring(priv, &priv->rx_ring[i],
			    prof->rx_ring_size, priv->stride);
		if (priv->rx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->rx_cq[i]);
	}
	for (i = 0; i < priv->tx_ring_num; i++) {
		if (priv->tx_ring[i])
			mlx4_en_destroy_tx_ring(priv, &priv->tx_ring[i]);
		if (priv->tx_cq[i])
			mlx4_en_destroy_cq(priv, &priv->tx_cq[i]);
	}
	priv->port_up = false;
	return -ENOMEM;
}

struct en_port_attribute {
	struct attribute attr;
	ssize_t (*show)(struct en_port *, struct en_port_attribute *, char *buf);
	ssize_t (*store)(struct en_port *, struct en_port_attribute *, char *buf, size_t count);
};

#define PORT_ATTR_RO(_name) \
struct en_port_attribute en_port_attr_##_name = __ATTR_RO(_name)

#define EN_PORT_ATTR(_name, _mode, _show, _store) \
struct en_port_attribute en_port_attr_##_name = __ATTR(_name, _mode, _show, _store)

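/*
 * Final teardown of a port's ifnet.  Ordering matters here: the VLAN
 * event handlers are removed and the interface detached before the
 * port is stopped, all delayed work is cancelled and flushed before
 * the rings are freed, and the private structure is released last.
 */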
void mlx4_en_destroy_netdev(struct net_device *dev)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;

	en_dbg(DRV, priv, "Destroying netdev on port:%d\n", priv->port);

	if (priv->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, priv->vlan_attach);
	if (priv->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, priv->vlan_detach);

	/* Unregister device - this will close the port if it was up */
	if (priv->registered) {
		mutex_lock(&mdev->state_lock);
		ether_ifdetach(dev);
		mutex_unlock(&mdev->state_lock);
	}

	if (priv->allocated)
		mlx4_free_hwq_res(mdev->dev, &priv->res, MLX4_EN_PAGE_SIZE);

	mutex_lock(&mdev->state_lock);
	mlx4_en_stop_port(dev);
	mutex_unlock(&mdev->state_lock);


	cancel_delayed_work(&priv->stats_task);
	cancel_delayed_work(&priv->service_task);
	/* flush any pending task for this netdev */
	flush_workqueue(mdev->workqueue);
	callout_drain(&priv->watchdog_timer);

	/* Detach the netdev so tasks would not attempt to access it */
	mutex_lock(&mdev->state_lock);
	mdev->pndev[priv->port] = NULL;
	mutex_unlock(&mdev->state_lock);


	mlx4_en_free_resources(priv);

	/* freeing the sysctl conf cannot be called from within mlx4_en_free_resources */
	if (priv->sysctl)
		sysctl_ctx_free(&priv->conf_ctx);

	kfree(priv->tx_ring);
	kfree(priv->tx_cq);

	kfree(priv);
	if_free(dev);

}

static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;

	en_dbg(DRV, priv, "Change MTU called - current:%u new:%u\n",
	    (unsigned)dev->if_mtu, (unsigned)new_mtu);

	if ((new_mtu < MLX4_EN_MIN_MTU) || (new_mtu > priv->max_mtu)) {
		en_err(priv, "Bad MTU size:%d.\n", new_mtu);
		return -EPERM;
	}
	mutex_lock(&mdev->state_lock);
	dev->if_mtu = new_mtu;
	if (dev->if_drv_flags & IFF_DRV_RUNNING) {
		if (!mdev->device_up) {
			/* NIC is probably restarting - let watchdog task reset
			 * the port */
			en_dbg(DRV, priv, "Change MTU called with card down!?\n");
		} else {
			mlx4_en_stop_port(dev);
			err = mlx4_en_start_port(dev);
			if (err) {
				en_err(priv, "Failed restarting port:%d\n",
				    priv->port);
				queue_work(mdev->workqueue, &priv->watchdog_task);
			}
		}
	}
	mutex_unlock(&mdev->state_lock);
	return 0;
}

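/*
 * Translate the queried port state into an ifmedia word: the link
 * speed selects the subtype (with the transceiver type disambiguating
 * the 10G variants) and the profile's pause settings are reported as
 * media options.
 */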
static int mlx4_en_calc_media(struct mlx4_en_priv *priv)
{
	int trans_type;
	int active;

	active = IFM_ETHER;
	if (priv->last_link_state == MLX4_DEV_EVENT_PORT_DOWN)
		return (active);
	active |= IFM_FDX;
	trans_type = priv->port_state.transciver;
	/* XXX I don't know all of the transceiver values. */
	switch (priv->port_state.link_speed) {
	case 1000:
		active |= IFM_1000_T;
		break;
	case 10000:
		if (trans_type > 0 && trans_type <= 0xC)
			active |= IFM_10G_SR;
		else if (trans_type == 0x80 || trans_type == 0)
			active |= IFM_10G_CX4;
		break;
	case 40000:
		active |= IFM_40G_CR4;
		break;
	}
	if (priv->prof->tx_pause)
		active |= IFM_ETH_TXPAUSE;
	if (priv->prof->rx_pause)
		active |= IFM_ETH_RXPAUSE;

	return (active);
}

static void mlx4_en_media_status(struct ifnet *dev, struct ifmediareq *ifmr)
{
	struct mlx4_en_priv *priv;

	priv = dev->if_softc;
	ifmr->ifm_status = IFM_AVALID;
	if (priv->last_link_state != MLX4_DEV_EVENT_PORT_DOWN)
		ifmr->ifm_status |= IFM_ACTIVE;
	ifmr->ifm_active = mlx4_en_calc_media(priv);

	return;
}

static int mlx4_en_media_change(struct ifnet *dev)
{
	struct mlx4_en_priv *priv;
	struct ifmedia *ifm;
	int rxpause;
	int txpause;
	int error;

	priv = dev->if_softc;
	ifm = &priv->media;
	rxpause = txpause = 0;
	error = 0;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);
	switch (IFM_SUBTYPE(ifm->ifm_media)) {
	case IFM_AUTO:
		break;
	case IFM_10G_SR:
	case IFM_10G_CX4:
	case IFM_1000_T:
	case IFM_40G_CR4:
		if ((IFM_SUBTYPE(ifm->ifm_media)
		    == IFM_SUBTYPE(mlx4_en_calc_media(priv)))
		    && (ifm->ifm_media & IFM_FDX))
			break;
		/* Fallthrough */
	default:
		printf("%s: Only auto media type\n", if_name(dev));
		return (EINVAL);
	}
	/* Allow user to set/clear pause */
	if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_RXPAUSE)
		rxpause = 1;
	if (IFM_OPTIONS(ifm->ifm_media) & IFM_ETH_TXPAUSE)
		txpause = 1;
	if (priv->prof->tx_pause != txpause || priv->prof->rx_pause != rxpause) {
		priv->prof->tx_pause = txpause;
		priv->prof->rx_pause = rxpause;
		error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
		    priv->rx_mb_size + ETHER_CRC_LEN, priv->prof->tx_pause,
		    priv->prof->tx_ppp, priv->prof->rx_pause,
		    priv->prof->rx_ppp);
	}
	return (error);
}

static int mlx4_en_ioctl(struct ifnet *dev, u_long command, caddr_t data)
{
	struct mlx4_en_priv *priv;
	struct mlx4_en_dev *mdev;
	struct ifreq *ifr;
	int error;
	int mask;

	error = 0;
	mask = 0;
	priv = dev->if_softc;
	mdev = priv->mdev;
	ifr = (struct ifreq *) data;

	switch (command) {
	case SIOCSIFMTU:
		error = -mlx4_en_change_mtu(dev, ifr->ifr_mtu);
		break;
	case SIOCSIFFLAGS:
		if (dev->if_flags & IFF_UP) {
			if ((dev->if_drv_flags & IFF_DRV_RUNNING) == 0) {
				mutex_lock(&mdev->state_lock);
				mlx4_en_start_port(dev);
				mutex_unlock(&mdev->state_lock);
			} else {
				mlx4_en_set_rx_mode(dev);
			}
		} else {
			mutex_lock(&mdev->state_lock);
			if (dev->if_drv_flags & IFF_DRV_RUNNING) {
				mlx4_en_stop_port(dev);
				if_link_state_change(dev, LINK_STATE_DOWN);
			}
			mutex_unlock(&mdev->state_lock);
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		mlx4_en_set_rx_mode(dev);
		break;
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(dev, ifr, &priv->media, command);
		break;
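	/*
	 * The capability toggles below enforce their dependencies:
	 * clearing txcsum/txcsum6 also clears the matching TSO bit, and
	 * TSO cannot be enabled while the corresponding checksum offload
	 * is off (e.g. "ifconfig mlxen0 -txcsum" also disables tso4).
	 */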
	case SIOCSIFCAP:
		mutex_lock(&mdev->state_lock);
		mask = ifr->ifr_reqcap ^ dev->if_capenable;
		if (mask & IFCAP_TXCSUM) {
			dev->if_capenable ^= IFCAP_TXCSUM;
			dev->if_hwassist ^= (CSUM_TCP | CSUM_UDP | CSUM_IP);

			if (IFCAP_TSO4 & dev->if_capenable &&
			    !(IFCAP_TXCSUM & dev->if_capenable)) {
				dev->if_capenable &= ~IFCAP_TSO4;
				dev->if_hwassist &= ~CSUM_IP_TSO;
				if_printf(dev,
				    "tso4 disabled due to -txcsum.\n");
			}
		}
		if (mask & IFCAP_TXCSUM_IPV6) {
			dev->if_capenable ^= IFCAP_TXCSUM_IPV6;
			dev->if_hwassist ^= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

			if (IFCAP_TSO6 & dev->if_capenable &&
			    !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) {
				dev->if_capenable &= ~IFCAP_TSO6;
				dev->if_hwassist &= ~CSUM_IP6_TSO;
				if_printf(dev,
				    "tso6 disabled due to -txcsum6.\n");
			}
		}
		if (mask & IFCAP_RXCSUM)
			dev->if_capenable ^= IFCAP_RXCSUM;
		if (mask & IFCAP_RXCSUM_IPV6)
			dev->if_capenable ^= IFCAP_RXCSUM_IPV6;

		if (mask & IFCAP_TSO4) {
			if (!(IFCAP_TSO4 & dev->if_capenable) &&
			    !(IFCAP_TXCSUM & dev->if_capenable)) {
				if_printf(dev, "enable txcsum first.\n");
				error = EAGAIN;
				goto out;
			}
			dev->if_capenable ^= IFCAP_TSO4;
			dev->if_hwassist ^= CSUM_IP_TSO;
		}
		if (mask & IFCAP_TSO6) {
			if (!(IFCAP_TSO6 & dev->if_capenable) &&
			    !(IFCAP_TXCSUM_IPV6 & dev->if_capenable)) {
				if_printf(dev, "enable txcsum6 first.\n");
				error = EAGAIN;
				goto out;
			}
			dev->if_capenable ^= IFCAP_TSO6;
			dev->if_hwassist ^= CSUM_IP6_TSO;
		}
		if (mask & IFCAP_LRO)
			dev->if_capenable ^= IFCAP_LRO;
		if (mask & IFCAP_VLAN_HWTAGGING)
			dev->if_capenable ^= IFCAP_VLAN_HWTAGGING;
		if (mask & IFCAP_VLAN_HWFILTER)
			dev->if_capenable ^= IFCAP_VLAN_HWFILTER;
		if (mask & IFCAP_WOL_MAGIC)
			dev->if_capenable ^= IFCAP_WOL_MAGIC;
		if (dev->if_drv_flags & IFF_DRV_RUNNING)
			mlx4_en_start_port(dev);
out:
		mutex_unlock(&mdev->state_lock);
		VLAN_CAPABILITIES(dev);
		break;
int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
    struct mlx4_en_port_profile *prof)
{
	struct net_device *dev;
	struct mlx4_en_priv *priv;
	uint8_t dev_addr[ETHER_ADDR_LEN];
	int err;
	int i;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv == NULL)	/* don't dereference a failed allocation */
		return -ENOMEM;
	dev = priv->dev = if_alloc(IFT_ETHER);
	if (dev == NULL) {
		en_err(priv, "Net device allocation failed\n");
		kfree(priv);
		return -ENOMEM;
	}
	dev->if_softc = priv;
	if_initname(dev, "mlxen", atomic_fetchadd_int(&mlx4_en_unit, 1));
	dev->if_mtu = ETHERMTU;
	dev->if_init = mlx4_en_open;
	dev->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	dev->if_ioctl = mlx4_en_ioctl;
	dev->if_transmit = mlx4_en_transmit;
	dev->if_qflush = mlx4_en_qflush;
	dev->if_snd.ifq_maxlen = prof->tx_ring_size;

	/*
	 * Initialize driver private data
	 */
	priv->counter_index = 0xff;
	spin_lock_init(&priv->stats_lock);
	INIT_WORK(&priv->rx_mode_task, mlx4_en_do_set_rx_mode);
	INIT_WORK(&priv->watchdog_task, mlx4_en_restart);
	INIT_WORK(&priv->linkstate_task, mlx4_en_linkstate);
	INIT_DELAYED_WORK(&priv->stats_task, mlx4_en_do_get_stats);
	INIT_DELAYED_WORK(&priv->service_task, mlx4_en_service_task);
	callout_init(&priv->watchdog_timer, 1);
#ifdef CONFIG_RFS_ACCEL
	INIT_LIST_HEAD(&priv->filters);
	spin_lock_init(&priv->filters_lock);
#endif

	priv->msg_enable = MLX4_EN_MSG_LEVEL;
	priv->dev = dev;
	priv->mdev = mdev;
	priv->ddev = &mdev->pdev->dev;
	priv->prof = prof;
	priv->port = port;
	priv->port_up = false;
	priv->flags = prof->flags;

	priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
	priv->tx_ring_num = prof->tx_ring_num;
	priv->tx_ring = kcalloc(MAX_TX_RINGS,
	    sizeof(struct mlx4_en_tx_ring *), GFP_KERNEL);
	if (!priv->tx_ring) {
		err = -ENOMEM;
		goto out;
	}
	/* kcalloc() takes (count, size); the arguments were swapped here */
	priv->tx_cq = kcalloc(MAX_TX_RINGS, sizeof(struct mlx4_en_cq *),
	    GFP_KERNEL);
	if (!priv->tx_cq) {
		err = -ENOMEM;
		goto out;
	}

	priv->rx_ring_num = prof->rx_ring_num;
	priv->cqe_factor = (mdev->dev->caps.cqe_size == 64) ? 1 : 0;
	priv->mac_index = -1;
	priv->last_ifq_jiffies = 0;
	priv->if_counters_rx_errors = 0;
	priv->if_counters_rx_no_buffer = 0;
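	/*
	 * DCB is not exposed on SR-IOV slaves; ETS-capable HCAs get the
	 * full dcbnl ops, older ones fall back to PFC-only ops.
	 */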
#ifdef CONFIG_MLX4_EN_DCB
	if (!mlx4_is_slave(priv->mdev->dev)) {
		priv->dcbx_cap = DCB_CAP_DCBX_HOST;
		priv->flags |= MLX4_EN_FLAG_DCB_ENABLED;
		if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_ETS_CFG) {
			dev->dcbnl_ops = &mlx4_en_dcbnl_ops;
		} else {
			en_info(priv, "QoS disabled - no HW support\n");
			dev->dcbnl_ops = &mlx4_en_dcbnl_pfc_ops;
		}
	}
#endif

	for (i = 0; i < MLX4_EN_MAC_HASH_SIZE; ++i)
		INIT_HLIST_HEAD(&priv->mac_hash[i]);

	/* Query for default mac and max mtu */
	priv->max_mtu = mdev->dev->caps.eth_mtu_cap[priv->port];
	priv->mac = mdev->dev->caps.def_mac[priv->port];
	if (ILLEGAL_MAC(priv->mac)) {
#if BITS_PER_LONG == 64
		en_err(priv, "Port: %d, invalid mac burned: 0x%lx, quitting\n",
		    priv->port, priv->mac);
#elif BITS_PER_LONG == 32
		en_err(priv, "Port: %d, invalid mac burned: 0x%llx, quitting\n",
		    priv->port, priv->mac);
#endif
		err = -EINVAL;
		goto out;
	}

	priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
	    DS_SIZE);

	mlx4_en_sysctl_conf(priv);

	err = mlx4_en_alloc_resources(priv);
	if (err)
		goto out;

	/* Allocate page for receive rings */
	err = mlx4_alloc_hwq_res(mdev->dev, &priv->res,
	    MLX4_EN_PAGE_SIZE, MLX4_EN_PAGE_SIZE);
	if (err) {
		en_err(priv, "Failed to allocate page for rx qps\n");
		goto out;
	}
	priv->allocated = 1;

	/*
	 * Set driver features
	 */
	dev->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6;
	dev->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_VLAN_HWTAGGING;
	dev->if_capabilities |= IFCAP_VLAN_HWCSUM | IFCAP_VLAN_HWFILTER;
	dev->if_capabilities |= IFCAP_LINKSTATE | IFCAP_JUMBO_MTU;
	dev->if_capabilities |= IFCAP_LRO;

	if (mdev->LSO_support)
		dev->if_capabilities |= IFCAP_TSO4 | IFCAP_TSO6 | IFCAP_VLAN_HWTSO;

	/* Set TSO limits so that we don't have to drop TX packets */
	dev->if_hw_tsomax = MLX4_EN_TX_MAX_PAYLOAD_SIZE -
	    (ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN) /* hdr */;
	dev->if_hw_tsomaxsegcount = MLX4_EN_TX_MAX_MBUF_FRAGS - 1 /* hdr */;
	dev->if_hw_tsomaxsegsize = MLX4_EN_TX_MAX_MBUF_SIZE;

	dev->if_capenable = dev->if_capabilities;

	dev->if_hwassist = 0;
	if (dev->if_capenable & (IFCAP_TSO4 | IFCAP_TSO6))
		dev->if_hwassist |= CSUM_TSO;
	if (dev->if_capenable & IFCAP_TXCSUM)
		dev->if_hwassist |= (CSUM_TCP | CSUM_UDP | CSUM_IP);
	if (dev->if_capenable & IFCAP_TXCSUM_IPV6)
		dev->if_hwassist |= (CSUM_UDP_IPV6 | CSUM_TCP_IPV6);

	/* Register for VLAN events */
	priv->vlan_attach = EVENTHANDLER_REGISTER(vlan_config,
	    mlx4_en_vlan_rx_add_vid, priv, EVENTHANDLER_PRI_FIRST);
	priv->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig,
	    mlx4_en_vlan_rx_kill_vid, priv, EVENTHANDLER_PRI_FIRST);

	mdev->pndev[priv->port] = dev;

	priv->last_link_state = MLX4_DEV_EVENT_PORT_DOWN;
	mlx4_en_set_default_moderation(priv);
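
	/*
	 * priv->mac holds the station address as an integer; the loop
	 * below writes it out most-significant byte first, the byte
	 * order of a MAC address stored as an array.
	 */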
	/* Set default MAC */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		dev_addr[ETHER_ADDR_LEN - 1 - i] = (u8) (priv->mac >> (8 * i));

	ether_ifattach(dev, dev_addr);
	if_link_state_change(dev, LINK_STATE_DOWN);
	ifmedia_init(&priv->media, IFM_IMASK | IFM_ETH_FMASK,
	    mlx4_en_media_change, mlx4_en_media_status);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_1000_T, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_SR, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_10G_CX4, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_FDX | IFM_40G_CR4, 0, NULL);
	ifmedia_add(&priv->media, IFM_ETHER | IFM_AUTO, 0, NULL);
	ifmedia_set(&priv->media, IFM_ETHER | IFM_AUTO);

	en_warn(priv, "Using %d TX rings\n", prof->tx_ring_num);
	en_warn(priv, "Using %d RX rings\n", prof->rx_ring_num);

	priv->registered = 1;

	priv->rx_mb_size = dev->if_mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
	err = mlx4_SET_PORT_general(mdev->dev, priv->port,
	    priv->rx_mb_size,
	    prof->tx_pause, prof->tx_ppp,
	    prof->rx_pause, prof->rx_ppp);
	if (err) {
		en_err(priv, "Failed setting port general configurations "
		    "for port %d, with error %d\n", priv->port, err);
		goto out;
	}

	/* Init port */
	en_warn(priv, "Initializing port\n");
	err = mlx4_INIT_PORT(mdev->dev, priv->port);
	if (err) {
		en_err(priv, "Failed initializing port\n");
		goto out;
	}

	queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);

	if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
		queue_delayed_work(mdev->workqueue, &priv->service_task,
		    SERVICE_TASK_DELAY);

	return 0;

out:
	mlx4_en_destroy_netdev(dev);
	return err;
}
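
/*
 * Resize the RX and TX rings.  Sizes are rounded up to a power of two
 * and clamped to the supported range; if the port is running it is
 * stopped, resources are reallocated, and the port is restarted.
 */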
static int mlx4_en_set_ring_size(struct net_device *dev,
    int rx_size, int tx_size)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int port_up = 0;
	int err = 0;

	rx_size = roundup_pow_of_two(rx_size);
	rx_size = max_t(u32, rx_size, MLX4_EN_MIN_RX_SIZE);
	rx_size = min_t(u32, rx_size, MLX4_EN_MAX_RX_SIZE);
	tx_size = roundup_pow_of_two(tx_size);
	tx_size = max_t(u32, tx_size, MLX4_EN_MIN_TX_SIZE);
	tx_size = min_t(u32, tx_size, MLX4_EN_MAX_TX_SIZE);

	if (rx_size == (priv->port_up ?
	    priv->rx_ring[0]->actual_size : priv->rx_ring[0]->size) &&
	    tx_size == priv->tx_ring[0]->size)
		return 0;
	mutex_lock(&mdev->state_lock);
	if (priv->port_up) {
		port_up = 1;
		mlx4_en_stop_port(dev);
	}
	mlx4_en_free_resources(priv);
	priv->prof->tx_ring_size = tx_size;
	priv->prof->rx_ring_size = rx_size;
	err = mlx4_en_alloc_resources(priv);
	if (err) {
		en_err(priv, "Failed reallocating port resources\n");
		goto out;
	}
	if (port_up) {
		err = mlx4_en_start_port(dev);
		if (err)
			en_err(priv, "Failed starting port\n");
	}
out:
	mutex_unlock(&mdev->state_lock);
	return err;
}

static int mlx4_en_set_rx_ring_size(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	int size;
	int error;

	priv = arg1;
	size = priv->prof->rx_ring_size;
	error = sysctl_handle_int(oidp, &size, 0, req);
	if (error || !req->newptr)
		return (error);
	error = -mlx4_en_set_ring_size(priv->dev, size,
	    priv->prof->tx_ring_size);
	return (error);
}

static int mlx4_en_set_tx_ring_size(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	int size;
	int error;

	priv = arg1;
	size = priv->prof->tx_ring_size;
	error = sysctl_handle_int(oidp, &size, 0, req);
	if (error || !req->newptr)
		return (error);
	error = -mlx4_en_set_ring_size(priv->dev, priv->prof->rx_ring_size,
	    size);
	return (error);
}

static int mlx4_en_get_module_info(struct net_device *dev,
    struct ethtool_modinfo *modinfo)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int ret;
	u8 data[4];

	/* Read first 2 bytes to get Module & REV ID */
	ret = mlx4_get_module_info(mdev->dev, priv->port,
	    0 /* offset */, 2 /* size */, data);
	if (ret < 2) {
		en_err(priv, "Failed to read eeprom module first two bytes, error: 0x%x\n", -ret);
		return -EIO;
	}

	switch (data[0] /* identifier */) {
	case MLX4_MODULE_ID_QSFP:
		modinfo->type = ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		break;
	case MLX4_MODULE_ID_QSFP_PLUS:
		if (data[1] >= 0x3) { /* revision id */
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		} else {
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		}
		break;
	case MLX4_MODULE_ID_QSFP28:
		modinfo->type = ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		break;
	case MLX4_MODULE_ID_SFP:
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		break;
	default:
		en_err(priv, "mlx4_en_get_module_info: unrecognized cable type\n");
		return -EINVAL;
	}

	return 0;
}
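
/*
 * Read the module EEPROM contents into "data".  mlx4_get_module_info()
 * may return fewer bytes than requested, so keep issuing reads at the
 * advancing offset until the requested length has been satisfied.
 */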
static int mlx4_en_get_module_eeprom(struct net_device *dev,
    struct ethtool_eeprom *ee,
    u8 *data)
{
	struct mlx4_en_priv *priv = netdev_priv(dev);
	struct mlx4_en_dev *mdev = priv->mdev;
	int offset = ee->offset;
	int i = 0, ret;

	if (ee->len == 0)
		return -EINVAL;

	memset(data, 0, ee->len);

	while (i < ee->len) {
		en_dbg(DRV, priv,
		    "mlx4_get_module_info i(%d) offset(%d) len(%d)\n",
		    i, offset, ee->len - i);

		ret = mlx4_get_module_info(mdev->dev, priv->port,
		    offset, ee->len - i, data + i);
		if (!ret) /* Done reading */
			return 0;
		if (ret < 0) {
			en_err(priv,
			    "mlx4_get_module_info i(%d) offset(%d) bytes_to_read(%d) - FAILED (0x%x)\n",
			    i, offset, ee->len - i, ret);
			return -1;
		}

		i += ret;
		offset += ret;
	}
	return 0;
}

static void mlx4_en_print_eeprom(u8 *data, __u32 len)
{
	int i;
	int j = 0;
	int row = 0;
	const int NUM_OF_BYTES = 16;

	printf("\nOffset\t\tValues\n");
	printf("------\t\t------\n");
	while (row < len) {
		printf("0x%04x\t\t", row);
		/* Stop at "len" so a partial last row does not read past the buffer */
		for (i = 0; i < NUM_OF_BYTES && row < len; i++) {
			printf("%02x ", data[j]);
			row++;
			j++;
		}
		printf("\n");
	}
}

/*
 * Read cable EEPROM module information by first inspecting the first
 * two bytes to get the length, and then reading the rest of the
 * information.  The information is printed to dmesg.
 */
static int mlx4_en_read_eeprom(SYSCTL_HANDLER_ARGS)
{
	u8 *data;
	int error;
	int result = 0;
	struct mlx4_en_priv *priv;
	struct net_device *dev;
	struct ethtool_modinfo modinfo;
	struct ethtool_eeprom ee;

	error = sysctl_handle_int(oidp, &result, 0, req);
	if (error || !req->newptr)
		return (error);

	if (result == 1) {
		priv = arg1;
		dev = priv->dev;
		data = kmalloc(PAGE_SIZE, GFP_KERNEL);
		if (data == NULL)	/* nothing to print without a buffer */
			return (0);

		error = mlx4_en_get_module_info(dev, &modinfo);
		if (error) {
			en_err(priv,
			    "mlx4_en_get_module_info returned with error - FAILED (0x%x)\n",
			    -error);
			goto out;
		}

		ee.len = modinfo.eeprom_len;
		ee.offset = 0;

		error = mlx4_en_get_module_eeprom(dev, &ee, data);
		if (error) {
			en_err(priv,
			    "mlx4_en_get_module_eeprom returned with error - FAILED (0x%x)\n",
			    -error);
			/* Continue printing partial information in case of an error */
		}

		/* EEPROM information will be printed in dmesg */
		mlx4_en_print_eeprom(data, ee.len);
out:
		kfree(data);
	}
	/* Return zero to prevent sysctl failure. */
	return (0);
}

static int mlx4_en_set_tx_ppp(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	int ppp;
	int error;

	priv = arg1;
	ppp = priv->prof->tx_ppp;
	error = sysctl_handle_int(oidp, &ppp, 0, req);
	if (error || !req->newptr)
		return (error);
	if (ppp > 0xff || ppp < 0)
		return (-EINVAL);
	priv->prof->tx_ppp = ppp;
	error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
	    priv->rx_mb_size + ETHER_CRC_LEN,
	    priv->prof->tx_pause,
	    priv->prof->tx_ppp,
	    priv->prof->rx_pause,
	    priv->prof->rx_ppp);
	return (error);
}
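
/*
 * RX per-priority pause handler.  Flipping PPP between disabled and
 * enabled may change how the TX queues are provisioned, so the port
 * is stopped and its resources reallocated before applying the new
 * setting; a value change alone only needs a SET_PORT command.
 */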
static int mlx4_en_set_rx_ppp(SYSCTL_HANDLER_ARGS)
{
	struct mlx4_en_priv *priv;
	struct mlx4_en_dev *mdev;
	int ppp;
	int error;
	int port_up;

	port_up = 0;
	priv = arg1;
	mdev = priv->mdev;
	ppp = priv->prof->rx_ppp;
	error = sysctl_handle_int(oidp, &ppp, 0, req);
	if (error || !req->newptr)
		return (error);
	if (ppp > 0xff || ppp < 0)
		return (-EINVAL);
	/* See if we have to change the number of tx queues. */
	if (!ppp != !priv->prof->rx_ppp) {
		mutex_lock(&mdev->state_lock);
		if (priv->port_up) {
			port_up = 1;
			mlx4_en_stop_port(priv->dev);
		}
		mlx4_en_free_resources(priv);
		priv->prof->rx_ppp = ppp;
		error = -mlx4_en_alloc_resources(priv);
		if (error)
			en_err(priv, "Failed reallocating port resources\n");
		if (error == 0 && port_up) {
			error = -mlx4_en_start_port(priv->dev);
			if (error)
				en_err(priv, "Failed starting port\n");
		}
		mutex_unlock(&mdev->state_lock);
		return (error);
	}
	priv->prof->rx_ppp = ppp;
	error = -mlx4_SET_PORT_general(priv->mdev->dev, priv->port,
	    priv->rx_mb_size + ETHER_CRC_LEN,
	    priv->prof->tx_pause,
	    priv->prof->tx_ppp,
	    priv->prof->rx_pause,
	    priv->prof->rx_ppp);
	return (error);
}
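
/*
 * Create the per-interface "conf" sysctl tree under hw.<ifname>:
 * ring sizes, per-priority pause, interrupt coalescing knobs, and
 * the EEPROM dump trigger.
 */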
static void mlx4_en_sysctl_conf(struct mlx4_en_priv *priv)
{
	struct net_device *dev;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *node;
	struct sysctl_oid_list *node_list;
	struct sysctl_oid *coal;
	struct sysctl_oid_list *coal_list;
	const char *pnameunit;

	dev = priv->dev;
	ctx = &priv->conf_ctx;
	pnameunit = device_get_nameunit(priv->mdev->pdev->dev.bsddev);

	sysctl_ctx_init(ctx);
	priv->sysctl = SYSCTL_ADD_NODE(ctx, SYSCTL_STATIC_CHILDREN(_hw),
	    OID_AUTO, dev->if_xname, CTLFLAG_RD, 0, "mlx4 10gig ethernet");
	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->sysctl), OID_AUTO,
	    "conf", CTLFLAG_RD, NULL, "Configuration");
	node_list = SYSCTL_CHILDREN(node);

	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "msg_enable",
	    CTLFLAG_RW, &priv->msg_enable, 0,
	    "Driver message enable bitfield");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_rings",
	    CTLFLAG_RD, &priv->rx_ring_num, 0,
	    "Number of receive rings");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_rings",
	    CTLFLAG_RD, &priv->tx_ring_num, 0,
	    "Number of transmit rings");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_size",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_rx_ring_size, "I", "Receive ring size");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_size",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_tx_ring_size, "I", "Transmit ring size");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "tx_ppp",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_tx_ppp, "I", "TX Per-priority pause");
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "rx_ppp",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_set_rx_ppp, "I", "RX Per-priority pause");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "port_num",
	    CTLFLAG_RD, &priv->port, 0,
	    "Port Number");
	SYSCTL_ADD_STRING(ctx, node_list, OID_AUTO, "device_name",
	    CTLFLAG_RD, __DECONST(void *, pnameunit), 0,
	    "PCI device name");

	/* Add coalescer configuration. */
	coal = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO,
	    "coalesce", CTLFLAG_RD, NULL, "Interrupt coalesce configuration");
	coal_list = SYSCTL_CHILDREN(coal);
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_low",
	    CTLFLAG_RW, &priv->pkt_rate_low, 0,
	    "Packets per-second for minimum delay");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_low",
	    CTLFLAG_RW, &priv->rx_usecs_low, 0,
	    "Minimum RX delay in micro-seconds");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "pkt_rate_high",
	    CTLFLAG_RW, &priv->pkt_rate_high, 0,
	    "Packets per-second for maximum delay");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "rx_usecs_high",
	    CTLFLAG_RW, &priv->rx_usecs_high, 0,
	    "Maximum RX delay in micro-seconds");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "sample_interval",
	    CTLFLAG_RW, &priv->sample_interval, 0,
	    "Adaptive frequency in units of HZ ticks");
	SYSCTL_ADD_UINT(ctx, coal_list, OID_AUTO, "adaptive_rx_coal",
	    CTLFLAG_RW, &priv->adaptive_rx_coal, 0,
	    "Enable adaptive rx coalescing");
	/* EEPROM support */
	SYSCTL_ADD_PROC(ctx, node_list, OID_AUTO, "eeprom_info",
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE, priv, 0,
	    mlx4_en_read_eeprom, "I", "EEPROM information");
}
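
/*
 * Create the per-interface "stat" sysctl tree: port-wide packet,
 * byte, and error counters plus per-ring TX/RX statistics.
 */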
static void mlx4_en_sysctl_stat(struct mlx4_en_priv *priv)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *node;
	struct sysctl_oid_list *node_list;
	struct sysctl_oid *ring_node;
	struct sysctl_oid_list *ring_list;
	struct mlx4_en_tx_ring *tx_ring;
	struct mlx4_en_rx_ring *rx_ring;
	char namebuf[128];
	int i;

	ctx = &priv->stat_ctx;
	sysctl_ctx_init(ctx);
	node = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(priv->sysctl), OID_AUTO,
	    "stat", CTLFLAG_RD, NULL, "Statistics");
	node_list = SYSCTL_CHILDREN(node);

#ifdef MLX4_EN_PERF_STAT
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_poll", CTLFLAG_RD,
	    &priv->pstats.tx_poll, "TX Poll calls");
	SYSCTL_ADD_QUAD(ctx, node_list, OID_AUTO, "tx_pktsz_avg", CTLFLAG_RD,
	    &priv->pstats.tx_pktsz_avg, "TX average packet size");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "inflight_avg", CTLFLAG_RD,
	    &priv->pstats.inflight_avg, "TX average packets in-flight");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "tx_coal_avg", CTLFLAG_RD,
	    &priv->pstats.tx_coal_avg, "TX average coalesced completions");
	SYSCTL_ADD_UINT(ctx, node_list, OID_AUTO, "rx_coal_avg", CTLFLAG_RD,
	    &priv->pstats.rx_coal_avg, "RX average coalesced completions");
#endif

	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tso_packets", CTLFLAG_RD,
	    &priv->port_stats.tso_packets, "TSO packets sent");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "queue_stopped", CTLFLAG_RD,
	    &priv->port_stats.queue_stopped, "Queue full");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "wake_queue", CTLFLAG_RD,
	    &priv->port_stats.wake_queue, "Queue resumed after full");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_timeout", CTLFLAG_RD,
	    &priv->port_stats.tx_timeout, "Transmit timeouts");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_oversized_packets", CTLFLAG_RD,
	    &priv->port_stats.oversized_packets, "TX oversized packets, m_defrag failed");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_alloc_failed", CTLFLAG_RD,
	    &priv->port_stats.rx_alloc_failed, "RX failed to allocate mbuf");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_chksum_good", CTLFLAG_RD,
	    &priv->port_stats.rx_chksum_good, "RX checksum offload success");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_chksum_none", CTLFLAG_RD,
	    &priv->port_stats.rx_chksum_none, "RX without checksum offload");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_chksum_offload",
	    CTLFLAG_RD, &priv->port_stats.tx_chksum_offload,
	    "TX checksum offloads");

	/* Could strdup the names and add in a loop.  This is simpler. */
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_bytes", CTLFLAG_RD,
	    &priv->pkstats.rx_bytes, "RX Bytes");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_packets, "RX Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_multicast_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_multicast_packets, "RX Multicast Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_broadcast_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_broadcast_packets, "RX Broadcast Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_errors, "RX Errors");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_dropped", CTLFLAG_RD,
	    &priv->pkstats.rx_dropped, "RX Dropped");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_length_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_length_errors, "RX Length Errors");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_over_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_over_errors, "RX Over Errors");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_crc_errors", CTLFLAG_RD,
	    &priv->pkstats.rx_crc_errors, "RX CRC Errors");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_jabbers", CTLFLAG_RD,
	    &priv->pkstats.rx_jabbers, "RX Jabbers");

	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_in_range_length_error", CTLFLAG_RD,
	    &priv->pkstats.rx_in_range_length_error, "RX In Range Length Error");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_out_range_length_error",
	    CTLFLAG_RD, &priv->pkstats.rx_out_range_length_error,
	    "RX Out Range Length Error");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_lt_64_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_lt_64_bytes_packets, "RX Less Than 64 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_127_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_127_bytes_packets, "RX 127 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_255_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_255_bytes_packets, "RX 255 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_511_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_511_bytes_packets, "RX 511 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1023_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1023_bytes_packets, "RX 1023 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1518_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1518_bytes_packets, "RX 1518 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1522_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1522_bytes_packets, "RX 1522 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_1548_bytes_packets, "RX 1548 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "rx_gt_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.rx_gt_1548_bytes_packets,
	    "RX Greater Than 1548 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_packets, "TX Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_bytes", CTLFLAG_RD,
	    &priv->pkstats.tx_bytes, "TX Bytes");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_multicast_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_multicast_packets, "TX Multicast Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_broadcast_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_broadcast_packets, "TX Broadcast Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_errors", CTLFLAG_RD,
	    &priv->pkstats.tx_errors, "TX Errors");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_dropped", CTLFLAG_RD,
	    &priv->pkstats.tx_dropped, "TX Dropped");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_lt_64_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_lt_64_bytes_packets, "TX Less Than 64 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_127_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_127_bytes_packets, "TX 127 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_255_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_255_bytes_packets, "TX 255 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_511_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_511_bytes_packets, "TX 511 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1023_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_1023_bytes_packets, "TX 1023 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1518_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_1518_bytes_packets, "TX 1518 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1522_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_1522_bytes_packets, "TX 1522 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_1548_bytes_packets, "TX 1548 Bytes Packets");
	SYSCTL_ADD_ULONG(ctx, node_list, OID_AUTO, "tx_gt_1548_bytes_packets", CTLFLAG_RD,
	    &priv->pkstats.tx_gt_1548_bytes_packets,
	    "TX Greater Than 1548 Bytes Packets");

	for (i = 0; i < priv->tx_ring_num; i++) {
		tx_ring = priv->tx_ring[i];
		snprintf(namebuf, sizeof(namebuf), "tx_ring%d", i);
		ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "TX Ring");
		ring_list = SYSCTL_CHILDREN(ring_node);
		SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "packets",
		    CTLFLAG_RD, &tx_ring->packets, "TX packets");
		SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "bytes",
		    CTLFLAG_RD, &tx_ring->bytes, "TX bytes");
	}
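
	/* Per-ring RX counters; "error" counts soft (driver-level) errors. */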
	for (i = 0; i < priv->rx_ring_num; i++) {
		rx_ring = priv->rx_ring[i];
		snprintf(namebuf, sizeof(namebuf), "rx_ring%d", i);
		ring_node = SYSCTL_ADD_NODE(ctx, node_list, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "RX Ring");
		ring_list = SYSCTL_CHILDREN(ring_node);
		SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "packets",
		    CTLFLAG_RD, &rx_ring->packets, "RX packets");
		SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "bytes",
		    CTLFLAG_RD, &rx_ring->bytes, "RX bytes");
		SYSCTL_ADD_ULONG(ctx, ring_list, OID_AUTO, "error",
		    CTLFLAG_RD, &rx_ring->errors, "RX soft errors");
	}
}
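
/*
 * Example (hypothetical interface unit and values): the trees built
 * above appear under hw.<ifname> and can be exercised from userland
 * with sysctl(8), e.g.:
 *
 *	sysctl hw.mlxen0.conf.rx_size=4096	# resize the RX rings
 *	sysctl hw.mlxen0.conf.eeprom_info=1	# dump module EEPROM to dmesg
 *	sysctl hw.mlxen0.stat.rx_ring0.packets	# read a per-ring counter
 */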