Lines Matching defs:rf

59  * @rf: RDMA PCI function
63 irdma_init_tunable(struct irdma_pci_f *rf, uint8_t pf_id)
66 struct irdma_tunable_info *t_info = &rf->tun_info;
87 OID_AUTO, "debug", CTLFLAG_RWTUN, &rf->sc_dev.debug_mask,
98 rf->protocol_used = IRDMA_IWARP_PROTOCOL_ONLY;
100 rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;
105 (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? "iWARP" : "RoCEv2",
113 irdma_dcqcn_tunables_init(rf);
114 irdma_sysctl_settings(rf);
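Note: the matches at 63-114 outline the tunable bootstrap: build a per-PF sysctl tree, hang the "debug" knob off it (line 87), select the protocol, then register the DCQCN and sysctl knobs. A minimal sketch of that shape follows; the node name, its placement under dev., and the u64 type of debug_mask are assumptions, not confirmed by the matches.

    #include <sys/param.h>
    #include <sys/systm.h>
    #include <sys/sysctl.h>

    /* Sketch of the tunable bootstrap. The "irdma%d" node name and its
     * placement under _dev are illustrative assumptions. */
    static void
    irdma_init_tunable_sketch(struct irdma_pci_f *rf, uint8_t pf_id)
    {
        struct irdma_tunable_info *t_info = &rf->tun_info;
        char name[16];

        snprintf(name, sizeof(name), "irdma%d", pf_id);
        sysctl_ctx_init(&t_info->irdma_sysctl_ctx);
        t_info->irdma_sysctl_tree = SYSCTL_ADD_NODE(&t_info->irdma_sysctl_ctx,
            SYSCTL_STATIC_CHILDREN(_dev), OID_AUTO, name,
            CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "irdma driver parameters");
        /* matches line 87: a read/write tunable backed by debug_mask */
        SYSCTL_ADD_U64(&t_info->irdma_sysctl_ctx,
            SYSCTL_CHILDREN(t_info->irdma_sysctl_tree), OID_AUTO, "debug",
            CTLFLAG_RWTUN, &rf->sc_dev.debug_mask, 0,
            "bitwise OR of IRDMA_DEBUG_* flags");
    }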
129 if (!hdl->iwdev->rf->peer_info)
131 if (hdl->iwdev->rf->peer_info->dev == p_dev->dev) {
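Note: lines 129-131 are the body of a handler lookup, matching a peer's device pointer against each registered handler. Roughly as below; the function name, list head, and linkage member are assumptions, and locking around the list is elided.

    static struct irdma_handler *
    irdma_find_handler_sketch(struct ice_rdma_peer *p_dev)
    {
        struct irdma_handler *hdl;

        /* "irdma_handlers" and the "list" linkage are assumptions */
        list_for_each_entry(hdl, &irdma_handlers, list) {
            if (!hdl->iwdev->rf->peer_info)
                continue;
            if (hdl->iwdev->rf->peer_info->dev == p_dev->dev)
                return hdl;
        }
        return NULL;
    }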
165 irdma_get_qos_info(struct irdma_pci_f *rf, struct irdma_l2params *l2params,
192 if (!(rf->sc_dev.debug_mask & IRDMA_DEBUG_DCB))
219 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "num_tc: %d\n", l2params->num_tc);
220 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "num_apps: %d\n", l2params->num_apps);
221 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "vsi_prio_type: %d\n", l2params->vsi_prio_type);
222 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "vsi_rel_bw: %d\n", l2params->vsi_rel_bw);
223 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "egress_virt_up: %s\n", txt[0]);
224 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "ingress_virt_up: %s\n", txt[1]);
225 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "prio_type: %s\n", txt[2]);
226 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "rel_bw: %s\n", txt[3]);
227 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "tc_ctx: %s\n", txt[4]);
228 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "up2tc: %s\n", txt[5]);
229 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "dscp_mode: %s\n", txt[6]);
231 irdma_debug_buf(&rf->sc_dev, IRDMA_DEBUG_DCB, "l2params", l2params, sizeof(*l2params));
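Note: lines 192-231 gate a verbose QoS dump behind the DCB debug flag: return early unless IRDMA_DEBUG_DCB is set, format the per-priority arrays into the txt[] strings, log each row, then hex-dump the whole l2params (line 231). A sketch of one row; the txt[] sizing and the eight-priority bound are assumptions.

    char txt[7][64];
    int i, len = 0;

    /* skip the expensive formatting unless DCB debugging is enabled */
    if (!(rf->sc_dev.debug_mask & IRDMA_DEBUG_DCB))
        return;

    /* format one per-priority array (up2tc) into a printable row */
    for (i = 0; i < 8; i++)
        len += snprintf(txt[5] + len, sizeof(txt[5]) - len, "%d ",
            l2params->up2tc[i]);
    irdma_debug(&rf->sc_dev, IRDMA_DEBUG_DCB, "up2tc: %s\n", txt[5]);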
311 irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev);
326 if (!atomic_inc_not_zero(&iwdev->rf->dev_ctx.event_rfcnt)) {
329 atomic_read(&iwdev->rf->dev_ctx.event_rfcnt));
342 irdma_get_qos_info(iwdev->rf, &l2params, &event->port_qos);
343 if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
350 atomic_dec(&iwdev->rf->dev_ctx.event_rfcnt);
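Note: lines 326-350 show the guard that keeps event handlers from racing device teardown: event_rfcnt only advances if it is still non-zero, so a handler that loses the race bails out instead of touching a half-torn-down rf. The pattern, with the log text paraphrased:

    /* take a reference only while the device is still live */
    if (!atomic_inc_not_zero(&iwdev->rf->dev_ctx.event_rfcnt)) {
        irdma_debug(&iwdev->rf->sc_dev, IRDMA_DEBUG_INIT,
            "bailing, unload in progress, event_rfcnt %d\n",
            atomic_read(&iwdev->rf->dev_ctx.event_rfcnt));
        return;
    }
    /* ... apply the QoS/MTU update while the reference is held ... */
    irdma_get_qos_info(iwdev->rf, &l2params, &event->port_qos);
    atomic_dec(&iwdev->rf->dev_ctx.event_rfcnt);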
357 pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]);
361 iwdev->rf->reset = true;
363 irdma_dev_warn(to_ibdev(&iwdev->rf->sc_dev),
369 iwdev->rf->reset = true;
371 if (iwdev->rf->reset)
372 iwdev->rf->gen_ops.request_reset(iwdev->rf);
375 iwdev->rf->reset = true;
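Note: lines 357-375 handle a critical-error event: read GLPE_CRITERR, warn, latch rf->reset, then hand off to the registered reset callback. Condensed below, with the warning text paraphrased.

    u32 pe_criterr;

    pe_criterr = readl(iwdev->rf->sc_dev.hw_regs[IRDMA_GLPE_CRITERR]);
    if (pe_criterr) {
        irdma_dev_warn(to_ibdev(&iwdev->rf->sc_dev),
            "critical PE error, GLPE_CRITERR=0x%08x\n", pe_criterr);
        iwdev->rf->reset = true;
    }
    if (iwdev->rf->reset)
        iwdev->rf->gen_ops.request_reset(iwdev->rf);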
410 struct irdma_pci_f *rf = iwdev->rf;
417 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
419 rf->peer_info->pf_id, if_getdunit(peer->ifp));
420 atomic_dec(&rf->dev_ctx.event_rfcnt);
422 !atomic_read(&rf->dev_ctx.event_rfcnt),
424 if (atomic_read(&rf->dev_ctx.event_rfcnt) != 0) {
427 atomic_read(&rf->dev_ctx.event_rfcnt));
429 irdma_dereg_ipaddr_event_cb(rf);
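Note: lines 417-429 are the close half of the deferred finalize task: drop this path's reference, wait for in-flight event handlers to drain, warn if any remain, then unhook the IP-address event callback. Sketch; the suspend_wq waitqueue name and the 10-second bound are assumptions.

    atomic_dec(&rf->dev_ctx.event_rfcnt);
    /* wait for outstanding event handlers to finish (bound assumed) */
    wait_event_timeout(iwdev->suspend_wq,
        !atomic_read(&rf->dev_ctx.event_rfcnt),
        msecs_to_jiffies(10000));
    if (atomic_read(&rf->dev_ctx.event_rfcnt) != 0)
        irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
            "event_rfcnt %d still held at close\n",
            atomic_read(&rf->dev_ctx.event_rfcnt));
    irdma_dereg_ipaddr_event_cb(rf);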
437 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
439 rf->peer_info->pf_id, if_getdunit(peer->ifp));
440 irdma_get_qos_info(iwdev->rf, &l2params, &peer->initial_qos_info);
441 if (iwdev->rf->protocol_used != IRDMA_IWARP_PROTOCOL_ONLY)
457 irdma_sw_stats_tunables_init(rf);
461 irdma_reg_ipaddr_event_cb(rf);
462 atomic_inc(&rf->dev_ctx.event_rfcnt);
463 irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
465 rf->peer_info->pf_id, if_getdunit(peer->ifp));
472 * @rf: RDMA PCI function
475 irdma_alloc_pcidev(struct ice_rdma_peer *peer, struct irdma_pci_f *rf)
477 rf->pcidev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
478 if (!rf->pcidev) {
481 if (linux_pci_attach_device(rf->dev_ctx.dev, NULL, NULL, rf->pcidev))
489 * @rf: RDMA PCI function
492 irdma_dealloc_pcidev(struct irdma_pci_f *rf)
494 linux_pci_detach_device(rf->pcidev);
495 kfree(rf->pcidev);
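Note: lines 475-495 pair up: irdma_alloc_pcidev backs rf->pcidev with a LinuxKPI pci_dev shim attached to the underlying device, and irdma_dealloc_pcidev detaches and frees it. Reconstructed from the matches; the error values are assumptions.

    static int
    irdma_alloc_pcidev(struct ice_rdma_peer *peer, struct irdma_pci_f *rf)
    {
        rf->pcidev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
        if (!rf->pcidev)
            return -ENOMEM;
        /* bind the LinuxKPI pci_dev shim to the underlying device_t */
        if (linux_pci_attach_device(rf->dev_ctx.dev, NULL, NULL, rf->pcidev))
            return -ENODEV;
        return 0;
    }

    static void
    irdma_dealloc_pcidev(struct irdma_pci_f *rf)
    {
        linux_pci_detach_device(rf->pcidev);
        kfree(rf->pcidev);
    }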
499 * irdma_fill_device_info - assign initial values to rf fields
507 struct irdma_pci_f *rf = iwdev->rf;
509 rf->peer_info = peer;
510 rf->gen_ops.register_qset = irdma_register_qset;
511 rf->gen_ops.unregister_qset = irdma_unregister_qset;
513 rf->rdma_ver = IRDMA_GEN_2;
514 rf->sc_dev.hw_attrs.uk_attrs.hw_rev = IRDMA_GEN_2;
515 rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
516 rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
517 rf->check_fc = irdma_check_fc_for_qp;
518 rf->gen_ops.request_reset = irdma_request_reset;
519 irdma_set_rf_user_cfg_params(rf);
521 rf->default_vsi.vsi_idx = peer->pf_vsi_num;
522 rf->dev_ctx.dev = peer->dev;
523 rf->dev_ctx.mem_bus_space_tag = rman_get_bustag(peer->pci_mem);
524 rf->dev_ctx.mem_bus_space_handle = rman_get_bushandle(peer->pci_mem);
525 rf->dev_ctx.mem_bus_space_size = rman_get_size(peer->pci_mem);
527 rf->hw.dev_context = &rf->dev_ctx;
528 rf->hw.hw_addr = (u8 *)rman_get_virtual(peer->pci_mem);
529 rf->msix_count = peer->msix.count;
530 rf->msix_info.entry = peer->msix.base;
531 rf->msix_info.vector = peer->msix.count;
533 rf->msix_count, rf->msix_info.entry, rf->msix_info.vector);
535 rf->iwdev = iwdev;
545 if (rf->protocol_used == IRDMA_ROCE_PROTOCOL_ONLY) {
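Note: lines 507-545 cover irdma_fill_device_info: wire the rf callbacks, record the hardware generation and resource profile, then copy the peer's bus resources and MSI-X geometry; line 533 is the tail of a debug print of those values. The MSI-X bookkeeping, with the format string paraphrased:

    /* capture the peer's MSI-X layout for later interrupt setup */
    rf->msix_count = peer->msix.count;
    rf->msix_info.entry = peer->msix.base;
    rf->msix_info.vector = peer->msix.count;
    irdma_debug(&rf->sc_dev, IRDMA_DEBUG_INIT,
        "msix_count %d, entry %d, vector %d\n",
        rf->msix_count, rf->msix_info.entry, rf->msix_info.vector);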
561 struct irdma_pci_f *rf;
583 iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
584 if (!iwdev->rf) {
592 irdma_init_tunable(iwdev->rf, if_getdunit(peer->ifp));
594 rf = iwdev->rf;
596 if (irdma_alloc_pcidev(peer, rf))
601 if (irdma_ctrl_init_hw(rf)) {
606 rf->dev_ctx.task_arg.peer = peer;
607 rf->dev_ctx.task_arg.iwdev = iwdev;
610 TASK_INIT(&hdl->deferred_task, 0, irdma_finalize_task, &rf->dev_ctx.task_arg);
622 irdma_dealloc_pcidev(rf);
624 kfree(iwdev->rf);
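Note: lines 583-624 sketch irdma_probe's shape: the kfree at 624 unwinds the kzalloc at 583, and the dealloc at 622 unwinds the alloc at 596, so a ctrl-init failure tears down in reverse order. A goto-unwind sketch; the label names and return values are assumptions.

    iwdev->rf = kzalloc(sizeof(*rf), GFP_KERNEL);
    if (!iwdev->rf)
        return -ENOMEM;
    irdma_init_tunable(iwdev->rf, if_getdunit(peer->ifp));
    rf = iwdev->rf;
    if (irdma_alloc_pcidev(peer, rf))
        goto err_pcidev;        /* label names are assumptions */
    if (irdma_ctrl_init_hw(rf))
        goto err_ctrl_init;
    rf->dev_ctx.task_arg.peer = peer;
    rf->dev_ctx.task_arg.iwdev = iwdev;
    TASK_INIT(&hdl->deferred_task, 0, irdma_finalize_task,
        &rf->dev_ctx.task_arg);
    return 0;

    err_ctrl_init:
        irdma_dealloc_pcidev(rf);
    err_pcidev:
        kfree(iwdev->rf);
        return -EIO;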
662 hdl->iwdev->rf->dev_ctx.task_arg.iwdev = NULL;
663 hdl->iwdev->rf->dev_ctx.task_arg.peer = NULL;
665 sysctl_ctx_free(&iwdev->rf->tun_info.irdma_sysctl_ctx);
666 hdl->iwdev->rf->tun_info.irdma_sysctl_tree = NULL;
667 hdl->iwdev->rf->tun_info.sws_sysctl_tree = NULL;
669 irdma_ctrl_deinit_hw(iwdev->rf);
671 irdma_dealloc_pcidev(iwdev->rf);
675 kfree(iwdev->rf);
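Note: lines 662-675 show irdma_remove undoing probe in reverse: NULL the deferred-task arguments so a late task run cannot touch freed state, free the sysctl context (sysctl_ctx_free also removes every oid registered under it, which is why the tree pointers are simply NULLed afterward), deinit the control hardware, detach the pci shim, and free rf. Condensed; iwdev is presumably a local alias for hdl->iwdev.

    /* teardown mirrors probe in reverse order */
    hdl->iwdev->rf->dev_ctx.task_arg.iwdev = NULL;
    hdl->iwdev->rf->dev_ctx.task_arg.peer = NULL;
    sysctl_ctx_free(&iwdev->rf->tun_info.irdma_sysctl_ctx);
    hdl->iwdev->rf->tun_info.irdma_sysctl_tree = NULL;
    hdl->iwdev->rf->tun_info.sws_sysctl_tree = NULL;
    irdma_ctrl_deinit_hw(iwdev->rf);
    irdma_dealloc_pcidev(iwdev->rf);
    kfree(iwdev->rf);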
725 if (iwdev && iwdev->rf->reset)
745 if (!hdl->iwdev->rf->peer_info)
753 IRDMA_CLOSE(hdl->iwdev->rf->peer_info);
754 IRDMA_REMOVE(hdl->iwdev->rf->peer_info);