Lines Matching defs:to

93  * Pad ethernet payload up to this boundary.
96 * Any power of 2 from 32 to 4096 (both inclusive) is also a valid value.
129 * -1: let the driver decide whether to enable buffer packing or not.
141 * T5: 16, or a power of 2 from 64 to 4096 (both inclusive) is a valid value.
148 * Largest rx cluster size that the driver is allowed to allocate.
155 * Size of cluster allocation that's most likely to succeed. The driver will
156 * fall back to this size if it fails to allocate clusters larger than this.
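The pad and pack boundary knobs above accept a sentinel value plus any power of 2 in a stated range. A minimal sketch of that validation rule for the pad boundary, using a hypothetical helper (not the driver's own code):

#include <stdbool.h>

/*
 * Hypothetical check mirroring the documented rule for the pad boundary:
 * -1 (let the driver decide), 0 (disable), or any power of 2 from 32 to
 * 4096 inclusive.
 */
static bool
pad_boundary_is_valid(int v)
{
	if (v == -1 || v == 0)
		return (true);
	if (v < 32 || v > 4096)
		return (false);
	return ((v & (v - 1)) == 0);	/* power of 2: exactly one bit set */
}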
164 * Knob to control TCP timestamp rewriting, and the granularity of the tick used
372 * handler dispatch table. Reject any attempt to install a handler for
408 * we have to rely on the hardware tid (is_ftid) to determine
409 * that this is a response to a filter.
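The dispatch-table note above implies an opcode-indexed array of handler pointers where installing over an already-occupied slot is refused. A hedged sketch of that pattern; the table size, names, and handler signature here are illustrative, not the driver's:

#include <errno.h>
#include <stddef.h>

#define NUM_OPCODES	256			/* illustrative table size */

typedef int (*cpl_handler_t)(void *arg, const void *cpl);

static cpl_handler_t handlers[NUM_OPCODES];	/* handler dispatch table */

/* Reject any attempt to install a handler over one that is already set. */
static int
register_handler(unsigned int opcode, cpl_handler_t h)
{
	if (opcode >= NUM_OPCODES || h == NULL)
		return (EINVAL);
	if (handlers[opcode] != NULL)
		return (EBUSY);
	handlers[opcode] = h;
	return (0);
}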
650 * it to the minimum allowed in all other cases.
655 * For fl_pad = 0 we'll still write a reasonable value to the
657 * We'll complain here only if the user tried to set it to a
727 ("%s: trying to change chip settings when not master.", __func__));
816 * may have to deal with is MAXPHYS + 1 page.
832 * SGE wants the buffer to be at least 64B and then a multiple of 16. Its
834 * need to be aligned to the pad boundary as well. We'll just make sure that
835 * the size is a multiple of the pad boundary here; it is up to the buffer
836 * allocation code to make sure the start of the buffer is aligned.
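The constraints spelled out above (at least 64B, a multiple of 16, and a multiple of the pad boundary) reduce to rounding a requested size up to the strictest alignment. A sketch under those assumptions, with a hypothetical helper name and a local round-up macro:

#include <stdint.h>

/* Round x up to a multiple of the power-of-2 value a. */
#define ROUNDUP2(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

/* Hypothetical: normalize a buffer size per the rules described above. */
static uintptr_t
normalize_buf_size(uintptr_t size, uintptr_t pad_boundary)
{
	if (size < 64)
		size = 64;				/* SGE minimum */
	size = ROUNDUP2(size, 16);			/* multiple of 16 */
	if (pad_boundary > 16)
		size = ROUNDUP2(size, pad_boundary);	/* and of the pad boundary */
	return (size);
}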
932 * needs to be carefully reviewed for PAGE_SHIFT vs sp->page_shift.
982 "failed to create main DMA tag: %d\n", rc);
1031 * Returns errno on failure. Resources allocated up to that point may still be
1126 /* Interrupt vector to start from (when using multiple vectors) */
1138 * We don't have buffers to back the netmap rx queues
1487 * Process the head only, and send it to the back of the list if
1536 * Our goal here is to have a result that is:
1543 * don't want to overflow the uint64_t numbers we are using.
1790 /* copy data to mbuf */
2031 /* checksum(s) calculated and found to be correct. */
2054 * way to pass the inner csum_data to the stack.
2055 * We don't want the stack to use the inner
2056 * csum_data to validate the outer frame or it
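When the hardware reports a verified checksum, the usual FreeBSD convention is to mark the mbuf so the stack skips its own verification; the tunnel-specific question above is about which header that mark should apply to. A minimal kernel-context sketch of the convention itself (inner/outer handling omitted):

/* Relies on <sys/mbuf.h>; kernel-context sketch only. */
static void
mark_rx_csum_ok(struct mbuf *m)
{
	/* Tell the stack both L3 and L4 checksums were verified by hardware. */
	m->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
	    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	m->m_pkthdr.csum_data = 0xffff;
}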
2152 MPASS(wr != NULL); /* Must be called with something useful to do */
2215 * Doesn't fail. Holds on to work requests it can't send right away.
2238 /* Doorbell must have caught up to the pidx. */
2344 * Try to allocate an mbuf to contain a raw work request. To make it
2345 * easy to construct the work request, don't allocate a chain but a
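The comment describes allocating one contiguous mbuf large enough for the whole work request rather than a chain. A hedged kernel-context sketch of that idea using m_get2(), which hands back a single (possibly cluster-backed) mbuf or NULL; the size cap and function name are assumptions, not the driver's code:

/* Relies on <sys/param.h> and <sys/mbuf.h>; kernel-context sketch only. */
static struct mbuf *
alloc_raw_wr_mbuf(int len)
{
	struct mbuf *m;

	if (len > MJUMPAGESIZE)		/* too big for one buffer (assumed cap) */
		return (NULL);
	m = m_get2(len, M_NOWAIT, MT_DATA, M_PKTHDR);	/* one mbuf, not a chain */
	if (m != NULL)
		m->m_len = m->m_pkthdr.len = len;
	return (m);
}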
2571 * routine to return 0 if skip accounts for all the contents of the mbuf chain.
2640 * Analyze the mbuf to determine its tx needs. The mbuf passed in may change:
2728 * Ethofld is limited to TCP and UDP for now, and only when L4 hw
2729 * checksumming is enabled. needs_outer_l4_csum happens to check for
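That restriction keys off the mbuf's checksum-offload flags. A minimal sketch of the kind of test implied, using the standard csum_flags bits; the helper name is illustrative and the driver's own predicate may differ:

/* Relies on <sys/mbuf.h>; kernel-context sketch only. */
static inline int
has_l4_csum_offload(const struct mbuf *m)
{
	return ((m->m_pkthdr.csum_flags &
	    (CSUM_IP_TCP | CSUM_IP_UDP | CSUM_IP6_TCP | CSUM_IP6_UDP)) != 0);
}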
3028 /* Doorbell must have caught up to the pidx. */
3046 /* maybe put a GL limit too, to avoid silliness? */
3114 * r->items[cidx] to r->items[pidx], with a wraparound at r->size, are ready to
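The cidx-to-pidx range with wraparound at r->size is the standard consumer walk over a circular array. A small self-contained sketch of that iteration; the struct and function names are illustrative:

#include <stdint.h>

struct ring {
	void		**items;
	uint32_t	size;		/* number of slots */
	uint32_t	cidx, pidx;	/* consumer and producer indices */
};

/*
 * Visit every ready item from cidx up to (but not including) pidx,
 * wrapping around at size.
 */
static void
drain_ring(struct ring *r, void (*consume)(void *))
{
	uint32_t i = r->cidx;

	while (i != r->pidx) {
		consume(r->items[i]);
		if (++i == r->size)
			i = 0;		/* wraparound */
	}
	r->cidx = i;
}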
3131 void *wr; /* start of the last WR written to the ring */
3291 * If nothing was submitted to the chip for tx (it was coalesced
3293 * then we need to send txpkts now.
3516 * Returns errno on failure. Resources allocated up to that point may still be
3543 /* Forwarded interrupts, all headed to fwq */
3594 CH_ERR(sc, "failed to create hw ingress queue: %d\n", rc);
3649 /* Enough to make sure the SGE doesn't think it's starved */
3679 CH_ERR(sc, "failed to free iq %p: %d\n", iq, rc);
3770 CH_ERR(sc, "failed to allocate fwq: %d\n", rc);
3781 CH_ERR(sc, "failed to create hw fwq: %d\n", rc);
3838 CH_ERR(sc, "failed to allocate ctrlq%d: %d\n", idx, rc);
3850 CH_ERR(sc, "failed to create hw ctrlq%d: %d\n", idx, rc);
3892 /* Convert the driver knob to the mode understood by the firmware. */
3927 CH_ERR(sc, "failed to set congestion manager context "
3975 CH_ERR(vi, "failed to allocate rxq%d: %d\n", idx, rc);
3991 CH_ERR(vi, "failed to create hw rxq%d: %d\n", idx, rc);
4093 CH_ERR(vi, "failed to allocate ofld_rxq%d: %d\n", idx,
4112 CH_ERR(vi, "failed to create hw ofld_rxq%d: %d\n", idx,
4262 CH_ERR(sc, "failed to create hw ctrlq for tx_chan %d: %d\n",
4308 "failed to create Ethernet egress queue: %d\n", rc);
4353 "failed to create egress queue for TCP offload: %d\n", rc);
4461 CH_ERR(sc, "failed to allocate egress queue(%d): %d\n",
4512 CH_ERR(sc, "failed to free eq (type %d): %d\n", eq->type, rc);
4605 CH_ERR(vi, "failed to allocate mp_ring for txq%d: %d\n",
4614 CH_ERR(vi, "failed to allocate txq%d: %d\n", idx, rc);
4634 CH_ERR(vi, "failed to create hw txq%d: %d\n", idx, rc);
4738 "# of times txpkts had to be flushed out by an egress-update");
4826 CH_ERR(vi, "failed to allocate ofld_txq%d: %d\n", idx,
4847 CH_ERR(vi, "failed to create hw ofld_txq%d: %d\n", idx,
4950 * Fills up the freelist by allocating up to 'n' buffers. Buffers that are
4953 * Returns non-zero to indicate that this freelist should be added to the list
4972 * before the one with the hw cidx. This is to avoid hw pidx == hw cidx,
4973 * which the chip would interpret as an empty freelist.
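The rule above keeps the producer index from ever catching the consumer index, because pidx == cidx is indistinguishable from an empty ring. A generic sketch of that guard (names are illustrative, not the driver's):

#include <stdint.h>

/* How many slots can be filled without letting pidx catch up to cidx. */
static uint32_t
fillable_slots(uint32_t pidx, uint32_t cidx, uint32_t size)
{
	uint32_t used = (pidx >= cidx) ? pidx - cidx : size - cidx + pidx;

	return (size - 1 - used);	/* always keep one slot in reserve */
}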
4992 * received in the cluster were small enough to
5000 * Cluster is guaranteed to have metadata. Clusters
5067 * Attempt to refill all starving freelists.
5141 ("%s: nsegs changed for mbuf %p from %d to %d", __func__, m,
5312 * checksum to calculate.
5414 #define VM_TX_L2HDR_LEN 16 /* ethmacdst to vlantci */
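The 16 bytes come from four L2 fields carried back to back: 6 (dst MAC) + 6 (src MAC) + 2 (ethertype) + 2 (VLAN TCI). An illustrative layout with the arithmetic checked at compile time; this struct is a sketch, not the firmware's definition:

#include <stdint.h>

struct vm_tx_l2hdr {			/* illustrative only */
	uint8_t		ethmacdst[6];
	uint8_t		ethmacsrc[6];
	uint16_t	ethtype;
	uint16_t	vlantci;
} __attribute__((packed));

_Static_assert(sizeof(struct vm_tx_l2hdr) == 16, "VM_TX_L2HDR_LEN mismatch");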
5417 * Write a VM txpkt WR for this packet to the hardware descriptors, update the
5459 * simpler to always copy it rather than making it
5460 * conditional. Also, it seems that we do not have to set
5496 * around to the front of the ring explicitly for the start of
5515 * Write a raw WR to the hardware descriptors, update the software
5548 * Write a txpkt WR for this packet to the hardware descriptors, update the
5581 /* Immediate data. Recalculate len16 and set nsegs to 0. */
5813 * Write a txpkts WR for the packets in txp to the hardware descriptors, update
5847 * set then we know the WR is going to wrap around somewhere. We'll
6015 write_gl_to_txd(struct sge_txq *txq, struct mbuf *m, caddr_t *to, int checkwrap)
6024 KASSERT(((uintptr_t)(*to) & 0xf) == 0,
6025 ("%s: SGL must start at a 16 byte boundary: %p", __func__, *to));
6026 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
6027 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
6034 flitp = (__be64 *)(*to);
6082 *to = (void *)eq->desc;
6084 *to = (void *)flitp;
6088 copy_to_txd(struct sge_eq *eq, caddr_t from, caddr_t *to, int len)
6091 MPASS((uintptr_t)(*to) >= (uintptr_t)&eq->desc[0]);
6092 MPASS((uintptr_t)(*to) < (uintptr_t)&eq->desc[eq->sidx]);
6094 if (__predict_true((uintptr_t)(*to) + len <=
6096 bcopy(from, *to, len);
6097 (*to) += len;
6099 int portion = (uintptr_t)&eq->desc[eq->sidx] - (uintptr_t)(*to);
6101 bcopy(from, *to, portion);
6105 (*to) = (caddr_t)eq->desc + portion;
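The copy_to_txd fragments above show a copy that may split at the end of the descriptor array and continue at its start. A reconstructed, self-contained sketch of that wraparound pattern (generic buffer, memcpy instead of the kernel's bcopy; not the driver's verbatim code):

#include <stddef.h>
#include <string.h>

/*
 * Copy len bytes into the circular region [base, base + region_len),
 * advancing *to and wrapping back to base when the copy runs past the end.
 */
static void
copy_wrapped(char *base, size_t region_len, const char *from, char **to,
    size_t len)
{
	size_t room = (size_t)(base + region_len - *to);

	if (len <= room) {			/* fits without splitting */
		memcpy(*to, from, len);
		*to += len;
		if (*to == base + region_len)
			*to = base;		/* landed exactly on the end */
	} else {				/* split: tail of region, then front */
		memcpy(*to, from, room);
		memcpy(base, from + room, len - room);
		*to = base + (len - room);
	}
}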
6770 /* How many len16 credits do we need to send this mbuf. */
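Work-request space is accounted in 16-byte units ("len16"), so converting a byte length is a round-up division. A trivial sketch of that conversion (generic helper, not the driver's):

/* Number of 16-byte credits needed to cover len bytes. */
static inline unsigned int
bytes_to_len16(unsigned int len)
{
	return ((len + 15) / 16);
}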
6863 * requests to ensure that we don't try to free the tag during
6950 * should hold a reference to the tag.