Lines Matching refs:idx

874 int idx, i;
880 idx = le32toh(api->api_index);
881 if (idx >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
888 setbit(sc->sc_ucode_api, i + (32 * idx));
895 int idx, i;
901 idx = le32toh(capa->api_index);
902 if (idx >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
908 setbit(sc->sc_enabled_capa, i + (32 * idx));
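
Both hunks above (874–888 and 895–908) use the same scheme: the TLV payload carries a 32-bit word index, indices past the end of the bitmap are rejected with howmany(), and each set bit in the word is recorded with setbit(). A minimal userland sketch of that scheme, with hypothetical names (NUM_FLAGS, flag_map) standing in for the driver's, using the BSD bitmap macros from <sys/param.h>:

	#include <sys/param.h>	/* howmany(), setbit(), isset(), NBBY */
	#include <stdint.h>
	#include <stdio.h>

	#define NUM_FLAGS	128	/* hypothetical; stands in for IWM_NUM_UCODE_TLV_API */

	/* One bit per flag; sized in bytes for setbit()/isset(). */
	static uint8_t flag_map[howmany(NUM_FLAGS, NBBY)];

	/*
	 * Record all flags advertised by one 32-bit TLV word.
	 * 'word_idx' selects which group of 32 flags 'bits' describes.
	 */
	static int
	set_flag_word(uint32_t word_idx, uint32_t bits)
	{
		int i;

		/* Reject indices beyond the bitmap, as the driver does. */
		if (word_idx >= howmany(NUM_FLAGS, 32))
			return -1;

		for (i = 0; i < 32; i++) {
			if (bits & (1U << i))
				setbit(flag_map, i + (32 * word_idx));
		}
		return 0;
	}

	int
	main(void)
	{
		set_flag_word(1, 0x5);	/* sets flags 32 and 34 */
		printf("flag 34 is %s\n", isset(flag_map, 34) ? "set" : "clear");
		return 0;
	}
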
2448 int err, idx, scd_bug;
2465 idx = IWM_AGG_SSN_TO_TXQ_IDX(ssn);
2466 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | idx);
2467 ring->cur = idx;
2468 ring->tail = idx;
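
The hunk at 2448–2468 maps an aggregation starting sequence number onto a Tx ring slot and points both the hardware write pointer and the software cursor/tail at it. A sketch of the index math, assuming (as the macro name suggests) a power-of-two ring where IWM_AGG_SSN_TO_TXQ_IDX simply masks the sequence number:

	#include <stdint.h>

	#define TX_RING_COUNT	256	/* assumed power of two, like IWM_TX_RING_COUNT */

	/* Hypothetical stand-in for IWM_AGG_SSN_TO_TXQ_IDX(). */
	#define AGG_SSN_TO_TXQ_IDX(ssn)	((ssn) & (TX_RING_COUNT - 1))

	struct tx_ring {
		int	cur;	/* next slot software will fill */
		int	tail;	/* oldest slot not yet reclaimed */
	};

	/*
	 * Point an empty aggregation ring at the slot matching the
	 * starting sequence number, so hardware and software agree on
	 * where the first frame of the session lands. The real driver
	 * also writes 'qid << 8 | idx' to IWM_HBUS_TARG_WRPTR here.
	 */
	static void
	agg_ring_init(struct tx_ring *ring, uint16_t ssn)
	{
		int idx = AGG_SSN_TO_TXQ_IDX(ssn);

		ring->cur = idx;
		ring->tail = idx;
	}
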
4565 iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
4568 struct iwm_rx_data *data = &ring->data[idx];
4607 ((uint64_t *)ring->desc)[idx] =
4610 idx * sizeof(uint64_t), sizeof(uint64_t),
4613 ((uint32_t *)ring->desc)[idx] =
4616 idx * sizeof(uint32_t), sizeof(uint32_t),
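
In iwm_rx_addbuf() (4565–4616), the descriptor written at idx is either a 64-bit DMA address or a 32-bit address shifted right by 8, depending on the device generation, and each store is followed by a bus_dmamap_sync() over just that one entry. A sketch of the dual-width store, with hypothetical names and the byte-swapping noted in comments:

	#include <stdint.h>
	#include <stdbool.h>

	/*
	 * Store the DMA address of a fresh RX buffer into slot 'idx' of
	 * the descriptor array. Newer devices use 64-bit entries; older
	 * ones use 32-bit entries holding the address shifted right by 8.
	 */
	static void
	rx_desc_store(void *desc, int idx, uint64_t dma_addr, bool wide)
	{
		if (wide)
			((uint64_t *)desc)[idx] = dma_addr;	/* htole64() in the driver */
		else
			((uint32_t *)desc)[idx] =
			    (uint32_t)(dma_addr >> 8);		/* htole32() in the driver */
	}
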
5709 iwm_txq_advance(struct iwm_softc *sc, struct iwm_tx_ring *ring, int idx)
5713 while (ring->tail != idx) {
5734 struct iwm_tx_data *txdata = &txq->data[cmd_hdr->idx];
5752 uint8_t idx = agg_status[i].idx;
5762 txdata = &txq->data[idx];
5854 int idx = cmd_hdr->idx;
5878 txd = &ring->data[idx];
5927 int idx, end_idx;
5932 idx = IWM_AGG_SSN_TO_TXQ_IDX(seq);
5934 while (idx != end_idx) {
5935 struct iwm_tx_data *txdata = &txq->data[idx];
5955 idx = (idx + 1) % IWM_TX_RING_COUNT;
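
iwm_txq_advance() (5709–5713) and the block-ack release loop (5927–5955) walk the ring the same way: advance one slot at a time, reclaiming each entry, until the cursor meets the target index, wrapping modulo IWM_TX_RING_COUNT. A standalone sketch with a hypothetical per-entry teardown:

	#include <stddef.h>

	#define TX_RING_COUNT	256

	struct tx_entry {
		void	*mbuf;	/* frame still owned by the ring, or NULL */
	};

	struct tx_ring {
		struct tx_entry	data[TX_RING_COUNT];
		int		tail;
	};

	/* Hypothetical; stands in for the driver's per-entry teardown. */
	static void
	tx_entry_free(struct tx_entry *e)
	{
		e->mbuf = NULL;
	}

	/*
	 * Reclaim every entry from the current tail up to (but not
	 * including) 'idx'. Both indices live in [0, TX_RING_COUNT),
	 * so the walk wraps around the end of the ring.
	 */
	static void
	txq_advance(struct tx_ring *ring, int idx)
	{
		while (ring->tail != idx) {
			tx_entry_free(&ring->data[ring->tail]);
			ring->tail = (ring->tail + 1) % TX_RING_COUNT;
		}
	}
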
6295 int idx, code, async, group_id;
6302 idx = ring->cur;
6315 if (sc->sc_cmd_resp_pkt[idx] != NULL)
6321 sc->sc_cmd_resp_pkt[idx] = resp_buf;
6322 sc->sc_cmd_resp_len[idx] = hcmd->resp_pkt_len;
6324 sc->sc_cmd_resp_pkt[idx] = NULL;
6329 desc = &ring->desc[idx];
6330 txdata = &ring->data[idx];
6369 cmd = &ring->cmd[idx];
6377 cmd->hdr_wide.idx = idx;
6385 cmd->hdr.idx = idx;
6445 hcmd->resp_pkt = (void *)sc->sc_cmd_resp_pkt[idx];
6446 sc->sc_cmd_resp_pkt[idx] = NULL;
6448 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
6449 sc->sc_cmd_resp_len[idx]);
6450 sc->sc_cmd_resp_pkt[idx] = NULL;
6527 iwm_cmd_done(struct iwm_softc *sc, int qid, int idx, int code)
6536 data = &ring->data[idx];
6545 wakeup(&ring->desc[idx]);
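
In iwm_send_cmd() (6295–6450) a pre-allocated response buffer is parked in sc_cmd_resp_pkt[idx] before the command is issued; the interrupt path later fills it, and iwm_cmd_done()'s wakeup(&ring->desc[idx]) (6545) rouses the sleeping sender, which takes the buffer back and clears the slot, freeing it instead on error. A sketch of that per-slot ownership discipline, with hypothetical names and the sleep/wakeup handshake omitted:

	#include <stdlib.h>

	#define CMD_QUEUE_SIZE	32	/* hypothetical ring size */

	struct cmd_slots {
		void	*resp_pkt[CMD_QUEUE_SIZE];	/* buffer awaiting a response */
		size_t	 resp_len[CMD_QUEUE_SIZE];
	};

	/*
	 * Before issuing a synchronous command that wants its response
	 * copied out, reserve the slot for this ring index. Fails if a
	 * previous user never released it.
	 */
	static int
	cmd_slot_reserve(struct cmd_slots *s, int idx, size_t resp_len)
	{
		if (s->resp_pkt[idx] != NULL)
			return -1;		/* slot still busy */
		s->resp_pkt[idx] = malloc(resp_len);
		if (s->resp_pkt[idx] == NULL)
			return -1;
		s->resp_len[idx] = resp_len;
		return 0;
	}

	/*
	 * On completion the sender takes ownership and clears the slot;
	 * on timeout or error it frees the buffer itself, again clearing
	 * the slot so the index can be reused by a later command.
	 */
	static void *
	cmd_slot_claim(struct cmd_slots *s, int idx)
	{
		void *pkt = s->resp_pkt[idx];

		s->resp_pkt[idx] = NULL;
		return pkt;		/* caller frees resp_len[idx] bytes */
	}
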
6561 iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
6579 scd_bc_tbl[qid].tfd_offset[idx] = val;
6580 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6581 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
6587 iwm_reset_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id)
6600 scd_bc_tbl[qid].tfd_offset[idx] = val;
6601 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP)
6602 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = val;
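
iwm_update_sched() and iwm_reset_sched() (6561–6602) publish a per-slot value in the scheduler's byte-count table, and any slot below IWM_TFD_QUEUE_SIZE_BC_DUP is mirrored at IWM_TFD_QUEUE_SIZE_MAX + idx so the hardware still finds it after its read pointer wraps. A sketch of the mirrored write, with hypothetical sizes:

	#include <stdint.h>

	#define TFD_QUEUE_SIZE_MAX	256	/* hypothetical; slots the scheduler sees */
	#define TFD_QUEUE_SIZE_BC_DUP	64	/* hypothetical; leading slots mirrored */

	/* Per-queue byte-count table with the duplicate region at the end. */
	struct bc_tbl {
		uint16_t tfd_offset[TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP];
	};

	/*
	 * Publish 'val' (an encoded byte count plus station id in the
	 * real driver) for slot 'idx'. The first BC_DUP slots are
	 * duplicated past the main region so a wrapping hardware read
	 * still finds them.
	 */
	static void
	bc_tbl_set(struct bc_tbl *tbl, int idx, uint16_t val)
	{
		tbl->tfd_offset[idx] = val;
		if (idx < TFD_QUEUE_SIZE_BC_DUP)
			tbl->tfd_offset[TFD_QUEUE_SIZE_MAX + idx] = val;
	}
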
6795 cmd->hdr.idx = ring->cur;
8433 int i, idx, num_active_macs, quota, quota_rem;
8467 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
8471 cmd.quotas[idx].id_and_color =
8475 cmd.quotas[idx].quota = htole32(0);
8476 cmd.quotas[idx].max_duration = htole32(0);
8478 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
8479 cmd.quotas[idx].max_duration = htole32(0);
8481 idx++;
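
The quota builder (8433–8481) runs two indices: i scans every binding slot, while idx advances only when an entry is actually emitted, so cmd.quotas[] stays packed from the front. A simplified sketch of that two-index pattern (the real code also emits zero-quota entries in some cases):

	#include <stdint.h>

	#define MAX_BINDINGS	4	/* hypothetical, mirrors IWM_MAX_BINDINGS */

	struct quota_entry {
		uint32_t id;
		uint32_t quota;
	};

	struct quota_cmd {
		struct quota_entry quotas[MAX_BINDINGS];
	};

	/*
	 * Fill the command with one entry per binding that has active
	 * interfaces. 'i' scans every binding slot; 'idx' moves only
	 * when an entry is written, keeping the array dense.
	 */
	static int
	build_quota_cmd(struct quota_cmd *cmd, const int n_ifs[MAX_BINDINGS],
	    uint32_t per_if_quota)
	{
		int i, idx;

		for (idx = 0, i = 0; i < MAX_BINDINGS; i++) {
			if (n_ifs[i] <= 0)
				continue;
			cmd->quotas[idx].id = (uint32_t)i;
			cmd->quotas[idx].quota = per_if_quota * (uint32_t)n_ifs[i];
			idx++;
		}
		return idx;	/* number of entries emitted */
	}
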
9980 int sec_idx, idx;
10032 for (idx = 1; idx < sc->num_of_paging_blk; idx++) {
10033 memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
10035 sc->fw_paging_db[idx].fw_paging_size);
10038 DEVNAME(sc), sc->fw_paging_db[idx].fw_paging_size, idx));
10040 offset += sc->fw_paging_db[idx].fw_paging_size;
10045 memcpy(sc->fw_paging_db[idx].fw_paging_block.vaddr,
10050 DEVNAME(sc), sc->num_of_pages_in_last_blk, idx));
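
The paging loop (9980–10050) scatters the firmware's paging image across per-block DMA memory: full-size blocks starting at index 1 (block 0 holds the CSS and is handled separately), then a tail block holding only the pages that remain. A simplified sketch, with a hypothetical block size:

	#include <stdint.h>
	#include <string.h>

	#define PAGING_BLOCK_SIZE	(8 * 4096)	/* hypothetical full-block size */

	struct paging_block {
		uint8_t	*vaddr;		/* DMA-able backing memory */
		size_t	 size;		/* bytes valid in this block */
	};

	/*
	 * Copy 'img' (the firmware paging section) into the blocks:
	 * indices 1 .. num_full_blk - 1 each take a full block, and the
	 * block at index num_full_blk takes whatever remains.
	 */
	static void
	copy_paging_image(struct paging_block *blk, int num_full_blk,
	    const uint8_t *img, size_t img_len)
	{
		size_t off = 0;
		int idx;

		for (idx = 1; idx < num_full_blk; idx++) {
			blk[idx].size = PAGING_BLOCK_SIZE;
			memcpy(blk[idx].vaddr, img + off, blk[idx].size);
			off += blk[idx].size;
		}

		/* Tail block: only the bytes actually left over. */
		if (off < img_len) {
			blk[idx].size = img_len - off;
			memcpy(blk[idx].vaddr, img + off, blk[idx].size);
		}
	}
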
10975 int qid, idx, code;
10978 idx = pkt->hdr.idx;
10981 return (!(qid == 0 && idx == 0 && code == 0) &&
10993 int qid, idx, code, handled = 1;
11002 idx = pkt->hdr.idx;
11210 if (sc->sc_cmd_resp_pkt[idx] == NULL)
11221 pkt_len > sc->sc_cmd_resp_len[idx]) {
11222 free(sc->sc_cmd_resp_pkt[idx], M_DEVBUF,
11223 sc->sc_cmd_resp_len[idx]);
11224 sc->sc_cmd_resp_pkt[idx] = NULL;
11230 memcpy(sc->sc_cmd_resp_pkt[idx], pkt, pkt_len);
11324 (qid & ~0x80), idx);
11336 iwm_cmd_done(sc, qid, idx, code);
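
The receive path (10975–11336) decodes qid, idx, and code from the packet header: together they name the command-ring entry being answered, an all-zero triple marks trailing padding in the RX buffer (the check at 10981, alongside other conditions), and bit 0x80 of qid is masked off before the queue id is used (11324). A sketch of the header decode, assuming a layout like the driver's 4-byte command header:

	#include <stdint.h>
	#include <stdbool.h>

	/* Modeled on the driver's command header; layout assumed. */
	struct rx_pkt_hdr {
		uint8_t	code;	/* command or notification id */
		uint8_t	flags;
		uint8_t	idx;	/* slot in the originating ring */
		uint8_t	qid;	/* originating queue; high bit is a flag */
	};

	/*
	 * Firmware pads the tail of an RX buffer, so a header decoding
	 * to qid 0, idx 0, code 0 is treated as "no more packets"
	 * rather than a real response.
	 */
	static bool
	rx_pkt_valid(const struct rx_pkt_hdr *hdr)
	{
		return !(hdr->qid == 0 && hdr->idx == 0 && hdr->code == 0);
	}

	/* Mask the flag bit off qid before using it as a ring number. */
	static int
	rx_pkt_qid(const struct rx_pkt_hdr *hdr)
	{
		return hdr->qid & ~0x80;
	}
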