Lines Matching defs:ar (the struct ar9170 *ar device context) in the carl9170 TX code

48 static inline unsigned int __carl9170_get_queue(struct ar9170 *ar,
64 static inline unsigned int carl9170_get_queue(struct ar9170 *ar,
67 return __carl9170_get_queue(ar, skb_get_queue_mapping(skb));
70 static bool is_mem_full(struct ar9170 *ar)
72 return (DIV_ROUND_UP(IEEE80211_MAX_FRAME_LEN, ar->fw.mem_block_size) >
73 atomic_read(&ar->mem_free_blocks));
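
The is_mem_full() fragment above (source lines 70-73) is the driver's back-pressure test: device buffer memory is carved into fw.mem_block_size chunks, and it counts as full once fewer free chunks remain than one worst-case frame would need. Below is a minimal userspace sketch of that arithmetic only, not the driver code; the 128-byte block size and the IEEE80211_MAX_FRAME_LEN stand-in are illustrative.

#include <stdbool.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define MAX_FRAME_LEN           2352    /* stand-in for IEEE80211_MAX_FRAME_LEN */

/* "Full" means: not even one maximum-size frame fits any more. */
static bool is_mem_full(unsigned int block_size, unsigned int free_blocks)
{
        return DIV_ROUND_UP(MAX_FRAME_LEN, block_size) > free_blocks;
}

int main(void)
{
        /* With 128-byte blocks a full frame needs 19 blocks. */
        printf("%d\n", is_mem_full(128, 18));   /* 1 -> stop accepting frames */
        printf("%d\n", is_mem_full(128, 19));   /* 0 -> still room */
        return 0;
}
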
76 static void carl9170_tx_accounting(struct ar9170 *ar, struct sk_buff *skb)
81 atomic_inc(&ar->tx_total_queued);
84 spin_lock_bh(&ar->tx_stats_lock);
92 ar->tx_stats[queue].len++;
93 ar->tx_stats[queue].count++;
95 mem_full = is_mem_full(ar);
96 for (i = 0; i < ar->hw->queues; i++) {
97 if (mem_full || ar->tx_stats[i].len >= ar->tx_stats[i].limit) {
98 ieee80211_stop_queue(ar->hw, i);
99 ar->queue_stop_timeout[i] = jiffies;
103 spin_unlock_bh(&ar->tx_stats_lock);
107 static struct ieee80211_sta *__carl9170_get_tx_sta(struct ar9170 *ar,
121 vif = rcu_dereference(ar->vif_priv[vif_id].vif);
138 static void carl9170_tx_ps_unblock(struct ar9170 *ar, struct sk_buff *skb)
144 sta = __carl9170_get_tx_sta(ar, skb);
150 ieee80211_sta_block_awake(ar->hw, sta, false);
156 static void carl9170_tx_accounting_free(struct ar9170 *ar, struct sk_buff *skb)
162 spin_lock_bh(&ar->tx_stats_lock);
164 ar->tx_stats[queue].len--;
166 if (!is_mem_full(ar)) {
168 for (i = 0; i < ar->hw->queues; i++) {
169 if (ar->tx_stats[i].len >= CARL9170_NUM_TX_LIMIT_SOFT)
172 if (ieee80211_queue_stopped(ar->hw, i)) {
175 tmp = jiffies - ar->queue_stop_timeout[i];
176 if (tmp > ar->max_queue_stop_timeout[i])
177 ar->max_queue_stop_timeout[i] = tmp;
180 ieee80211_wake_queue(ar->hw, i);
184 spin_unlock_bh(&ar->tx_stats_lock);
186 if (atomic_dec_and_test(&ar->tx_total_queued))
187 complete(&ar->tx_flush);
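
carl9170_tx_accounting() and carl9170_tx_accounting_free() (source lines 76-187) form a pair: the first grows the per-queue backlog under tx_stats_lock and stops every mac80211 queue that hit its limit (or all of them when device memory is full), recording when each stop happened; the second shrinks the backlog, and once memory is no longer full and a queue is back under CARL9170_NUM_TX_LIMIT_SOFT it wakes that queue again, tracking the longest stall, while the last completed frame signals tx_flush. A self-contained model of the stop/wake decision, with the spinlock, mac80211 calls and jiffies reduced to comments and illustrative limits:

#include <stdbool.h>
#include <time.h>

#define NUM_QUEUES      4
#define LIMIT_HARD      64      /* illustrative per-queue stop threshold   */
#define LIMIT_SOFT      48      /* stand-in for CARL9170_NUM_TX_LIMIT_SOFT */

static unsigned int queue_len[NUM_QUEUES];
static bool stopped[NUM_QUEUES];
static time_t stop_time[NUM_QUEUES], max_stop_time[NUM_QUEUES];

/* Frame handed to the driver: grow the backlog, maybe stop queues. */
static void tx_accounting(unsigned int queue, bool mem_full)
{
        unsigned int i;

        queue_len[queue]++;
        for (i = 0; i < NUM_QUEUES; i++) {
                if (mem_full || queue_len[i] >= LIMIT_HARD) {
                        stopped[i] = true;              /* ieee80211_stop_queue() */
                        stop_time[i] = time(NULL);      /* queue_stop_timeout[i]  */
                }
        }
}

/* Frame completed: shrink the backlog, wake queues once it is safe. */
static void tx_accounting_free(unsigned int queue, bool mem_full)
{
        unsigned int i;

        queue_len[queue]--;
        if (mem_full)
                return;                         /* still short on device memory */

        for (i = 0; i < NUM_QUEUES; i++) {
                if (queue_len[i] >= LIMIT_SOFT)
                        continue;               /* this queue is still too deep */

                if (stopped[i]) {
                        time_t t = time(NULL) - stop_time[i];

                        if (t > max_stop_time[i])
                                max_stop_time[i] = t;   /* longest observed stall */
                        stopped[i] = false;             /* ieee80211_wake_queue() */
                }
        }
}

int main(void)
{
        tx_accounting(1, true);         /* memory full: everything stops  */
        tx_accounting_free(1, false);   /* memory recovered: queues wake  */
        return stopped[1];              /* 0 */
}
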
190 static int carl9170_alloc_dev_space(struct ar9170 *ar, struct sk_buff *skb)
196 atomic_inc(&ar->mem_allocs);
198 chunks = DIV_ROUND_UP(skb->len, ar->fw.mem_block_size);
199 if (unlikely(atomic_sub_return(chunks, &ar->mem_free_blocks) < 0)) {
200 atomic_add(chunks, &ar->mem_free_blocks);
204 spin_lock_bh(&ar->mem_lock);
205 cookie = bitmap_find_free_region(ar->mem_bitmap, ar->fw.mem_blocks, 0);
206 spin_unlock_bh(&ar->mem_lock);
209 atomic_add(chunks, &ar->mem_free_blocks);
226 static void carl9170_release_dev_space(struct ar9170 *ar, struct sk_buff *skb)
250 WARN_ON_ONCE(cookie > ar->fw.mem_blocks))
253 atomic_add(DIV_ROUND_UP(skb->len, ar->fw.mem_block_size),
254 &ar->mem_free_blocks);
256 spin_lock_bh(&ar->mem_lock);
257 bitmap_release_region(ar->mem_bitmap, cookie - 1, 0);
258 spin_unlock_bh(&ar->mem_lock);
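
carl9170_alloc_dev_space() and carl9170_release_dev_space() (source lines 190-258) pair an atomic free-block counter with a bitmap of firmware buffer regions: the allocator reserves DIV_ROUND_UP(skb->len, block_size) blocks up front, rolls the reservation back if the counter goes negative or no bitmap region is free, and hands out a cookie that is the bitmap position offset by one (the release path frees cookie - 1). A self-contained sketch of that scheme, single-threaded for brevity where the driver uses atomics and mem_lock, with made-up sizes:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))
#define MEM_BLOCKS      64      /* illustrative ar->fw.mem_blocks   */
#define BLOCK_SIZE      128     /* illustrative fw.mem_block_size   */

static int free_blocks = MEM_BLOCKS;            /* atomic_t in the driver  */
static unsigned char region_used[MEM_BLOCKS];   /* stand-in for mem_bitmap */

/* Returns a cookie > 0 on success, 0 if device memory is exhausted. */
static unsigned int alloc_dev_space(unsigned int frame_len)
{
        int chunks = DIV_ROUND_UP(frame_len, BLOCK_SIZE);
        unsigned int i;

        free_blocks -= chunks;                  /* atomic_sub_return()       */
        if (free_blocks < 0) {
                free_blocks += chunks;          /* roll back the reservation */
                return 0;
        }

        for (i = 0; i < MEM_BLOCKS; i++) {      /* bitmap_find_free_region() */
                if (!region_used[i]) {
                        region_used[i] = 1;
                        return i + 1;           /* cookie is offset by one   */
                }
        }
        free_blocks += chunks;                  /* no region: roll back too  */
        return 0;
}

static void release_dev_space(unsigned int cookie, unsigned int frame_len)
{
        free_blocks += DIV_ROUND_UP(frame_len, BLOCK_SIZE);
        region_used[cookie - 1] = 0;            /* bitmap_release_region()   */
}

int main(void)
{
        unsigned int cookie = alloc_dev_space(1200);

        printf("cookie=%u free=%d\n", cookie, free_blocks);
        if (cookie)
                release_dev_space(cookie, 1200);
        printf("free=%d\n", free_blocks);
        return 0;
}
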
264 struct ar9170 *ar;
274 ar = arinfo->ar;
275 if (WARN_ON_ONCE(!ar))
286 if (atomic_read(&ar->tx_total_queued))
287 ar->tx_schedule = true;
290 if (!atomic_read(&ar->tx_ampdu_upload))
291 ar->tx_ampdu_schedule = true;
317 ieee80211_free_txskb(ar->hw, skb);
328 ieee80211_tx_status_irqsafe(ar->hw, skb);
347 static void carl9170_tx_shift_bm(struct ar9170 *ar,
379 static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
394 sta = __carl9170_get_tx_sta(ar, skb);
407 carl9170_tx_shift_bm(ar, tid_info, get_seq_h(hdr));
438 static void carl9170_tx_bar_status(struct ar9170 *ar, struct sk_buff *skb,
459 list_for_each_entry_rcu(entry, &ar->bar_list[queue], list) {
461 spin_lock_bh(&ar->bar_list_lock[queue]);
463 spin_unlock_bh(&ar->bar_list_lock[queue]);
477 void carl9170_tx_status(struct ar9170 *ar, struct sk_buff *skb,
482 carl9170_tx_accounting_free(ar, skb);
486 carl9170_tx_bar_status(ar, skb, txinfo);
491 ar->tx_ack_failures++;
494 carl9170_tx_status_process_ampdu(ar, skb, txinfo);
496 carl9170_tx_ps_unblock(ar, skb);
501 void carl9170_tx_callback(struct ar9170 *ar, struct sk_buff *skb)
505 atomic_dec(&ar->tx_total_pending);
508 atomic_dec(&ar->tx_ampdu_upload);
511 tasklet_hi_schedule(&ar->usb_tasklet);
514 static struct sk_buff *carl9170_get_queued_skb(struct ar9170 *ar, u8 cookie,
529 carl9170_release_dev_space(ar, skb);
537 static void carl9170_tx_fill_rateinfo(struct ar9170 *ar, unsigned int rix,
559 static void carl9170_check_queue_stop_timeout(struct ar9170 *ar)
567 for (i = 0; i < ar->hw->queues; i++) {
568 spin_lock_bh(&ar->tx_status[i].lock);
570 skb = skb_peek(&ar->tx_status[i]);
583 spin_unlock_bh(&ar->tx_status[i].lock);
600 carl9170_restart(ar, CARL9170_RR_STUCK_TX);
604 static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
613 list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
642 struct ar9170 *ar = container_of(work, struct ar9170,
644 if (!IS_STARTED(ar))
647 ar->tx_janitor_last_run = jiffies;
649 carl9170_check_queue_stop_timeout(ar);
650 carl9170_tx_ampdu_timeout(ar);
652 if (!atomic_read(&ar->tx_total_queued))
655 ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
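
The TX janitor (source lines 559-655) is a delayed worker: carl9170_check_queue_stop_timeout() peeks at the oldest frame still waiting for a status on each queue and, if one has been stuck past its timeout, requests a CARL9170_RR_STUCK_TX restart; the worker then re-arms itself only while frames remain queued. A small watchdog sketch of that idea, with wall-clock timestamps instead of jiffies and a plain function call in place of the restart machinery:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define NUM_QUEUES      4
#define STUCK_SECONDS   3       /* illustrative timeout */

/* Timestamp of the oldest unacknowledged frame per queue, 0 = queue empty. */
static time_t oldest_frame[NUM_QUEUES];

static void restart_device(void)
{
        puts("TX stuck: restarting device");    /* carl9170_restart() */
}

/* Runs periodically from a delayed work item ("the janitor"). */
static void check_queue_stop_timeout(void)
{
        bool restart = false;
        unsigned int i;

        for (i = 0; i < NUM_QUEUES; i++) {
                if (oldest_frame[i] &&
                    time(NULL) - oldest_frame[i] > STUCK_SECONDS)
                        restart = true;         /* this frame never completed */
        }

        if (restart)
                restart_device();
}

int main(void)
{
        oldest_frame[2] = time(NULL) - 10;      /* pretend a frame is stuck */
        check_queue_stop_timeout();
        return 0;
}
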
659 static void __carl9170_tx_process_status(struct ar9170 *ar,
669 skb = carl9170_get_queued_skb(ar, cookie, &ar->tx_status[q]);
686 carl9170_tx_fill_rateinfo(ar, r, t, txinfo);
687 carl9170_tx_status(ar, skb, success);
690 void carl9170_tx_process_status(struct ar9170 *ar,
702 __carl9170_tx_process_status(ar, cmd->_tx_status[i].cookie,
707 static void carl9170_tx_rate_tpc_chains(struct ar9170 *ar,
725 txpower = ar->power_2G_ht40;
727 txpower = ar->power_5G_ht40;
730 txpower = ar->power_2G_ht20;
732 txpower = ar->power_5G_ht20;
740 txpower = ar->power_2G_cck;
742 txpower = ar->power_2G_ofdm;
744 txpower = ar->power_5G_leg;
753 if (ar->eeprom.tx_mask == 1) {
763 *tpc = min_t(unsigned int, *tpc, ar->hw->conf.power_level * 2);
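
carl9170_tx_rate_tpc_chains() (source lines 707-763) picks one of the calibrated power tables (2 GHz CCK/OFDM, 5 GHz legacy, HT20/HT40 per band), derives the chain mask from eeprom.tx_mask, and clamps the result to the user/regulatory limit; the tables appear to be kept in half-dBm steps, which is why hw->conf.power_level (in dBm) is doubled before the min_t(). A reduced sketch of the select-and-clamp flow; the table contents below are made up and the real lookup also indexes by rate:

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };
enum mod  { MOD_CCK, MOD_OFDM, MOD_HT20, MOD_HT40 };

#define MIN(a, b)       ((a) < (b) ? (a) : (b))

/* Illustrative per-mode power limits in half-dBm steps; the driver reads
 * these from its EEPROM-derived tables. */
static unsigned int power_2g_cck  = 40;         /* 20.0 dBm */
static unsigned int power_2g_ofdm = 34;         /* 17.0 dBm */
static unsigned int power_5g_leg  = 30;         /* 15.0 dBm */
static unsigned int power_ht20[2] = { 32, 28 };
static unsigned int power_ht40[2] = { 30, 26 };

static unsigned int tx_power(enum band band, enum mod mod,
                             unsigned int user_dbm)
{
        unsigned int tpc;

        switch (mod) {
        case MOD_HT40: tpc = power_ht40[band]; break;
        case MOD_HT20: tpc = power_ht20[band]; break;
        case MOD_CCK:  tpc = power_2g_cck;     break;
        default:       tpc = (band == BAND_2GHZ) ?
                                power_2g_ofdm : power_5g_leg; break;
        }

        /* Regulatory/user limit is in dBm, table is in 0.5 dBm units. */
        return MIN(tpc, user_dbm * 2);
}

int main(void)
{
        printf("%u\n", tx_power(BAND_2GHZ, MOD_CCK, 15));       /* 30: clamped */
        printf("%u\n", tx_power(BAND_5GHZ, MOD_HT40, 20));      /* 26: table   */
        return 0;
}
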
766 static __le32 carl9170_tx_physet(struct ar9170 *ar,
817 carl9170_tx_rate_tpc_chains(ar, info, txrate,
826 static bool carl9170_tx_rts_check(struct ar9170 *ar,
830 switch (ar->erp_mode) {
853 static bool carl9170_tx_cts_check(struct ar9170 *ar,
856 switch (ar->erp_mode) {
873 static void carl9170_tx_get_rates(struct ar9170 *ar,
890 static void carl9170_tx_apply_rateset(struct ar9170 *ar,
922 phy_set = carl9170_tx_physet(ar, info, txrate);
932 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
934 else if (carl9170_tx_cts_check(ar, txrate))
948 if (carl9170_tx_rts_check(ar, txrate, ampdu, no_ack))
951 else if (carl9170_tx_cts_check(ar, txrate))
960 static int carl9170_tx_prepare(struct ar9170 *ar,
984 hw_queue = ar9170_qmap(carl9170_get_queue(ar, skb));
1076 arinfo->ar = ar;
1085 static void carl9170_set_immba(struct ar9170 *ar, struct sk_buff *skb)
1093 static void carl9170_set_ampdu_params(struct ar9170 *ar, struct sk_buff *skb)
1114 if (tmp != ar->current_density) {
1115 ar->current_density = tmp;
1123 if (tmp != ar->current_factor) {
1124 ar->current_factor = tmp;
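
carl9170_set_ampdu_params() (source lines 1093-1124) only touches the device when the A-MPDU density or factor actually changes, caching the last programmed value in ar->current_density / ar->current_factor. The same write-avoidance pattern in isolation, shown for the density only:

/* Cache the last value programmed into the device and skip redundant,
 * relatively expensive register/firmware writes (a sketch of the pattern;
 * the driver keeps these in ar->current_density / ar->current_factor). */
static unsigned int current_density = ~0u;      /* "never programmed" */

static void set_ampdu_density(unsigned int density)
{
        if (density == current_density)
                return;                 /* device already matches */

        current_density = density;
        /* ...issue the actual device write here... */
}

int main(void)
{
        set_ampdu_density(6);   /* programs the device     */
        set_ampdu_density(6);   /* skipped: nothing to do  */
        return 0;
}
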
1130 static void carl9170_tx_ampdu(struct ar9170 *ar)
1139 atomic_inc(&ar->tx_ampdu_scheduler);
1140 ar->tx_ampdu_schedule = false;
1142 if (atomic_read(&ar->tx_ampdu_upload))
1145 if (!ar->tx_ampdu_list_len)
1151 tid_info = rcu_dereference(ar->tx_ampdu_iter);
1158 list_for_each_entry_continue_rcu(tid_info, &ar->tx_ampdu_list, list) {
1193 carl9170_tx_get_rates(ar, tid_info->vif,
1198 carl9170_tx_apply_rateset(ar, tx_info_first, skb);
1200 atomic_inc(&ar->tx_ampdu_upload);
1228 carl9170_set_ampdu_params(ar, skb_peek(&agg));
1231 carl9170_set_immba(ar, skb_peek_tail(&agg));
1233 spin_lock_bh(&ar->tx_pending[queue].lock);
1234 skb_queue_splice_tail_init(&agg, &ar->tx_pending[queue]);
1235 spin_unlock_bh(&ar->tx_pending[queue].lock);
1236 ar->tx_schedule = true;
1241 rcu_assign_pointer(ar->tx_ampdu_iter, tid_info);
1245 static struct sk_buff *carl9170_tx_pick_skb(struct ar9170 *ar,
1259 if (carl9170_alloc_dev_space(ar, skb))
1276 void carl9170_tx_drop(struct ar9170 *ar, struct sk_buff *skb)
1281 ar->tx_dropped++;
1285 ar9170_qmap(carl9170_get_queue(ar, skb)));
1286 __carl9170_tx_process_status(ar, super->s.cookie, q);
1289 static bool carl9170_tx_ps_drop(struct ar9170 *ar, struct sk_buff *skb)
1296 sta = __carl9170_get_tx_sta(ar, skb);
1309 atomic_dec(&ar->tx_ampdu_upload);
1312 carl9170_release_dev_space(ar, skb);
1313 carl9170_tx_status(ar, skb, false);
1322 static void carl9170_bar_check(struct ar9170 *ar, struct sk_buff *skb)
1335 spin_lock_bh(&ar->bar_list_lock[queue]);
1336 list_add_tail_rcu(&entry->list, &ar->bar_list[queue]);
1337 spin_unlock_bh(&ar->bar_list_lock[queue]);
1342 static void carl9170_tx(struct ar9170 *ar)
1348 ar->tx_schedule = false;
1350 if (unlikely(!IS_STARTED(ar)))
1353 carl9170_usb_handle_tx_err(ar);
1355 for (i = 0; i < ar->hw->queues; i++) {
1356 while (!skb_queue_empty(&ar->tx_pending[i])) {
1357 skb = carl9170_tx_pick_skb(ar, &ar->tx_pending[i]);
1361 if (unlikely(carl9170_tx_ps_drop(ar, skb)))
1364 carl9170_bar_check(ar, skb);
1366 atomic_inc(&ar->tx_total_pending);
1368 q = __carl9170_get_queue(ar, i);
1373 skb_queue_tail(&ar->tx_status[q], skb);
1385 carl9170_usb_tx(ar, skb);
1393 ieee80211_queue_delayed_work(ar->hw, &ar->tx_janitor,
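
carl9170_tx() (source lines 1342-1393) drains each software pending queue in turn: pick the next frame (which reserves device memory and stops the queue walk on failure), drop it if the peer is asleep, file it on the per-queue status list so the completion path can find it by cookie, and only then hand it to the USB layer; afterwards the janitor is re-armed. A compact model of the pending-to-status hand-off using array-backed FIFOs; the memory allocation, power-save and error paths are left out:

#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES      4
#define QUEUE_DEPTH     8

struct frame { unsigned int cookie, len; };

struct fifo {
        struct frame buf[QUEUE_DEPTH];
        unsigned int head, tail;        /* head: next remove, tail: next insert */
};

static struct fifo pending[NUM_QUEUES], status[NUM_QUEUES];

static bool fifo_empty(struct fifo *q) { return q->head == q->tail; }

static void fifo_put(struct fifo *q, struct frame f)
{
        q->buf[q->tail++ % QUEUE_DEPTH] = f;
}

static struct frame fifo_get(struct fifo *q)
{
        return q->buf[q->head++ % QUEUE_DEPTH];
}

static void usb_submit(struct frame f)
{
        printf("submitting cookie %u (%u bytes)\n", f.cookie, f.len);
}

/* The TX run: move frames from the pending queues to the status queues
 * (where the completion path will look them up) and submit them. */
static void tx_run(void)
{
        unsigned int i;

        for (i = 0; i < NUM_QUEUES; i++) {
                while (!fifo_empty(&pending[i])) {
                        struct frame f = fifo_get(&pending[i]);

                        /* device-memory allocation / PS checks omitted */
                        fifo_put(&status[i], f);        /* await tx status   */
                        usb_submit(f);                  /* carl9170_usb_tx() */
                }
        }
}

int main(void)
{
        fifo_put(&pending[0], (struct frame){ .cookie = 1, .len = 1200 });
        fifo_put(&pending[2], (struct frame){ .cookie = 2, .len = 300 });
        tx_run();
        return 0;
}
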
1397 static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
1468 carl9170_tx_status(ar, skb, false);
1469 ar->tx_dropped++;
1477 struct ar9170 *ar = hw->priv;
1483 if (unlikely(!IS_STARTED(ar)))
1489 if (unlikely(carl9170_tx_prepare(ar, sta, skb)))
1492 carl9170_tx_accounting(ar, skb);
1511 run = carl9170_tx_ampdu_queue(ar, sta, skb, info);
1513 carl9170_tx_ampdu(ar);
1518 carl9170_tx_get_rates(ar, vif, sta, skb);
1519 carl9170_tx_apply_rateset(ar, info, skb);
1520 skb_queue_tail(&ar->tx_pending[queue], skb);
1523 carl9170_tx(ar);
1527 ar->tx_dropped++;
1528 ieee80211_free_txskb(ar->hw, skb);
1531 void carl9170_tx_scheduler(struct ar9170 *ar)
1534 if (ar->tx_ampdu_schedule)
1535 carl9170_tx_ampdu(ar);
1537 if (ar->tx_schedule)
1538 carl9170_tx(ar);
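
carl9170_tx_scheduler() (source lines 1531-1538) is deliberately simple: other paths only raise ar->tx_ampdu_schedule or ar->tx_schedule, and the scheduler, run from the USB tasklet, turns each flag into one pass of the corresponding queue walker; the walkers themselves clear the flags (source lines 1140 and 1348). In miniature:

#include <stdbool.h>
#include <stdio.h>

static bool tx_ampdu_schedule, tx_schedule;

static void tx_ampdu(void)
{
        tx_ampdu_schedule = false;
        puts("aggregate + queue A-MPDUs");
}

static void tx(void)
{
        tx_schedule = false;
        puts("drain pending queues");
}

/* Producers only raise a flag; the tasklet does the actual work. */
static void tx_scheduler(void)
{
        if (tx_ampdu_schedule)
                tx_ampdu();
        if (tx_schedule)
                tx();
}

int main(void)
{
        tx_schedule = true;     /* e.g. a frame completed, memory freed */
        tx_scheduler();
        return 0;
}
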
1542 static struct carl9170_vif_info *carl9170_pick_beaconing_vif(struct ar9170 *ar)
1554 cvif = rcu_dereference(ar->beacon_iter);
1555 if (ar->vifs > 0 && cvif) {
1557 list_for_each_entry_continue_rcu(cvif, &ar->vif_list,
1562 } while (ar->beacon_enabled && i--);
1569 RCU_INIT_POINTER(ar->beacon_iter, cvif);
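
carl9170_pick_beaconing_vif() (source lines 1542-1569) round-robins over the interface list with a persistent RCU-protected iterator (ar->beacon_iter), so successive beacon updates can rotate across all beaconing interfaces rather than always servicing the first one, and it stores the chosen position back for the next run. The same rotate-and-remember idea over a plain array, without RCU and with invented interface names:

#include <stdbool.h>
#include <stdio.h>

#define NUM_VIFS 3

struct vif { const char *name; bool enable_beacon; };

static struct vif vifs[NUM_VIFS] = {
        { "wlan0", true }, { "wlan1", false }, { "wlan2", true },
};
static unsigned int beacon_iter;        /* persists between calls */

/* Return the next interface that wants a beacon, starting after the one
 * picked last time; NULL if nobody is beaconing. */
static struct vif *pick_beaconing_vif(void)
{
        unsigned int i;

        for (i = 0; i < NUM_VIFS; i++) {
                unsigned int idx = (beacon_iter + 1 + i) % NUM_VIFS;

                if (vifs[idx].enable_beacon) {
                        beacon_iter = idx;      /* remember for next call */
                        return &vifs[idx];
                }
        }
        return NULL;
}

int main(void)
{
        puts(pick_beaconing_vif()->name);       /* wlan2 */
        puts(pick_beaconing_vif()->name);       /* wlan0 */
        puts(pick_beaconing_vif()->name);       /* wlan2 */
        return 0;
}
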
1573 static bool carl9170_tx_beacon_physet(struct ar9170 *ar, struct sk_buff *skb,
1584 carl9170_tx_rate_tpc_chains(ar, txinfo, rate, plcp, &power, &chains);
1617 int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
1627 cvif = carl9170_pick_beaconing_vif(ar);
1631 skb = ieee80211_beacon_get_tim(ar->hw, carl9170_get_vif(cvif),
1639 spin_lock_bh(&ar->beacon_lock);
1645 addr = ar->fw.beacon_addr + off;
1648 if ((off + len) > ar->fw.beacon_max_len) {
1650 wiphy_err(ar->hw->wiphy, "beacon does not "
1659 wiphy_err(ar->hw->wiphy, "no support for beacons "
1668 ht_rate = carl9170_tx_beacon_physet(ar, skb, &ht1, &plcp);
1670 carl9170_async_regwrite_begin(ar);
1697 spin_unlock_bh(&ar->beacon_lock);
1702 err = carl9170_bcn_ctrl(ar, cvif->id,
1714 spin_unlock_bh(&ar->beacon_lock);