/*
 * Copyright (c) 2002-2009 Sam Leffler, Errno Consulting
 * Copyright (c) 2002-2008 Atheros Communications, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 * $FreeBSD$
 */
#include "opt_ah.h"

#include "ah.h"
#include "ah_desc.h"
#include "ah_internal.h"

#include "ar5416/ar5416.h"
#include "ar5416/ar5416reg.h"
#include "ar5416/ar5416phy.h"
#include "ar5416/ar5416desc.h"

/*
 * Stop transmit on the specified queue
 */
HAL_BOOL
ar5416StopTxDma(struct ath_hal *ah, u_int q)
{
#define	STOP_DMA_TIMEOUT	4000	/* us */
#define	STOP_DMA_ITER		100	/* us */
	u_int i;

	HALASSERT(q < AH_PRIVATE(ah)->ah_caps.halTotalQueues);

	HALASSERT(AH5212(ah)->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE);

	OS_REG_WRITE(ah, AR_Q_TXD, 1 << q);
	for (i = STOP_DMA_TIMEOUT/STOP_DMA_ITER; i != 0; i--) {
		if (ar5212NumTxPending(ah, q) == 0)
			break;
		OS_DELAY(STOP_DMA_ITER);
	}
#ifdef AH_DEBUG
	if (i == 0) {
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: queue %u DMA did not stop in %d us\n",
		    __func__, q, STOP_DMA_TIMEOUT);
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: QSTS 0x%x Q_TXE 0x%x Q_TXD 0x%x Q_CBR 0x%x\n", __func__,
		    OS_REG_READ(ah, AR_QSTS(q)), OS_REG_READ(ah, AR_Q_TXE),
		    OS_REG_READ(ah, AR_Q_TXD), OS_REG_READ(ah, AR_QCBRCFG(q)));
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: Q_MISC 0x%x Q_RDYTIMECFG 0x%x Q_RDYTIMESHDN 0x%x\n",
		    __func__, OS_REG_READ(ah, AR_QMISC(q)),
		    OS_REG_READ(ah, AR_QRDYTIMECFG(q)),
		    OS_REG_READ(ah, AR_Q_RDYTIMESHDN));
	}
#endif /* AH_DEBUG */

	/* ar5416 and up can kill packets at the PCU level */
	if (ar5212NumTxPending(ah, q)) {
		uint32_t j;

		HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
		    "%s: Num of pending TX Frames %d on Q %d\n",
		    __func__, ar5212NumTxPending(ah, q), q);

		/* Kill last PCU Tx Frame */
		/* TODO - save off and restore current values of Q1/Q2? */
		for (j = 0; j < 2; j++) {
			uint32_t tsfLow = OS_REG_READ(ah, AR_TSF_L32);
			OS_REG_WRITE(ah, AR_QUIET2,
			    SM(10, AR_QUIET2_QUIET_DUR));
			OS_REG_WRITE(ah, AR_QUIET_PERIOD, 100);
			OS_REG_WRITE(ah, AR_NEXT_QUIET, tsfLow >> 10);
			OS_REG_SET_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);

			if ((OS_REG_READ(ah, AR_TSF_L32)>>10) == (tsfLow>>10))
				break;

			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: TSF moved while trying to set quiet time "
			    "TSF: 0x%08x\n", __func__, tsfLow);
			HALASSERT(j < 1); /* TSF shouldn't count twice or reg access is taking forever */
		}

		OS_REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);

		/* Allow the quiet mechanism to do its work */
		OS_DELAY(200);
		OS_REG_CLR_BIT(ah, AR_TIMER_MODE, AR_TIMER_MODE_QUIET);

		/* Verify the transmit q is empty */
		for (i = STOP_DMA_TIMEOUT/STOP_DMA_ITER; i != 0; i--) {
			if (ar5212NumTxPending(ah, q) == 0)
				break;
			OS_DELAY(STOP_DMA_ITER);
		}
		if (i == 0) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: Failed to stop Tx DMA in %d msec after killing"
			    " last frame\n", __func__, STOP_DMA_TIMEOUT / 1000);
		}
		OS_REG_CLR_BIT(ah, AR_DIAG_SW, AR_DIAG_CHAN_IDLE);
	}

	OS_REG_WRITE(ah, AR_Q_TXD, 0);
	return (i != 0);
#undef STOP_DMA_ITER
#undef STOP_DMA_TIMEOUT
}
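
/*
 * Illustrative sketch only (not an existing caller in this file; the
 * queue number variable is an assumption for the example): a driver
 * would normally stop a queue's DMA like this before draining or
 * tearing down its software queue state.
 */
#if 0
	if (!ar5416StopTxDma(ah, qnum))
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: queue %u still busy\n",
		    __func__, qnum);
#endif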

#define VALID_KEY_TYPES \
        ((1 << HAL_KEY_TYPE_CLEAR) | (1 << HAL_KEY_TYPE_WEP)|\
         (1 << HAL_KEY_TYPE_AES)   | (1 << HAL_KEY_TYPE_TKIP))
#define isValidKeyType(_t)      ((1 << (_t)) & VALID_KEY_TYPES)

#define set11nTries(_series, _index) \
        (SM((_series)[_index].Tries, AR_XmitDataTries##_index))

#define set11nRate(_series, _index) \
        (SM((_series)[_index].Rate, AR_XmitRate##_index))

#define set11nPktDurRTSCTS(_series, _index) \
        (SM((_series)[_index].PktDuration, AR_PacketDur##_index) |\
         ((_series)[_index].RateFlags & HAL_RATESERIES_RTS_CTS   ?\
         AR_RTSCTSQual##_index : 0))

#define set11nRateFlags(_series, _index) \
        ((_series)[_index].RateFlags & HAL_RATESERIES_2040 ? AR_2040_##_index : 0) \
        |((_series)[_index].RateFlags & HAL_RATESERIES_HALFGI ? AR_GI##_index : 0) \
        |((_series)[_index].RateFlags & HAL_RATESERIES_STBC ? AR_STBC##_index : 0) \
        |SM((_series)[_index].ChSel, AR_ChainSel##_index)

/*
 * Descriptor Access Functions
 */

#define VALID_PKT_TYPES \
        ((1<<HAL_PKT_TYPE_NORMAL)|(1<<HAL_PKT_TYPE_ATIM)|\
         (1<<HAL_PKT_TYPE_PSPOLL)|(1<<HAL_PKT_TYPE_PROBE_RESP)|\
         (1<<HAL_PKT_TYPE_BEACON)|(1<<HAL_PKT_TYPE_AMPDU))
#define isValidPktType(_t)      ((1<<(_t)) & VALID_PKT_TYPES)
#define VALID_TX_RATES \
        ((1<<0x0b)|(1<<0x0f)|(1<<0x0a)|(1<<0x0e)|(1<<0x09)|(1<<0x0d)|\
         (1<<0x08)|(1<<0x0c)|(1<<0x1b)|(1<<0x1a)|(1<<0x1e)|(1<<0x19)|\
         (1<<0x1d)|(1<<0x18)|(1<<0x1c)|(1<<0x01)|(1<<0x02)|(1<<0x03)|\
         (1<<0x04)|(1<<0x05)|(1<<0x06)|(1<<0x07)|(1<<0x00))
/* NB: accept HT rates */
#define	isValidTxRate(_r)	((1<<((_r) & 0x7f)) & VALID_TX_RATES)
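
/*
 * Illustrative only: a few concrete codes and how the check above
 * treats them.  0x0b (OFDM 6Mb) and 0x1b (CCK 1Mb long) appear in
 * VALID_TX_RATES directly; an HT code such as 0x83 (MCS3) passes
 * because the 0x7f mask reduces it to 0x03, which is also listed.
 */
#if 0
	HALASSERT(isValidTxRate(0x0b));		/* OFDM 6Mb */
	HALASSERT(isValidTxRate(0x1b));		/* CCK 1Mb, long preamble */
	HALASSERT(isValidTxRate(0x83));		/* HT MCS3: 0x83 & 0x7f == 0x03 */
#endif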

static inline int
ar5416RateToRateTable(struct ath_hal *ah, uint8_t rate, HAL_BOOL is_ht40)
{

	/*
	 * Handle the non-MCS rates
	 */
	switch (rate) {
	case /*   1 Mb */ 0x1b:
	case /*   1 MbS*/ 0x1b | 0x4:
		return (AH5416(ah)->ah_ratesArray[rate1l]);
	case /*   2 Mb */ 0x1a:
		return (AH5416(ah)->ah_ratesArray[rate2l]);
	case /*   2 MbS*/ 0x1a | 0x4:
		return (AH5416(ah)->ah_ratesArray[rate2s]);
	case /* 5.5 Mb */ 0x19:
		return (AH5416(ah)->ah_ratesArray[rate5_5l]);
	case /* 5.5 MbS*/ 0x19 | 0x4:
		return (AH5416(ah)->ah_ratesArray[rate5_5s]);
	case /*  11 Mb */ 0x18:
		return (AH5416(ah)->ah_ratesArray[rate11l]);
	case /*  11 MbS*/ 0x18 | 0x4:
		return (AH5416(ah)->ah_ratesArray[rate11s]);
	}

	/* OFDM rates */
	switch (rate) {
	case /*   6 Mb */ 0x0b:
		return (AH5416(ah)->ah_ratesArray[rate6mb]);
	case /*   9 Mb */ 0x0f:
		return (AH5416(ah)->ah_ratesArray[rate9mb]);
	case /*  12 Mb */ 0x0a:
		return (AH5416(ah)->ah_ratesArray[rate12mb]);
	case /*  18 Mb */ 0x0e:
		return (AH5416(ah)->ah_ratesArray[rate18mb]);
	case /*  24 Mb */ 0x09:
		return (AH5416(ah)->ah_ratesArray[rate24mb]);
	case /*  36 Mb */ 0x0d:
		return (AH5416(ah)->ah_ratesArray[rate36mb]);
	case /*  48 Mb */ 0x08:
		return (AH5416(ah)->ah_ratesArray[rate48mb]);
	case /*  54 Mb */ 0x0c:
		return (AH5416(ah)->ah_ratesArray[rate54mb]);
	}

	/*
	 * Handle HT20/HT40 - we only have to do MCS0-7;
	 * there are no per-stream differences.
	 */
	if ((rate & 0x80) && is_ht40) {
		return (AH5416(ah)->ah_ratesArray[rateHt40_0 + (rate & 0x7)]);
	} else if (rate & 0x80) {
		return (AH5416(ah)->ah_ratesArray[rateHt20_0 + (rate & 0x7)]);
	}

	/* XXX default (eg XR, bad bad person!) */
	return (AH5416(ah)->ah_ratesArray[rate6mb]);
}

/*
 * Return the TX power to be used for the given rate/chains/TX power.
 *
 * There are a bunch of tweaks to make to a given TX power based on
 * the current configuration, so...
 */
static uint16_t
ar5416GetTxRatePower(struct ath_hal *ah, uint8_t rate, uint8_t tx_chainmask,
    uint16_t txPower, HAL_BOOL is_ht40)
{
	int n_txpower, max_txpower;
	const int cck_ofdm_delta = 2;
#define	EEP_MINOR(_ah) \
	(AH_PRIVATE(_ah)->ah_eeversion & AR5416_EEP_VER_MINOR_MASK)
#define	IS_EEP_MINOR_V2(_ah)	(EEP_MINOR(_ah) >= AR5416_EEP_MINOR_VER_2)

	/* Take a copy; we may underflow and thus need to clamp things */
	n_txpower = txPower;

	/* HT40? Need to adjust the TX power by this */
	if (is_ht40)
		n_txpower += AH5416(ah)->ah_ht40PowerIncForPdadc;

	/*
	 * Merlin? Offset the target TX power offset - it defaults to
	 * starting at -5.0dBm, but that can change!
	 *
	 * Kiwi/Kite? Always -5.0dBm offset.
	 */
	if (AR_SREV_KIWI_10_OR_LATER(ah)) {
		n_txpower -= (AR5416_PWR_TABLE_OFFSET_DB * 2);
	} else if (AR_SREV_MERLIN_20_OR_LATER(ah)) {
		int8_t pwr_table_offset = 0;
		/* This is in dBm, convert to 1/2 dBm */
		(void) ath_hal_eepromGet(ah, AR_EEP_PWR_TABLE_OFFSET,
		    &pwr_table_offset);
		n_txpower -= (pwr_table_offset * 2);
	}

	/*
	 * If Open-loop TX power control is used, the CCK rates need
	 * to be offset by that.
	 *
	 * Rates: 2S, 2L, 1S, 1L, 5.5S, 5.5L
	 *
	 * XXX Odd, we don't have a PHY table entry for long preamble
	 * 1mbit CCK?
	 */
	if (AR_SREV_MERLIN_20_OR_LATER(ah) &&
	    ath_hal_eepromGetFlag(ah, AR_EEP_OL_PWRCTRL)) {

		if (rate == 0x19 || rate == 0x1a || rate == 0x1b ||
		    rate == (0x19 | 0x04) || rate == (0x1a | 0x04) ||
		    rate == (0x1b | 0x04)) {
			n_txpower -= cck_ofdm_delta;
		}
	}

	/*
	 * We're now offset by the same amount that the static maximum
	 * PHY power tables are.  So, clamp the value based on that rate.
	 */
	max_txpower = ar5416RateToRateTable(ah, rate, is_ht40);
#if 0
	ath_hal_printf(ah, "%s: n_txpower = %d, max_txpower = %d, "
	    "rate = 0x%x , is_ht40 = %d\n",
	    __func__,
	    n_txpower,
	    max_txpower,
	    rate,
	    is_ht40);
#endif
	n_txpower = MIN(max_txpower, n_txpower);

	/*
	 * We don't have to offset the TX power for two or three
	 * chain operation here - it's done by the AR_PHY_POWER_TX_SUB
	 * register setting via the EEPROM.
	 *
	 * So for vendors that programmed the maximum target power assuming
	 * that 2/3 chains are always on, things will just plain work.
	 * (They won't reach that target power if only one chain is on, but
	 * that's a different problem.)
	 */

	/* Over/underflow? Adjust */
	if (n_txpower < 0)
		n_txpower = 0;
	else if (n_txpower > 63)
		n_txpower = 63;

	/*
	 * For some odd reason the AR9160 with txpower=0 results in a
	 * much higher (max?) TX power.  So, if it's a chipset before
	 * AR9220/AR9280, just clamp the minimum value at 1.
	 */
	if ((! AR_SREV_MERLIN_10_OR_LATER(ah)) && (n_txpower == 0))
		n_txpower = 1;

	return (n_txpower);
#undef	EEP_MINOR
#undef	IS_EEP_MINOR_V2
}
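
/*
 * Worked example (all values are assumptions, in 0.5 dB steps): with
 * txPower = 40 (20 dBm), an HT40 frame and ah_ht40PowerIncForPdadc = 4,
 * n_txpower becomes 44; on Merlin with an EEPROM power table offset of
 * -5 dBm, subtracting (-5 * 2) raises it to 54; the result is then
 * clamped to the per-rate maximum from ar5416RateToRateTable() and
 * finally to the 0..63 register range (minimum 1 on pre-Merlin parts).
 */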

HAL_BOOL
ar5416SetupTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int pktLen,
	u_int hdrLen,
	HAL_PKT_TYPE type,
	u_int txPower,
	u_int txRate0, u_int txTries0,
	u_int keyIx,
	u_int antMode,
	u_int flags,
	u_int rtsctsRate,
	u_int rtsctsDuration,
	u_int compicvLen,
	u_int compivLen,
	u_int comp)
{
#define	RTSCTS	(HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath_hal_5416 *ahp = AH5416(ah);

	(void) hdrLen;

	HALASSERT(txTries0 != 0);
	HALASSERT(isValidPktType(type));
	HALASSERT(isValidTxRate(txRate0));
	HALASSERT((flags & RTSCTS) != RTSCTS);
	/* XXX validate antMode */

	txPower = (txPower + AH5212(ah)->ah_txPowerIndexOffset);
	if (txPower > 63)
		txPower = 63;

	/*
	 * XXX For now, just assume that this isn't a HT40 frame.
	 */
	if (AH5212(ah)->ah_tpcEnabled) {
		txPower = ar5416GetTxRatePower(ah, txRate0,
		    ahp->ah_tx_chainmask,
		    txPower,
		    AH_FALSE);
	}

	ads->ds_ctl0 = (pktLen & AR_FrameLen)
		     | (txPower << AR_XmitPower_S)
		     | (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
		     | (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		     | (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0)
		     ;
	ads->ds_ctl1 = (type << AR_FrameType_S)
		     | (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0)
		     ;
	ads->ds_ctl2 = SM(txTries0, AR_XmitDataTries0)
		     | (flags & HAL_TXDESC_DURENA ? AR_DurUpdateEn : 0)
		     ;
	ads->ds_ctl3 = (txRate0 << AR_XmitRate0_S)
		     ;
	ads->ds_ctl4 = 0;
	ads->ds_ctl5 = 0;
	ads->ds_ctl6 = 0;
	ads->ds_ctl7 = SM(ahp->ah_tx_chainmask, AR_ChainSel0)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel1)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel2)
		     | SM(ahp->ah_tx_chainmask, AR_ChainSel3)
		     ;
	ads->ds_ctl8 = SM(0, AR_AntCtl0);
	ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);
	ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);
	ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);

	if (keyIx != HAL_TXKEYIX_INVALID) {
		/* XXX validate key index */
		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
		ads->ds_ctl0 |= AR_DestIdxValid;
		ads->ds_ctl6 |= SM(ahp->ah_keytype[keyIx], AR_EncrType);
	}
	if (flags & RTSCTS) {
		if (!isValidTxRate(rtsctsRate)) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: invalid rts/cts rate 0x%x\n",
			    __func__, rtsctsRate);
			return AH_FALSE;
		}
		/* XXX validate rtsctsDuration */
		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
			     | (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0)
			     ;
		ads->ds_ctl7 |= (rtsctsRate << AR_RTSCTSRate_S);
	}

	/*
	 * Set the TX antenna to 0 for Kite
	 * To preserve existing behaviour, also set the TPC bits to 0;
	 * when TPC is enabled these should be filled in appropriately.
	 *
	 * XXX TODO: when doing TPC, set the TX power up appropriately?
	 */
	if (AR_SREV_KITE(ah)) {
		ads->ds_ctl8 = SM(0, AR_AntCtl0);
		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(0, AR_XmitPower1);
		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(0, AR_XmitPower2);
		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(0, AR_XmitPower3);
	}
	return AH_TRUE;
#undef RTSCTS
}

HAL_BOOL
ar5416SetupXTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int txRate1, u_int txTries1,
	u_int txRate2, u_int txTries2,
	u_int txRate3, u_int txTries3)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	if (txTries1) {
		HALASSERT(isValidTxRate(txRate1));
		ads->ds_ctl2 |= SM(txTries1, AR_XmitDataTries1);
		ads->ds_ctl3 |= (txRate1 << AR_XmitRate1_S);
	}
	if (txTries2) {
		HALASSERT(isValidTxRate(txRate2));
		ads->ds_ctl2 |= SM(txTries2, AR_XmitDataTries2);
		ads->ds_ctl3 |= (txRate2 << AR_XmitRate2_S);
	}
	if (txTries3) {
		HALASSERT(isValidTxRate(txRate3));
		ads->ds_ctl2 |= SM(txTries3, AR_XmitDataTries3);
		ads->ds_ctl3 |= (txRate3 << AR_XmitRate3_S);
	}
	return AH_TRUE;
}

HAL_BOOL
ar5416FillTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	HAL_DMA_ADDR *bufAddrList, uint32_t *segLenList, u_int descId,
	u_int qcuId, HAL_BOOL firstSeg, HAL_BOOL lastSeg,
	const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t segLen = segLenList[0];

	HALASSERT((segLen &~ AR_BufLen) == 0);

	ds->ds_data = bufAddrList[0];

	if (firstSeg) {
		/*
		 * First descriptor, don't clobber xmit control data
		 * set up by ar5416SetupTxDesc.
		 */
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {		/* !firstSeg && lastSeg */
		/*
		 * Last descriptor in a multi-descriptor frame,
		 * copy the multi-rate transmit parameters from
		 * the first frame for processing on completion.
		 */
		ads->ds_ctl1 = segLen;
#ifdef AH_NEED_DESC_SWAP
		ads->ds_ctl0 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl0)
		    & AR_TxIntrReq;
		ads->ds_ctl2 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl2);
		ads->ds_ctl3 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl3);
		/* ctl6 - we only need encrtype; the rest are blank */
		ads->ds_ctl6 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl6 & AR_EncrType);
#else
		ads->ds_ctl0 = AR5416DESC_CONST(ds0)->ds_ctl0 & AR_TxIntrReq;
		ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
		ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
		/* ctl6 - we only need encrtype; the rest are blank */
		ads->ds_ctl6 = AR5416DESC_CONST(ds0)->ds_ctl6 & AR_EncrType;
#endif
	} else {			/* !firstSeg && !lastSeg */
		/*
		 * Intermediate descriptor in a multi-descriptor frame.
		 */
#ifdef AH_NEED_DESC_SWAP
		ads->ds_ctl0 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl0)
		    & AR_TxIntrReq;
		ads->ds_ctl6 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl6 & AR_EncrType);
#else
		ads->ds_ctl0 = AR5416DESC_CONST(ds0)->ds_ctl0 & AR_TxIntrReq;
		ads->ds_ctl6 = AR5416DESC_CONST(ds0)->ds_ctl6 & AR_EncrType;
#endif
		ads->ds_ctl1 = segLen | AR_TxMore;
		ads->ds_ctl2 = 0;
		ads->ds_ctl3 = 0;
	}
	/* XXX only on last descriptor? */
	OS_MEMZERO(ads->u.tx.status, sizeof(ads->u.tx.status));
	return AH_TRUE;
}
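
/*
 * Illustrative sketch only (all parameter values and variable names are
 * assumptions for the example): for a single-buffer, non-aggregate frame
 * the two routines above are used together on one descriptor -
 * ar5416SetupTxDesc() programs the transmit parameters and
 * ar5416FillTxDesc() attaches the buffer.
 */
#if 0
	ar5416SetupTxDesc(ah, ds, pktLen, hdrLen, HAL_PKT_TYPE_NORMAL,
	    30 /* 0.5 dB units */, 0x0b /* OFDM 6Mb */, 4 /* tries */,
	    HAL_TXKEYIX_INVALID, 0 /* antMode */, HAL_TXDESC_INTREQ,
	    0, 0, 0, 0, 0);
	ar5416FillTxDesc(ah, ds, &bufAddr, &segLen, 0 /* descId */,
	    qnum, AH_TRUE /* firstSeg */, AH_TRUE /* lastSeg */, ds);
#endif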

/*
 * NB: cipher is no longer used, it's calculated.
 */
HAL_BOOL
ar5416ChainTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	HAL_DMA_ADDR *bufAddrList,
	uint32_t *segLenList,
	u_int pktLen,
	u_int hdrLen,
	HAL_PKT_TYPE type,
	u_int keyIx,
	HAL_CIPHER cipher,
	uint8_t delims,
	HAL_BOOL firstSeg,
	HAL_BOOL lastSeg,
	HAL_BOOL lastAggr)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);
	struct ath_hal_5416 *ahp = AH5416(ah);
	u_int segLen = segLenList[0];

	int isaggr = 0;
	uint32_t last_aggr = 0;

	(void) hdrLen;
	(void) ah;

	HALASSERT((segLen &~ AR_BufLen) == 0);
	ds->ds_data = bufAddrList[0];

	HALASSERT(isValidPktType(type));
	if (type == HAL_PKT_TYPE_AMPDU) {
		type = HAL_PKT_TYPE_NORMAL;
		isaggr = 1;
		if (lastAggr == AH_FALSE)
			last_aggr = AR_MoreAggr;
	}

	/*
	 * Since this function is called before any of the other
	 * descriptor setup functions (at least in this particular
	 * 802.11n aggregation implementation), always bzero() the
	 * descriptor. Previously this would be done for all but
	 * the first segment.
	 * XXX TODO: figure out why; perhaps I'm using this slightly
	 * XXX incorrectly.
	 */
	OS_MEMZERO(ds->ds_hw, AR5416_DESC_TX_CTL_SZ);

	/*
	 * Note: VEOL should only be for the last descriptor in the chain.
	 */
	ads->ds_ctl0 = (pktLen & AR_FrameLen);

	/*
	 * For aggregates:
	 * + IsAggr must be set for all descriptors of all subframes of
	 *   the aggregate
	 * + MoreAggr must be set for all descriptors of all subframes
	 *   of the aggregate EXCEPT the last subframe;
	 * + MoreAggr must be _CLEAR_ for all descriptors of the last
	 *   subframe of the aggregate.
	 */
	ads->ds_ctl1 = (type << AR_FrameType_S)
			| (isaggr ? (AR_IsAggr | last_aggr) : 0);

	ads->ds_ctl2 = 0;
	ads->ds_ctl3 = 0;
	if (keyIx != HAL_TXKEYIX_INVALID) {
		/* XXX validate key index */
		ads->ds_ctl1 |= SM(keyIx, AR_DestIdx);
		ads->ds_ctl0 |= AR_DestIdxValid;
	}

	ads->ds_ctl6 |= SM(ahp->ah_keytype[keyIx], AR_EncrType);
	if (isaggr) {
		ads->ds_ctl6 |= SM(delims, AR_PadDelim);
	}

	if (firstSeg) {
		ads->ds_ctl1 |= segLen | (lastSeg ? 0 : AR_TxMore);
	} else if (lastSeg) {           /* !firstSeg && lastSeg */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 |= segLen;
	} else {                        /* !firstSeg && !lastSeg */
		/*
		 * Intermediate descriptor in a multi-descriptor frame.
		 */
		ads->ds_ctl0 = 0;
		ads->ds_ctl1 |= segLen | AR_TxMore;
	}
	ds_txstatus[0] = ds_txstatus[1] = 0;
	ds_txstatus[9] &= ~AR_TxDone;

	return AH_TRUE;
}

HAL_BOOL
ar5416SetupFirstTxDesc(struct ath_hal *ah, struct ath_desc *ds,
	u_int aggrLen, u_int flags, u_int txPower,
	u_int txRate0, u_int txTries0, u_int antMode,
	u_int rtsctsRate, u_int rtsctsDuration)
{
#define RTSCTS  (HAL_TXDESC_RTSENA|HAL_TXDESC_CTSENA)
	struct ar5416_desc *ads = AR5416DESC(ds);
	struct ath_hal_5212 *ahp = AH5212(ah);

	HALASSERT(txTries0 != 0);
	HALASSERT(isValidTxRate(txRate0));
	HALASSERT((flags & RTSCTS) != RTSCTS);
	/* XXX validate antMode */

	txPower = (txPower + ahp->ah_txPowerIndexOffset);
	if (txPower > 63)
		txPower = 63;

	ads->ds_ctl0 |= (txPower << AR_XmitPower_S)
		| (flags & HAL_TXDESC_VEOL ? AR_VEOL : 0)
		| (flags & HAL_TXDESC_CLRDMASK ? AR_ClrDestMask : 0)
		| (flags & HAL_TXDESC_INTREQ ? AR_TxIntrReq : 0);
	ads->ds_ctl1 |= (flags & HAL_TXDESC_NOACK ? AR_NoAck : 0);
	ads->ds_ctl2 |= SM(txTries0, AR_XmitDataTries0);
	ads->ds_ctl3 |= (txRate0 << AR_XmitRate0_S);
	ads->ds_ctl7 = SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel0)
		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel1)
		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel2)
		| SM(AH5416(ah)->ah_tx_chainmask, AR_ChainSel3);

	/* NB: no V1 WAR */
	ads->ds_ctl8 = SM(0, AR_AntCtl0);
	ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);
	ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);
	ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);

	ads->ds_ctl6 &= ~(0xffff);
	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);

	if (flags & RTSCTS) {
		/* XXX validate rtsctsDuration */
		ads->ds_ctl0 |= (flags & HAL_TXDESC_CTSENA ? AR_CTSEnable : 0)
			| (flags & HAL_TXDESC_RTSENA ? AR_RTSEnable : 0);
	}

	/*
	 * Set the TX antenna to 0 for Kite
	 * To preserve existing behaviour, also set the TPC bits to 0;
	 * when TPC is enabled these should be filled in appropriately.
	 */
	if (AR_SREV_KITE(ah)) {
		ads->ds_ctl8 = SM(0, AR_AntCtl0);
		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(0, AR_XmitPower1);
		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(0, AR_XmitPower2);
		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(0, AR_XmitPower3);
	}

	return AH_TRUE;
#undef RTSCTS
}

HAL_BOOL
ar5416SetupLastTxDesc(struct ath_hal *ah, struct ath_desc *ds,
		const struct ath_desc *ds0)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 &= ~AR_MoreAggr;
	ads->ds_ctl6 &= ~AR_PadDelim;

	/* hack to copy rate info to last desc for later processing */
#ifdef AH_NEED_DESC_SWAP
	ads->ds_ctl2 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl2);
	ads->ds_ctl3 = __bswap32(AR5416DESC_CONST(ds0)->ds_ctl3);
#else
	ads->ds_ctl2 = AR5416DESC_CONST(ds0)->ds_ctl2;
	ads->ds_ctl3 = AR5416DESC_CONST(ds0)->ds_ctl3;
#endif
	return AH_TRUE;
}

#ifdef AH_NEED_DESC_SWAP
/* Swap transmit descriptor */
static __inline void
ar5416SwapTxDesc(struct ath_desc *ds)
{
	ds->ds_data = __bswap32(ds->ds_data);
	ds->ds_ctl0 = __bswap32(ds->ds_ctl0);
	ds->ds_ctl1 = __bswap32(ds->ds_ctl1);
	ds->ds_hw[0] = __bswap32(ds->ds_hw[0]);
	ds->ds_hw[1] = __bswap32(ds->ds_hw[1]);
	ds->ds_hw[2] = __bswap32(ds->ds_hw[2]);
	ds->ds_hw[3] = __bswap32(ds->ds_hw[3]);
}
#endif

/*
 * Processing of HW TX descriptor.
 */
HAL_STATUS
ar5416ProcTxDesc(struct ath_hal *ah,
	struct ath_desc *ds, struct ath_tx_status *ts)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);

#ifdef AH_NEED_DESC_SWAP
	if ((ds_txstatus[9] & __bswap32(AR_TxDone)) == 0)
		return HAL_EINPROGRESS;
	ar5416SwapTxDesc(ds);
#else
	if ((ds_txstatus[9] & AR_TxDone) == 0)
		return HAL_EINPROGRESS;
#endif

	/* Update software copies of the HW status */
	ts->ts_seqnum = MS(ds_txstatus[9], AR_SeqNum);
	ts->ts_tstamp = AR_SendTimestamp(ds_txstatus);
	ts->ts_tid = MS(ds_txstatus[9], AR_TxTid);

	ts->ts_status = 0;
	if (ds_txstatus[1] & AR_ExcessiveRetries)
		ts->ts_status |= HAL_TXERR_XRETRY;
	if (ds_txstatus[1] & AR_Filtered)
		ts->ts_status |= HAL_TXERR_FILT;
	if (ds_txstatus[1] & AR_FIFOUnderrun)
		ts->ts_status |= HAL_TXERR_FIFO;
	if (ds_txstatus[9] & AR_TxOpExceeded)
		ts->ts_status |= HAL_TXERR_XTXOP;
	if (ds_txstatus[1] & AR_TxTimerExpired)
		ts->ts_status |= HAL_TXERR_TIMER_EXPIRED;

	ts->ts_flags  = 0;
	if (ds_txstatus[0] & AR_TxBaStatus) {
		ts->ts_flags |= HAL_TX_BA;
		ts->ts_ba_low = AR_BaBitmapLow(ds_txstatus);
		ts->ts_ba_high = AR_BaBitmapHigh(ds_txstatus);
	}
	if (ds->ds_ctl1 & AR_IsAggr)
		ts->ts_flags |= HAL_TX_AGGR;
	if (ds_txstatus[1] & AR_DescCfgErr)
		ts->ts_flags |= HAL_TX_DESC_CFG_ERR;
	if (ds_txstatus[1] & AR_TxDataUnderrun)
		ts->ts_flags |= HAL_TX_DATA_UNDERRUN;
	if (ds_txstatus[1] & AR_TxDelimUnderrun)
		ts->ts_flags |= HAL_TX_DELIM_UNDERRUN;

	/*
	 * Extract the transmit rate used and mark the rate as
	 * ``alternate'' if it wasn't the series 0 rate.
	 */
	ts->ts_finaltsi =  MS(ds_txstatus[9], AR_FinalTxIdx);
	switch (ts->ts_finaltsi) {
	case 0:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate0);
		break;
	case 1:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate1);
		break;
	case 2:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate2);
		break;
	case 3:
		ts->ts_rate = MS(ads->ds_ctl3, AR_XmitRate3);
		break;
	}

	ts->ts_rssi = MS(ds_txstatus[5], AR_TxRSSICombined);
	ts->ts_rssi_ctl[0] = MS(ds_txstatus[0], AR_TxRSSIAnt00);
	ts->ts_rssi_ctl[1] = MS(ds_txstatus[0], AR_TxRSSIAnt01);
	ts->ts_rssi_ctl[2] = MS(ds_txstatus[0], AR_TxRSSIAnt02);
	ts->ts_rssi_ext[0] = MS(ds_txstatus[5], AR_TxRSSIAnt10);
	ts->ts_rssi_ext[1] = MS(ds_txstatus[5], AR_TxRSSIAnt11);
	ts->ts_rssi_ext[2] = MS(ds_txstatus[5], AR_TxRSSIAnt12);
	ts->ts_evm0 = AR_TxEVM0(ds_txstatus);
	ts->ts_evm1 = AR_TxEVM1(ds_txstatus);
	ts->ts_evm2 = AR_TxEVM2(ds_txstatus);

	ts->ts_shortretry = MS(ds_txstatus[1], AR_RTSFailCnt);
	ts->ts_longretry = MS(ds_txstatus[1], AR_DataFailCnt);
	/*
	 * The retry count has the number of un-acked tries for the
	 * final series used.  When doing multi-rate retry we must
	 * fixup the retry count by adding in the try counts for
	 * each series that was fully-processed.  Beware that this
	 * takes values from the try counts in the final descriptor.
	 * These are not required by the hardware.  We assume they
	 * are placed there by the driver as otherwise we have no
	 * access and the driver can't do the calculation because it
	 * doesn't know the descriptor format.
	 */
	switch (ts->ts_finaltsi) {
	case 3: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries2);
	case 2: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries1);
	case 1: ts->ts_longretry += MS(ads->ds_ctl2, AR_XmitDataTries0);
	}

	/*
	 * These fields are not used. Zero these to preserve compatibility
	 * with existing drivers.
	 */
	ts->ts_virtcol = MS(ads->ds_ctl1, AR_VirtRetryCnt);
	ts->ts_antenna = 0; /* We don't switch antennas on Owl */

	/* handle tx trigger level changes internally */
	if ((ts->ts_status & HAL_TXERR_FIFO) ||
	    (ts->ts_flags & (HAL_TX_DATA_UNDERRUN | HAL_TX_DELIM_UNDERRUN)))
		ar5212UpdateTxTrigLevel(ah, AH_TRUE);

	return HAL_OK;
}

HAL_BOOL
ar5416SetGlobalTxTimeout(struct ath_hal *ah, u_int tu)
{
	struct ath_hal_5416 *ahp = AH5416(ah);

	if (tu > 0xFFFF) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: bad global tx timeout %u\n",
		    __func__, tu);
		/* restore default handling */
		ahp->ah_globaltxtimeout = (u_int) -1;
		return AH_FALSE;
	}
	OS_REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
	ahp->ah_globaltxtimeout = tu;
	return AH_TRUE;
}

u_int
ar5416GetGlobalTxTimeout(struct ath_hal *ah)
{
	return MS(OS_REG_READ(ah, AR_GTXTO), AR_GTXTO_TIMEOUT_LIMIT);
}

#define	HT_RC_2_MCS(_rc)	((_rc) & 0x0f)
static const u_int8_t baDurationDelta[] = {
	24,	//  0: BPSK
	12,	//  1: QPSK 1/2
	12,	//  2: QPSK 3/4
	4,	//  3: 16-QAM 1/2
	4,	//  4: 16-QAM 3/4
	4,	//  5: 64-QAM 2/3
	4,	//  6: 64-QAM 3/4
	4,	//  7: 64-QAM 5/6
	24,	//  8: BPSK
	12,	//  9: QPSK 1/2
	12,	// 10: QPSK 3/4
	4,	// 11: 16-QAM 1/2
	4,	// 12: 16-QAM 3/4
	4,	// 13: 64-QAM 2/3
	4,	// 14: 64-QAM 3/4
	4,	// 15: 64-QAM 5/6
};
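
/*
 * Illustrative only: HT_RC_2_MCS() keeps the low four bits of an HT
 * rate code, so 0x85 (MCS5) selects baDurationDelta[5] == 4, which is
 * the adjustment folded into AR_BurstDur below.
 */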

void
ar5416Set11nRateScenario(struct ath_hal *ah, struct ath_desc *ds,
        u_int durUpdateEn, u_int rtsctsRate,
	HAL_11N_RATE_SERIES series[], u_int nseries, u_int flags)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t ds_ctl0;

	HALASSERT(nseries == 4);
	(void)nseries;

	/*
	 * Only one of RTS and CTS enable must be set.
	 * If a frame has both set, just do RTS protection -
	 * that's enough to satisfy legacy protection.
	 */
	if (flags & (HAL_TXDESC_RTSENA | HAL_TXDESC_CTSENA)) {
		ds_ctl0 = ads->ds_ctl0;

		if (flags & HAL_TXDESC_RTSENA) {
			ds_ctl0 &= ~AR_CTSEnable;
			ds_ctl0 |= AR_RTSEnable;
		} else {
			ds_ctl0 &= ~AR_RTSEnable;
			ds_ctl0 |= AR_CTSEnable;
		}

		ads->ds_ctl0 = ds_ctl0;
	} else {
		ads->ds_ctl0 =
		    (ads->ds_ctl0 & ~(AR_RTSEnable | AR_CTSEnable));
	}

	ads->ds_ctl2 = set11nTries(series, 0)
		     | set11nTries(series, 1)
		     | set11nTries(series, 2)
		     | set11nTries(series, 3)
		     | (durUpdateEn ? AR_DurUpdateEn : 0);

	ads->ds_ctl3 = set11nRate(series, 0)
		     | set11nRate(series, 1)
		     | set11nRate(series, 2)
		     | set11nRate(series, 3);

	ads->ds_ctl4 = set11nPktDurRTSCTS(series, 0)
		     | set11nPktDurRTSCTS(series, 1);

	ads->ds_ctl5 = set11nPktDurRTSCTS(series, 2)
		     | set11nPktDurRTSCTS(series, 3);

	ads->ds_ctl7 = set11nRateFlags(series, 0)
		     | set11nRateFlags(series, 1)
		     | set11nRateFlags(series, 2)
		     | set11nRateFlags(series, 3)
		     | SM(rtsctsRate, AR_RTSCTSRate);

	/*
	 * Doing per-packet TPC - update the TX power for the first
	 * field; program in the other series.
	 */
	if (AH5212(ah)->ah_tpcEnabled) {
		uint32_t ds_ctl0;
		uint16_t txPower;

		/* Modify the tx power field for rate 0 */
		txPower = ar5416GetTxRatePower(ah, series[0].Rate,
		    series[0].ChSel,
		    series[0].tx_power_cap,
		    !! (series[0].RateFlags & HAL_RATESERIES_2040));
		ds_ctl0 = ads->ds_ctl0 & ~AR_XmitPower;
		ds_ctl0 |= (txPower << AR_XmitPower_S);
		ads->ds_ctl0 = ds_ctl0;

		/*
		 * Override the whole descriptor field for each TX power.
		 *
		 * This will need changing if we ever support antenna control
		 * programming.
		 */
		txPower = ar5416GetTxRatePower(ah, series[1].Rate,
		    series[1].ChSel,
		    series[1].tx_power_cap,
		    !! (series[1].RateFlags & HAL_RATESERIES_2040));
		ads->ds_ctl9 = SM(0, AR_AntCtl1) | SM(txPower, AR_XmitPower1);

		txPower = ar5416GetTxRatePower(ah, series[2].Rate,
		    series[2].ChSel,
		    series[2].tx_power_cap,
		    !! (series[2].RateFlags & HAL_RATESERIES_2040));
		ads->ds_ctl10 = SM(0, AR_AntCtl2) | SM(txPower, AR_XmitPower2);

		txPower = ar5416GetTxRatePower(ah, series[3].Rate,
		    series[3].ChSel,
		    series[3].tx_power_cap,
		    !! (series[3].RateFlags & HAL_RATESERIES_2040));
		ads->ds_ctl11 = SM(0, AR_AntCtl3) | SM(txPower, AR_XmitPower3);
	}
}
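
/*
 * Illustrative sketch only (all values are assumptions for the
 * example): a driver fills a four entry HAL_11N_RATE_SERIES array and
 * hands it to the routine above once the descriptor has been set up.
 */
#if 0
	HAL_11N_RATE_SERIES series[4] = {
		{ .Tries = 4, .Rate = 0x83 /* MCS3 */, .ChSel = 0x3,
		  .RateFlags = HAL_RATESERIES_RTS_CTS },
		{ .Tries = 4, .Rate = 0x81 /* MCS1 */, .ChSel = 0x3 },
		{ .Tries = 4, .Rate = 0x0b /* 6Mb  */, .ChSel = 0x1 },
		{ .Tries = 4, .Rate = 0x0b /* 6Mb  */, .ChSel = 0x1 },
	};

	ar5416Set11nRateScenario(ah, ds, 0 /* durUpdateEn */,
	    0x0b /* rtsctsRate */, series, 4, HAL_TXDESC_RTSENA);
#endif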

/*
 * Note: this should be called before calling ar5416SetBurstDuration()
 * (if it is indeed called) in order to ensure that the burst duration
 * is correctly updated with the BA delta workaround.
 */
void
ar5416Set11nAggrFirst(struct ath_hal *ah, struct ath_desc *ds, u_int aggrLen,
    u_int numDelims)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t flags;
	uint32_t burstDur;
	uint8_t rate;

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);

	ads->ds_ctl6 &= ~(AR_AggrLen | AR_PadDelim);
	ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
	ads->ds_ctl6 |= SM(numDelims, AR_PadDelim);

	if (! AR_SREV_MERLIN_10_OR_LATER(ah)) {
		/*
		 * XXX It'd be nice if I were passed in the rate scenario
		 * at this point..
		 */
		rate = MS(ads->ds_ctl3, AR_XmitRate0);
		flags = ads->ds_ctl0 & (AR_CTSEnable | AR_RTSEnable);
		/*
		 * WAR - MAC assumes normal ACK time instead of
		 * block ACK while computing packet duration.
		 * Add this delta to the burst duration in the descriptor.
		 */
		if (flags && (ads->ds_ctl1 & AR_IsAggr)) {
			burstDur = baDurationDelta[HT_RC_2_MCS(rate)];
			ads->ds_ctl2 &= ~(AR_BurstDur);
			ads->ds_ctl2 |= SM(burstDur, AR_BurstDur);
		}
	}
}

void
ar5416Set11nAggrMiddle(struct ath_hal *ah, struct ath_desc *ds, u_int numDelims)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t *ds_txstatus = AR5416_DS_TXSTATUS(ah,ads);

	ads->ds_ctl1 |= (AR_IsAggr | AR_MoreAggr);

	ads->ds_ctl6 &= ~AR_PadDelim;
	ads->ds_ctl6 |= SM(numDelims, AR_PadDelim);
	ads->ds_ctl6 &= ~AR_AggrLen;

	/*
	 * Clear the TxDone status here, may need to change
	 * func name to reflect this
	 */
	ds_txstatus[9] &= ~AR_TxDone;
}

void
ar5416Set11nAggrLast(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 |= AR_IsAggr;
	ads->ds_ctl1 &= ~AR_MoreAggr;
	ads->ds_ctl6 &= ~AR_PadDelim;
}

void
ar5416Clr11nAggr(struct ath_hal *ah, struct ath_desc *ds)
{
	struct ar5416_desc *ads = AR5416DESC(ds);

	ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
	ads->ds_ctl6 &= ~AR_PadDelim;
	ads->ds_ctl6 &= ~AR_AggrLen;
}

void
ar5416Set11nVirtualMoreFrag(struct ath_hal *ah, struct ath_desc *ds,
    u_int vmf)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	if (vmf)
		ads->ds_ctl0 |= AR_VirtMoreFrag;
	else
		ads->ds_ctl0 &= ~AR_VirtMoreFrag;
}

/*
 * Program the burst duration, with the included BA delta if it's
 * applicable.
 */
void
ar5416Set11nBurstDuration(struct ath_hal *ah, struct ath_desc *ds,
                                                  u_int burstDuration)
{
	struct ar5416_desc *ads = AR5416DESC(ds);
	uint32_t burstDur = 0;
	uint8_t rate;

	if (! AR_SREV_MERLIN_10_OR_LATER(ah)) {
		/*
		 * XXX It'd be nice if I were passed in the rate scenario
		 * at this point..
		 */
		rate = MS(ads->ds_ctl3, AR_XmitRate0);
		/*
		 * WAR - MAC assumes normal ACK time instead of
		 * block ACK while computing packet duration.
		 * Add this delta to the burst duration in the descriptor.
		 */
		if (ads->ds_ctl1 & AR_IsAggr) {
			burstDur = baDurationDelta[HT_RC_2_MCS(rate)];
		}
	}

	ads->ds_ctl2 &= ~AR_BurstDur;
	ads->ds_ctl2 |= SM(burstDur + burstDuration, AR_BurstDur);
}

/*
 * Retrieve the rate table from the given TX completion descriptor
 */
HAL_BOOL
ar5416GetTxCompletionRates(struct ath_hal *ah, const struct ath_desc *ds0, int *rates, int *tries)
{
	const struct ar5416_desc *ads = AR5416DESC_CONST(ds0);

	rates[0] = MS(ads->ds_ctl3, AR_XmitRate0);
	rates[1] = MS(ads->ds_ctl3, AR_XmitRate1);
	rates[2] = MS(ads->ds_ctl3, AR_XmitRate2);
	rates[3] = MS(ads->ds_ctl3, AR_XmitRate3);

	tries[0] = MS(ads->ds_ctl2, AR_XmitDataTries0);
	tries[1] = MS(ads->ds_ctl2, AR_XmitDataTries1);
	tries[2] = MS(ads->ds_ctl2, AR_XmitDataTries2);
	tries[3] = MS(ads->ds_ctl2, AR_XmitDataTries3);

	return AH_TRUE;
}
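
/*
 * Illustrative only (variable names assumed): after ar5416ProcTxDesc()
 * reports completion, the per-series rates/tries programmed into the
 * descriptor can be read back for rate control, e.g.
 */
#if 0
	int rates[4], tries[4];

	if (ar5416GetTxCompletionRates(ah, ds0, rates, tries))
		final_rate = rates[ts->ts_finaltsi];
#endif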


/*
 * TX queue management routines - AR5416 and later chipsets
 */

/*
 * Allocate and initialize a tx DCU/QCU combination.
 */
int
ar5416SetupTxQueue(struct ath_hal *ah, HAL_TX_QUEUE type,
	const HAL_TXQ_INFO *qInfo)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_TX_QUEUE_INFO *qi;
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	int q, defqflags;

	/* by default enable OK+ERR+DESC+URN interrupts */
	defqflags = HAL_TXQ_TXOKINT_ENABLE
		  | HAL_TXQ_TXERRINT_ENABLE
		  | HAL_TXQ_TXDESCINT_ENABLE
		  | HAL_TXQ_TXURNINT_ENABLE;
	/* XXX move queue assignment to driver */
	switch (type) {
	case HAL_TX_QUEUE_BEACON:
		q = pCap->halTotalQueues-1;	/* highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_CAB:
		q = pCap->halTotalQueues-2;	/* next highest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_CBR_DIS_BEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_PSPOLL:
		q = 1;				/* lowest priority */
		defqflags |= HAL_TXQ_DBA_GATED
		       | HAL_TXQ_CBR_DIS_QEMPTY
		       | HAL_TXQ_CBR_DIS_BEMPTY
		       | HAL_TXQ_ARB_LOCKOUT_GLOBAL
		       | HAL_TXQ_BACKOFF_DISABLE;
		break;
	case HAL_TX_QUEUE_UAPSD:
		q = pCap->halTotalQueues-3;	/* third highest priority */
		if (ahp->ah_txq[q].tqi_type != HAL_TX_QUEUE_INACTIVE) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available UAPSD tx queue\n", __func__);
			return -1;
		}
		break;
	case HAL_TX_QUEUE_DATA:
		for (q = 0; q < pCap->halTotalQueues; q++)
			if (ahp->ah_txq[q].tqi_type == HAL_TX_QUEUE_INACTIVE)
				break;
		if (q == pCap->halTotalQueues) {
			HALDEBUG(ah, HAL_DEBUG_ANY,
			    "%s: no available tx queue\n", __func__);
			return -1;
		}
		break;
	default:
		HALDEBUG(ah, HAL_DEBUG_ANY,
		    "%s: bad tx queue type %u\n", __func__, type);
		return -1;
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: queue %u\n", __func__, q);

	qi = &ahp->ah_txq[q];
	if (qi->tqi_type != HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: tx queue %u already active\n",
		    __func__, q);
		return -1;
	}
	OS_MEMZERO(qi, sizeof(HAL_TX_QUEUE_INFO));
	qi->tqi_type = type;
	if (qInfo == AH_NULL) {
		qi->tqi_qflags = defqflags;
		qi->tqi_aifs = INIT_AIFS;
		qi->tqi_cwmin = HAL_TXQ_USEDEFAULT;	/* NB: do at reset */
		qi->tqi_cwmax = INIT_CWMAX;
		qi->tqi_shretry = INIT_SH_RETRY;
		qi->tqi_lgretry = INIT_LG_RETRY;
		qi->tqi_physCompBuf = 0;
	} else {
		qi->tqi_physCompBuf = qInfo->tqi_compBuf;
		(void) ar5212SetTxQueueProps(ah, q, qInfo);
	}
	/* NB: must be followed by ar5416ResetTxQueue */
	return q;
}
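
/*
 * Illustrative sketch only (error handling elided, call pattern is an
 * assumption for the example): allocating a data queue with default
 * parameters and then programming its hardware state.
 */
#if 0
	int qnum = ar5416SetupTxQueue(ah, HAL_TX_QUEUE_DATA, AH_NULL);

	if (qnum != -1)
		(void) ar5416ResetTxQueue(ah, qnum);
#endif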

/*
 * Update the h/w interrupt registers to reflect a tx q's configuration.
 */
static void
setTxQInterrupts(struct ath_hal *ah, HAL_TX_QUEUE_INFO *qi)
{
	struct ath_hal_5212 *ahp = AH5212(ah);

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
	    "%s: tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", __func__,
	    ahp->ah_txOkInterruptMask, ahp->ah_txErrInterruptMask,
	    ahp->ah_txDescInterruptMask, ahp->ah_txEolInterruptMask,
	    ahp->ah_txUrnInterruptMask);

	OS_REG_WRITE(ah, AR_IMR_S0,
		  SM(ahp->ah_txOkInterruptMask, AR_IMR_S0_QCU_TXOK)
		| SM(ahp->ah_txDescInterruptMask, AR_IMR_S0_QCU_TXDESC)
	);
	OS_REG_WRITE(ah, AR_IMR_S1,
		  SM(ahp->ah_txErrInterruptMask, AR_IMR_S1_QCU_TXERR)
		| SM(ahp->ah_txEolInterruptMask, AR_IMR_S1_QCU_TXEOL)
	);
	OS_REG_RMW_FIELD(ah, AR_IMR_S2,
		AR_IMR_S2_QCU_TXURN, ahp->ah_txUrnInterruptMask);
}

/*
 * Set the retry, aifs, cwmin/max, readyTime regs for specified queue
 * Assumes:
 *  phwChannel has been set to point to the current channel
 */
#define	TU_TO_USEC(_tu)		((_tu) << 10)
HAL_BOOL
ar5416ResetTxQueue(struct ath_hal *ah, u_int q)
{
	struct ath_hal_5212 *ahp = AH5212(ah);
	HAL_CAPABILITIES *pCap = &AH_PRIVATE(ah)->ah_caps;
	const struct ieee80211_channel *chan = AH_PRIVATE(ah)->ah_curchan;
	HAL_TX_QUEUE_INFO *qi;
	uint32_t cwMin, chanCwMin, qmisc, dmisc;

	if (q >= pCap->halTotalQueues) {
		HALDEBUG(ah, HAL_DEBUG_ANY, "%s: invalid queue num %u\n",
		    __func__, q);
		return AH_FALSE;
	}
	qi = &ahp->ah_txq[q];
	if (qi->tqi_type == HAL_TX_QUEUE_INACTIVE) {
		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: inactive queue %u\n",
		    __func__, q);
		return AH_TRUE;		/* XXX??? */
	}

	HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: reset queue %u\n", __func__, q);

	if (qi->tqi_cwmin == HAL_TXQ_USEDEFAULT) {
		/*
		 * Select cwmin according to channel type.
		 * NB: chan can be NULL during attach
		 */
		if (chan && IEEE80211_IS_CHAN_B(chan))
			chanCwMin = INIT_CWMIN_11B;
		else
			chanCwMin = INIT_CWMIN;
		/* make sure that the CWmin is of the form (2^n - 1) */
		for (cwMin = 1; cwMin < chanCwMin; cwMin = (cwMin << 1) | 1)
			;
	} else
		cwMin = qi->tqi_cwmin;

	/* set cwMin/Max and AIFS values */
	OS_REG_WRITE(ah, AR_DLCL_IFS(q),
		  SM(cwMin, AR_D_LCL_IFS_CWMIN)
		| SM(qi->tqi_cwmax, AR_D_LCL_IFS_CWMAX)
		| SM(qi->tqi_aifs, AR_D_LCL_IFS_AIFS));

	/* Set retry limit values */
	OS_REG_WRITE(ah, AR_DRETRY_LIMIT(q),
		   SM(INIT_SSH_RETRY, AR_D_RETRY_LIMIT_STA_SH)
		 | SM(INIT_SLG_RETRY, AR_D_RETRY_LIMIT_STA_LG)
		 | SM(qi->tqi_lgretry, AR_D_RETRY_LIMIT_FR_LG)
		 | SM(qi->tqi_shretry, AR_D_RETRY_LIMIT_FR_SH)
	);

	/* NB: always enable early termination on the QCU */
	qmisc = AR_Q_MISC_DCU_EARLY_TERM_REQ
	      | SM(AR_Q_MISC_FSP_ASAP, AR_Q_MISC_FSP);

	/* NB: always enable DCU to wait for next fragment from QCU */
	dmisc = AR_D_MISC_FRAG_WAIT_EN;

	/* Enable exponential backoff window */
	dmisc |= AR_D_MISC_BKOFF_PERSISTENCE;

	/*
	 * The chip reset default is to use a DCU backoff threshold of 0x2.
	 * Restore this when programming the DCU MISC register.
	 */
	dmisc |= 0x2;

	/* multiqueue support */
	if (qi->tqi_cbrPeriod) {
		OS_REG_WRITE(ah, AR_QCBRCFG(q),
			  SM(qi->tqi_cbrPeriod,AR_Q_CBRCFG_CBR_INTERVAL)
			| SM(qi->tqi_cbrOverflowLimit, AR_Q_CBRCFG_CBR_OVF_THRESH));
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_CBR;
		if (qi->tqi_cbrOverflowLimit)
			qmisc |= AR_Q_MISC_CBR_EXP_CNTR_LIMIT;
	}

	if (qi->tqi_readyTime && (qi->tqi_type != HAL_TX_QUEUE_CAB)) {
		OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			  SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT)
			| AR_Q_RDYTIMECFG_ENA);
	}

	OS_REG_WRITE(ah, AR_DCHNTIME(q),
		  SM(qi->tqi_burstTime, AR_D_CHNTIME_DUR)
		| (qi->tqi_burstTime ? AR_D_CHNTIME_EN : 0));

	if (qi->tqi_readyTime &&
	    (qi->tqi_qflags & HAL_TXQ_RDYTIME_EXP_POLICY_ENABLE))
		qmisc |= AR_Q_MISC_RDYTIME_EXP_POLICY;
	if (qi->tqi_qflags & HAL_TXQ_DBA_GATED)
		qmisc = (qmisc &~ AR_Q_MISC_FSP) | AR_Q_MISC_FSP_DBA_GATED;
	if (MS(qmisc, AR_Q_MISC_FSP) != AR_Q_MISC_FSP_ASAP) {
		/*
		 * These are meaningful only when not scheduled asap.
		 */
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_BEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS0;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS0;
		if (qi->tqi_qflags & HAL_TXQ_CBR_DIS_QEMPTY)
			qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
		else
			qmisc &= ~AR_Q_MISC_CBR_INCR_DIS1;
	}

	if (qi->tqi_qflags & HAL_TXQ_BACKOFF_DISABLE)
		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
	if (qi->tqi_qflags & HAL_TXQ_FRAG_BURST_BACKOFF_ENABLE)
		dmisc |= AR_D_MISC_FRAG_BKOFF_EN;
	if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_GLOBAL)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	else if (qi->tqi_qflags & HAL_TXQ_ARB_LOCKOUT_INTRA)
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_INTRA_FR,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
	if (qi->tqi_qflags & HAL_TXQ_IGNORE_VIRTCOL)
		dmisc |= SM(AR_D_MISC_VIR_COL_HANDLING_IGNORE,
			    AR_D_MISC_VIR_COL_HANDLING);
	if (qi->tqi_qflags & HAL_TXQ_SEQNUM_INC_DIS)
		dmisc |= AR_D_MISC_SEQ_NUM_INCR_DIS;

	/*
	 * Fill in type-dependent bits.  Most of this can be
	 * removed by specifying the queue parameters in the
	 * driver; it's here for backwards compatibility.
	 */
	switch (qi->tqi_type) {
	case HAL_TX_QUEUE_BEACON:		/* beacon frames */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      |  AR_Q_MISC_BEACON_USE
		      |  AR_Q_MISC_CBR_INCR_DIS1;

		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL)
		      |  AR_D_MISC_BEACON_USE
		      |  AR_D_MISC_POST_FR_BKOFF_DIS;
		break;
	case HAL_TX_QUEUE_CAB:			/* CAB  frames */
		/*
		 * No longer enable AR_Q_MISC_RDYTIME_EXP_POLICY;
		 * there is an issue with the CAB queue not properly
		 * refreshing the TX descriptor if the TXE clear
		 * setting is used.
		 */
		qmisc |= AR_Q_MISC_FSP_DBA_GATED
		      |  AR_Q_MISC_CBR_INCR_DIS1
		      |  AR_Q_MISC_CBR_INCR_DIS0;
		HALDEBUG(ah, HAL_DEBUG_TXQUEUE, "%s: CAB: tqi_readyTime = %d\n",
		    __func__, qi->tqi_readyTime);
		if (qi->tqi_readyTime) {
			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
			    "%s: using tqi_readyTime\n", __func__);
			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			    SM(qi->tqi_readyTime, AR_Q_RDYTIMECFG_INT) |
			    AR_Q_RDYTIMECFG_ENA);
		} else {
			int value;
			/*
			 * NB: don't set default ready time if driver
			 * has explicitly specified something.  This is
			 * here solely for backwards compatibility.
			 */
			/*
			 * XXX for now, hard-code a CAB interval of 50%
			 * XXX of the total beacon interval.
			 *
			 * XXX This keeps Merlin and later based MACs
			 * XXX quite a bit happier (stops stuck beacons,
			 * XXX which I gather is because of such a long
			 * XXX cabq time.)
			 */
			value = (ahp->ah_beaconInterval * 50 / 100)
				- ah->ah_config.ah_additional_swba_backoff
				- ah->ah_config.ah_sw_beacon_response_time
				+ ah->ah_config.ah_dma_beacon_response_time;
			/*
			 * XXX Ensure it isn't too low - nothing lower
			 * XXX than 10 TU
			 */
			if (value < 10)
				value = 10;
			HALDEBUG(ah, HAL_DEBUG_TXQUEUE,
			    "%s: defaulting to rdytime = %d TU\n",
			    __func__, value);
			OS_REG_WRITE(ah, AR_QRDYTIMECFG(q),
			    SM(TU_TO_USEC(value), AR_Q_RDYTIMECFG_INT) |
			    AR_Q_RDYTIMECFG_ENA);
		}
		dmisc |= SM(AR_D_MISC_ARB_LOCKOUT_CNTRL_GLOBAL,
			    AR_D_MISC_ARB_LOCKOUT_CNTRL);
		break;
	case HAL_TX_QUEUE_PSPOLL:
		qmisc |= AR_Q_MISC_CBR_INCR_DIS1;
		break;
	case HAL_TX_QUEUE_UAPSD:
		dmisc |= AR_D_MISC_POST_FR_BKOFF_DIS;
		break;
	default:			/* NB: silence compiler */
		break;
	}

	OS_REG_WRITE(ah, AR_QMISC(q), qmisc);
	OS_REG_WRITE(ah, AR_DMISC(q), dmisc);

	/* Setup compression scratchpad buffer */
	/*
	 * XXX: calling this asynchronously to queue operation can
	 *      cause unexpected behavior!!!
	 */
	if (qi->tqi_physCompBuf) {
		HALASSERT(qi->tqi_type == HAL_TX_QUEUE_DATA ||
			  qi->tqi_type == HAL_TX_QUEUE_UAPSD);
		OS_REG_WRITE(ah, AR_Q_CBBS, (80 + 2*q));
		OS_REG_WRITE(ah, AR_Q_CBBA, qi->tqi_physCompBuf);
		OS_REG_WRITE(ah, AR_Q_CBC,  HAL_COMP_BUF_MAX_SIZE/1024);
		OS_REG_WRITE(ah, AR_Q0_MISC + 4*q,
			     OS_REG_READ(ah, AR_Q0_MISC + 4*q)
			     | AR_Q_MISC_QCU_COMP_EN);
	}

	/*
	 * Always update the secondary interrupt mask registers - this
	 * could be a new queue getting enabled in a running system or
	 * hw getting re-initialized during a reset!
	 *
	 * Since we don't differentiate between tx interrupts corresponding
	 * to individual queues - secondary tx mask regs are always unmasked;
	 * tx interrupts are enabled/disabled for all queues collectively
	 * using the primary mask reg
	 */
	if (qi->tqi_qflags & HAL_TXQ_TXOKINT_ENABLE)
		ahp->ah_txOkInterruptMask |= 1 << q;
	else
		ahp->ah_txOkInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXERRINT_ENABLE)
		ahp->ah_txErrInterruptMask |= 1 << q;
	else
		ahp->ah_txErrInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXDESCINT_ENABLE)
		ahp->ah_txDescInterruptMask |= 1 << q;
	else
		ahp->ah_txDescInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXEOLINT_ENABLE)
		ahp->ah_txEolInterruptMask |= 1 << q;
	else
		ahp->ah_txEolInterruptMask &= ~(1 << q);
	if (qi->tqi_qflags & HAL_TXQ_TXURNINT_ENABLE)
		ahp->ah_txUrnInterruptMask |= 1 << q;
	else
		ahp->ah_txUrnInterruptMask &= ~(1 << q);
	setTxQInterrupts(ah, qi);

	return AH_TRUE;
}
#undef	TU_TO_USEC