xref: /illumos-gate/usr/src/uts/common/io/arn/arn_xmit.c (revision fd7c5980)
1 /*
2  * Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
3  * Use is subject to license terms.
4  */
5 
6 /*
7  * Copyright (c) 2008 Atheros Communications Inc.
8  *
9  * Permission to use, copy, modify, and/or distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 #include <sys/param.h>
22 #include <sys/types.h>
23 #include <sys/signal.h>
24 #include <sys/stream.h>
25 #include <sys/termio.h>
26 #include <sys/errno.h>
27 #include <sys/file.h>
28 #include <sys/cmn_err.h>
29 #include <sys/stropts.h>
30 #include <sys/strsubr.h>
31 #include <sys/strtty.h>
32 #include <sys/kbio.h>
33 #include <sys/cred.h>
34 #include <sys/stat.h>
35 #include <sys/consdev.h>
36 #include <sys/kmem.h>
37 #include <sys/modctl.h>
38 #include <sys/ddi.h>
39 #include <sys/sunddi.h>
40 #include <sys/pci.h>
41 #include <sys/errno.h>
42 #include <sys/mac_provider.h>
43 #include <sys/dlpi.h>
44 #include <sys/ethernet.h>
45 #include <sys/list.h>
46 #include <sys/byteorder.h>
47 #include <sys/strsun.h>
48 #include <sys/policy.h>
49 #include <inet/common.h>
50 #include <inet/nd.h>
51 #include <inet/mi.h>
52 #include <inet/wifi_ioctl.h>
53 #include <sys/mac_wifi.h>
54 
55 #include "arn_core.h"
56 
57 #define	BITS_PER_BYTE		8
58 #define	OFDM_PLCP_BITS		22
59 #define	HT_RC_2_MCS(_rc)	((_rc) & 0x0f)
60 #define	HT_RC_2_STREAMS(_rc)	((((_rc) & 0x78) >> 3) + 1)
61 #define	L_STF			8
62 #define	L_LTF			8
63 #define	L_SIG			4
64 #define	HT_SIG			8
65 #define	HT_STF			4
66 #define	HT_LTF(_ns)		(4 * (_ns))
67 #define	SYMBOL_TIME(_ns)	((_ns) << 2) /* _ns symbols * 4 us */
68 #define	SYMBOL_TIME_HALFGI(_ns)	(((_ns) * 18 + 4) / 5)  /* _ns symbols * 3.6 us, rounded up */
69 #define	NUM_SYMBOLS_PER_USEC(_usec)	((_usec) >> 2)
70 #define	NUM_SYMBOLS_PER_USEC_HALFGI(_usec)	((((_usec) * 5) - 4) / 18)
71 
72 #define	OFDM_SIFS_TIME	16
73 
74 static uint32_t bits_per_symbol[][2] = {
75 	/* 20MHz 40MHz */
76 	{    26,  54 },		/*  0: BPSK */
77 	{    52,  108 },	/*  1: QPSK 1/2 */
78 	{    78,  162 },	/*  2: QPSK 3/4 */
79 	{   104,  216 },	/*  3: 16-QAM 1/2 */
80 	{   156,  324 },	/*  4: 16-QAM 3/4 */
81 	{   208,  432 },	/*  5: 64-QAM 2/3 */
82 	{   234,  486 },	/*  6: 64-QAM 3/4 */
83 	{   260,  540 },	/*  7: 64-QAM 5/6 */
84 	{    52,  108 },	/*  8: BPSK */
85 	{   104,  216 },	/*  9: QPSK 1/2 */
86 	{   156,  324 },	/* 10: QPSK 3/4 */
87 	{   208,  432 },	/* 11: 16-QAM 1/2 */
88 	{   312,  648 },	/* 12: 16-QAM 3/4 */
89 	{   416,  864 },	/* 13: 64-QAM 2/3 */
90 	{   468,  972 },	/* 14: 64-QAM 3/4 */
91 	{   520,  1080 },	/* 15: 64-QAM 5/6 */
92 };
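/*
 * Editorial worked example of how this table is used: MCS 7 on a 20 MHz
 * channel carries bits_per_symbol[7][0] = 260 bits per OFDM symbol. A
 * 1500-byte MPDU therefore needs
 *	nbits    = 1500 * BITS_PER_BYTE + OFDM_PLCP_BITS = 12022
 *	nsymbols = (12022 + 260 - 1) / 260 = 47
 * which costs SYMBOL_TIME(47) = 188 us with the full GI, or
 * SYMBOL_TIME_HALFGI(47) = 170 us with the short GI. ath_pkt_duration()
 * below performs this same calculation.
 */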
93 
94 #define	IS_HT_RATE(_rate)	((_rate) & 0x80)
95 
96 #ifdef ARN_TX_AGGREGATION
97 static void arn_tx_send_ht_normal(struct arn_softc *sc, struct ath_txq *txq,
98     struct ath_atx_tid *tid, list_t *bf_list);
99 static void arn_tx_complete_buf(struct arn_softc *sc, struct ath_buf *bf,
100     list_t *bf_q, int txok, int sendbar);
101 static void arn_tx_txqaddbuf(struct arn_softc *sc, struct ath_txq *txq,
102     list_t *buf_list);
103 static void arn_buf_set_rate(struct arn_softc *sc, struct ath_buf *bf);
104 static int arn_tx_num_badfrms(struct arn_softc *sc,
105     struct ath_buf *bf, int txok);
106 static void arn_tx_rc_status(struct ath_buf *bf, struct ath_desc *ds,
107     int nbad, int txok, boolean_t update_rc);
108 #endif
109 
110 static void
111 arn_get_beaconconfig(struct arn_softc *sc, struct ath_beacon_config *conf)
112 {
113 	ieee80211com_t *ic = (ieee80211com_t *)sc;
114 	struct ieee80211_node *in = ic->ic_bss;
115 
116 	/* fill in beacon config data */
117 
118 	conf->beacon_interval = in->in_intval ?
119 	    in->in_intval : ATH_DEFAULT_BINTVAL;
120 	conf->listen_interval = 100;
121 	conf->dtim_count = 1;
122 	conf->bmiss_timeout = ATH_DEFAULT_BMISS_LIMIT * conf->listen_interval;
123 }
124 
125 /* Aggregation logic */
126 
127 #ifdef ARN_TX_AGGREGATION
128 
129 /* Check if it's okay to send out aggregates */
130 static int
131 arn_aggr_query(struct arn_softc *sc, struct ath_node *an, uint8_t tidno)
132 {
133 	struct ath_atx_tid *tid;
134 	tid = ATH_AN_2_TID(an, tidno);
135 
136 	if (tid->state & AGGR_ADDBA_COMPLETE ||
137 	    tid->state & AGGR_ADDBA_PROGRESS)
138 		return (1);
139 	else
140 		return (0);
141 }
142 
143 /*
144  * queue up a dest/ac pair for tx scheduling
145  * NB: must be called with txq lock held
146  */
147 static void
148 arn_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
149 {
150 	struct ath_atx_ac *ac = tid->ac;
151 
152 	/* if tid is paused, hold off */
153 	if (tid->paused)
154 		return;
155 
156 	/* add tid to ac at most once */
157 	if (tid->sched)
158 		return;
159 
160 	tid->sched = B_TRUE;
161 	list_insert_tail(&ac->tid_q, &tid->list);
162 
163 	/* add node ac to txq at most once */
164 	if (ac->sched)
165 		return;
166 
167 	ac->sched = B_TRUE;
168 	list_insert_tail(&txq->axq_acq, &ac->list);
169 }
170 
171 /* pause a tid */
172 static void
173 arn_tx_pause_tid(struct arn_softc *sc, struct ath_atx_tid *tid)
174 {
175 	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
176 
177 	mutex_enter(&txq->axq_lock);
178 
179 	tid->paused++;
180 
181 	mutex_exit(&txq->axq_lock);
182 }
183 
184 /* resume a tid and schedule aggregate */
185 void
186 arn_tx_resume_tid(struct arn_softc *sc, struct ath_atx_tid *tid)
187 {
188 	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
189 
190 	ASSERT(tid->paused > 0);
191 	mutex_enter(&txq->axq_lock);
192 
193 	tid->paused--;
194 
195 	if (tid->paused > 0)
196 		goto unlock;
197 
198 	if (list_empty(&tid->buf_q))
199 		goto unlock;
200 
201 	/*
202 	 * Add this TID to scheduler and try to send out aggregates
203 	 */
204 	arn_tx_queue_tid(txq, tid);
205 	arn_txq_schedule(sc, txq);
206 unlock:
207 	mutex_exit(&txq->axq_lock);
208 }
209 
210 /* flush tid's software queue and send frames as non-ampdu's */
211 static void
212 arn_tx_flush_tid(struct arn_softc *sc, struct ath_atx_tid *tid)
213 {
214 	struct ath_txq *txq = &sc->sc_txq[tid->ac->qnum];
215 	struct ath_buf *bf;
216 
217 	list_t list;
218 	list_create(&list, sizeof (struct ath_buf),
219 	    offsetof(struct ath_buf, bf_node));
220 
221 	ASSERT(tid->paused > 0);
222 	mutex_enter(&txq->axq_lock);
223 
224 	tid->paused--;
225 
226 	if (tid->paused > 0) {
227 		mutex_exit(&txq->axq_lock);
228 		return;
229 	}
230 
231 	while (!list_empty(&tid->buf_q)) {
232 		bf = list_head(&tid->buf_q);
233 		ASSERT(!bf_isretried(bf));
234 		list_remove(&tid->buf_q, bf);
235 		list_insert_tail(&list, bf);
236 		arn_tx_send_ht_normal(sc, txq, tid, &list);
237 	}
238 
239 	mutex_exit(&txq->axq_lock);
240 }
241 
242 /* Update block ack window */
243 static void
244 arn_tx_update_baw(struct arn_softc *sc, struct ath_atx_tid *tid, int seqno)
245 {
246 	int index, cindex;
247 
248 	index  = ATH_BA_INDEX(tid->seq_start, seqno);
249 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
250 
251 	tid->tx_buf[cindex] = NULL;
252 
253 	while (tid->baw_head != tid->baw_tail && !tid->tx_buf[tid->baw_head]) {
254 		INCR(tid->seq_start, IEEE80211_SEQ_MAX);
255 		INCR(tid->baw_head, ATH_TID_MAX_BUFS);
256 	}
257 }
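/*
 * Editorial sketch of the window arithmetic above: with seq_start = 100
 * and baw_head at the slot for sequence 100, completing seqno 102 gives
 * index = ATH_BA_INDEX(100, 102) = 2 and clears that circular slot; the
 * while loop then advances seq_start/baw_head past any leading slots that
 * have already completed, keeping the window anchored at the oldest
 * still-outstanding subframe.
 */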
258 
259 /* Add a sub-frame to block ack window */
260 static void
261 arn_tx_addto_baw(struct arn_softc *sc, struct ath_atx_tid *tid,
262     struct ath_buf *bf)
263 {
264 	int index, cindex;
265 
266 	if (bf_isretried(bf))
267 		return;
268 
269 	index  = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
270 	cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
271 
272 	ASSERT(tid->tx_buf[cindex] == NULL);
273 	tid->tx_buf[cindex] = bf;
274 
275 	if (index >= ((tid->baw_tail - tid->baw_head) &
276 	    (ATH_TID_MAX_BUFS - 1))) {
277 		tid->baw_tail = cindex;
278 		INCR(tid->baw_tail, ATH_TID_MAX_BUFS);
279 	}
280 }
281 
282 /*
283  * TODO: For frame(s) that are in the retry state, we will reuse the
284  * sequence number(s) without setting the retry bit. The
285  * alternative is to give up on these and BAR the receiver's window
286  * forward.
287  */
288 static void
289 arn_tid_drain(struct arn_softc *sc,
290     struct ath_txq *txq,
291     struct ath_atx_tid *tid)
292 {
293 	struct ath_buf *bf;
294 
295 	list_t list;
296 	list_create(&list, sizeof (struct ath_buf),
297 	    offsetof(struct ath_buf, bf_node));
298 
299 	for (;;) {
300 		if (list_empty(&tid->buf_q))
301 			break;
302 
303 		bf = list_head(&tid->buf_q);
304 		list_remove(&tid->buf_q, bf);
305 		list_insert_tail(&list, bf);
306 
307 		if (bf_isretried(bf))
308 			arn_tx_update_baw(sc, tid, bf->bf_seqno);
309 
310 		mutex_enter(&txq->axq_lock);
311 		arn_tx_complete_buf(sc, bf, &list, 0, 0);
312 		mutex_exit(&txq->axq_lock);
313 	}
314 
315 	tid->seq_next = tid->seq_start;
316 	tid->baw_tail = tid->baw_head;
317 }
318 
319 static void
320 arn_tx_set_retry(struct arn_softc *sc, struct ath_buf *bf)
321 {
322 	struct ieee80211_frame *wh;
323 	wh = (struct ieee80211_frame *)bf->bf_dma.mem_va;
324 
325 	bf->bf_state.bf_type |= BUF_RETRY;
326 	bf->bf_retries++;
327 
328 	wh->i_fc[1] |= IEEE80211_FC1_RETRY;	/* mark the MPDU as a retransmission */
329 }
330 
331 static struct ath_buf *
332 arn_clone_txbuf(struct arn_softc *sc, struct ath_buf *bf)
333 {
334 	struct ath_buf *tbf;
335 
336 	mutex_enter(&sc->sc_txbuflock);
337 	ASSERT(!list_empty((&sc->sc_txbuf_list)));
338 
339 	tbf = list_head(&sc->sc_txbuf_list);
340 	list_remove(&sc->sc_txbuf_list, tbf);
341 	mutex_exit(&sc->sc_txbuflock);
342 
343 	ATH_TXBUF_RESET(tbf);
344 
345 	tbf->bf_daddr = bf->bf_daddr; /* physical addr of desc */
346 	tbf->bf_dma = bf->bf_dma; /* dma area for buf */
347 	*(tbf->bf_desc) = *(bf->bf_desc); /* copy descriptor contents */
348 	tbf->bf_state = bf->bf_state; /* buffer state */
349 
350 	return (tbf);
351 }
352 
353 static void
354 arn_tx_complete_aggr(struct arn_softc *sc, struct ath_txq *txq,
355     struct ath_buf *bf, list_t *bf_q, int txok)
356 {
357 	struct ieee80211_node *in = bf->bf_in; /* node assumed saved at buffer setup */
358 	struct ath_node *an = NULL;
359 	struct ath_atx_tid *tid = NULL;
360 	struct ath_buf *bf_next, *bf_last = bf->bf_lastbf;
361 	struct ath_desc *ds = bf_last->bf_desc;
362 
363 	list_t list, list_pending;
364 	uint16_t seq_st = 0, acked_cnt = 0, txfail_cnt = 0;
365 	uint32_t ba[WME_BA_BMP_SIZE >> 5];
366 	int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
367 	boolean_t rc_update = B_TRUE;
368 
369 	an = ATH_NODE(in); /* Be sure in != NULL */
370 	tid = ATH_AN_2_TID(an, bf->bf_tidno);
371 
372 	isaggr = bf_isaggr(bf);
373 	memset(ba, 0, WME_BA_BMP_SIZE >> 3);
374 
375 	if (isaggr && txok) {
376 		if (ATH_DS_TX_BA(ds)) {
377 			seq_st = ATH_DS_BA_SEQ(ds);
378 			memcpy(ba, ATH_DS_BA_BITMAP(ds),
379 			    WME_BA_BMP_SIZE >> 3);
380 		} else {
381 			/*
382 			 * The AR5416 can become deaf/mute when a BA
383 			 * issue happens and the chip then needs a reset,
384 			 * but the AP code may have synchronization issues
385 			 * when performing an internal reset in this routine.
386 			 * Only enable the reset in STA mode for now.
387 			 */
388 			if (sc->sc_ah->ah_opmode == ATH9K_M_STA)
389 				needreset = 1;
390 		}
391 	}
392 
393 	list_create(&list_pending, sizeof (struct ath_buf),
394 	    offsetof(struct ath_buf, bf_node));
395 	list_create(&list, sizeof (struct ath_buf),
396 	    offsetof(struct ath_buf, bf_node));
397 
398 	nbad = arn_tx_num_badfrms(sc, bf, txok);
399 	while (bf) {
400 		txfail = txpending = 0;
401 		bf_next = bf->bf_next;
402 
403 		if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) {
404 			/*
405 			 * transmit completion, subframe is
406 			 * acked by block ack
407 			 */
408 			acked_cnt++;
409 		} else if (!isaggr && txok) {
410 			/* transmit completion */
411 			acked_cnt++;
412 		} else {
413 			if (!(tid->state & AGGR_CLEANUP) &&
414 			    ds->ds_txstat.ts_flags != ATH9K_TX_SW_ABORTED) {
415 				if (bf->bf_retries < ATH_MAX_SW_RETRIES) {
416 					arn_tx_set_retry(sc, bf);
417 					txpending = 1;
418 				} else {
419 					bf->bf_state.bf_type |= BUF_XRETRY;
420 					txfail = 1;
421 					sendbar = 1;
422 					txfail_cnt++;
423 				}
424 			} else {
425 				/*
426 				 * cleanup in progress, just fail
427 				 * the un-acked sub-frames
428 				 */
429 				txfail = 1;
430 			}
431 		}
432 
433 		if (bf_next == NULL) {
434 			/* INIT_LIST_HEAD */
435 			list_create(&list, sizeof (struct ath_buf),
436 			    offsetof(struct ath_buf, bf_node));
437 		} else {
438 			ASSERT(!list_empty(bf_q));
439 			list_remove(bf_q, bf);
440 			list_insert_tail(&list, bf);
441 		}
442 
443 		if (!txpending) {
444 			/*
445 			 * complete the acked-ones/xretried ones; update
446 			 * block-ack window
447 			 */
448 			mutex_enter(&txq->axq_lock);
449 			arn_tx_update_baw(sc, tid, bf->bf_seqno);
450 			mutex_exit(&txq->axq_lock);
451 
452 			if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
453 				arn_tx_rc_status(bf, ds, nbad, txok, B_TRUE);
454 				rc_update = B_FALSE;
455 			} else {
456 				arn_tx_rc_status(bf, ds, nbad, txok, B_FALSE);
457 			}
458 
459 			arn_tx_complete_buf(sc, bf, &list, !txfail, sendbar);
460 		} else {
461 			/* retry the un-acked ones */
462 			if (bf->bf_next == NULL &&
463 			    bf_last->bf_status & ATH_BUFSTATUS_STALE) {
464 				struct ath_buf *tbf;
465 
466 				tbf = arn_clone_txbuf(sc, bf_last);
467 				ath9k_hw_cleartxdesc(sc->sc_ah, tbf->bf_desc);
468 				list_insert_tail(&list, tbf);
469 			} else {
470 				/*
471 				 * Clear descriptor status words for
472 				 * software retry
473 				 */
474 				ath9k_hw_cleartxdesc(sc->sc_ah, bf->bf_desc);
475 			}
476 
477 			/*
478 			 * Put this buffer to the temporary pending
479 			 * queue to retain ordering
480 			 */
481 			list_splice_tail_init(&list, &list_pending);
482 			/*
483 			 * Insert src list after dst list.
484 			 * Empty src list thereafter
485 			 */
486 			list_move_tail(&list_pending, &list);
487 			/* should re-initialize list here??? */
488 		}
489 
490 		bf = bf_next;
491 	}
492 
493 	if (tid->state & AGGR_CLEANUP) {
494 		if (tid->baw_head == tid->baw_tail) {
495 			tid->state &= ~AGGR_ADDBA_COMPLETE;
496 			tid->addba_exchangeattempts = 0;
497 			tid->state &= ~AGGR_CLEANUP;
498 
499 			/* send buffered frames as singles */
500 			arn_tx_flush_tid(sc, tid);
501 		}
502 		return;
503 	}
504 
505 	/*
506 	 * prepend un-acked frames to the beginning of
507 	 * the pending frame queue
508 	 */
509 
510 	if (!list_empty(&list_pending)) {
511 		mutex_enter(&txq->axq_lock);
512 		list_move_tail(&list_pending, &tid->buf_q);
513 		arn_tx_queue_tid(txq, tid);
514 		mutex_exit(&txq->axq_lock);
515 	}
516 }
517 
518 static uint32_t
519 arn_lookup_rate(struct arn_softc *sc, struct ath_buf *bf,
520     struct ath_atx_tid *tid)
521 {
522 	struct ath_rate_table *rate_table = sc->sc_currates;
523 	struct ath9k_tx_rate *rates;
524 	struct ath_tx_info_priv *tx_info_priv;
525 	uint32_t max_4ms_framelen, frmlen;
526 	uint16_t aggr_limit, legacy = 0, maxampdu;
527 	int i;
528 
529 	/* ???  */
530 	rates = (struct ath9k_tx_rate *)bf->rates;
531 	tx_info_priv = (struct ath_tx_info_priv *)&bf->tx_info_priv;
532 
533 	/*
534 	 * Find the lowest frame length among the rate series that will have a
535 	 * 4ms transmit duration.
536 	 * TODO - TXOP limit needs to be considered.
537 	 */
538 	max_4ms_framelen = ATH_AMPDU_LIMIT_MAX;
539 
540 	for (i = 0; i < 4; i++) {
541 		if (rates[i].count) {
542 			if (!WLAN_RC_PHY_HT
543 			    (rate_table->info[rates[i].idx].phy)) {
544 				legacy = 1;
545 				break;
546 			}
547 
548 			frmlen =
549 			    rate_table->info[rates[i].idx].max_4ms_framelen;
550 			max_4ms_framelen = min(max_4ms_framelen, frmlen);
551 		}
552 	}
553 
554 	/*
555 	 * Limit the aggregate size by the minimum rate if the selected rate
556 	 * is not a probe rate; if the selected rate is a probe rate, then
557 	 * avoid aggregating this packet altogether.
558 	 */
559 	if (legacy)
560 		return (0);
561 
562 	aggr_limit = min(max_4ms_framelen, (uint32_t)ATH_AMPDU_LIMIT_DEFAULT);
563 
564 	/*
565 	 * The hardware can accept aggregates up to 16-bit lengths (65535).
566 	 * The IE, however, can hold up to 65536, which shows up here
567 	 * as zero. Ignore 65536 since we are constrained by the hardware.
568 	 */
569 	maxampdu = tid->an->maxampdu;
570 	if (maxampdu)
571 		aggr_limit = min(aggr_limit, maxampdu);
572 
573 	return (aggr_limit);
574 }
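/*
 * Editorial example with assumed numbers: if the slowest HT rate in the
 * series permits max_4ms_framelen = 32768 bytes while
 * ATH_AMPDU_LIMIT_DEFAULT is smaller, the default wins; a peer that
 * advertised maxampdu = 16384 then clamps the aggregate to 16384 bytes.
 * Any legacy (non-HT) rate in the series disables aggregation entirely
 * by returning 0 above.
 */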
575 
576 /*
577  * Returns the number of delimiters to be added to
578  * meet the minimum required MPDU density.
579  * The caller should make sure that the rate is an HT rate.
580  */
581 static int
582 arn_compute_num_delims(struct arn_softc *sc, struct ath_atx_tid *tid,
583     struct ath_buf *bf, uint16_t frmlen)
584 {
585 	struct ath_rate_table *rt = sc->sc_currates;
586 	struct ath9k_tx_rate *rates = (struct ath9k_tx_rate *)bf->rates;
587 	uint32_t nsymbits, nsymbols, mpdudensity;
588 	uint16_t minlen;
589 	uint8_t rc, flags, rix;
590 	int width, half_gi, ndelim, mindelim;
591 
592 	/* Select standard number of delimiters based on frame length alone */
593 	ndelim = ATH_AGGR_GET_NDELIM(frmlen);
594 
595 	/*
596 	 * If encryption is enabled, the hardware requires some more padding
597 	 * between subframes.
598 	 * TODO - this could be improved to be dependent on the rate.
599 	 * The hardware can keep up at lower rates, but not at higher rates.
600 	 */
601 	if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR)
602 		ndelim += ATH_AGGR_ENCRYPTDELIM;
603 
604 	/*
605 	 * Convert the desired MPDU density from microseconds to bytes, based
606 	 * on the highest rate in the rate series (i.e. the first rate), to
607 	 * determine the required minimum length for a subframe. Take into
608 	 * account whether the highest rate is 20 or 40 MHz and half or full GI.
609 	 */
610 	mpdudensity = tid->an->mpdudensity;
611 
612 	/*
613 	 * If there is no MPDU density restriction, no further calculation
614 	 * is needed.
615 	 */
616 	if (mpdudensity == 0)
617 		return (ndelim);
618 
619 	rix = rates[0].idx;
620 	flags = rates[0].flags;
621 	rc = rt->info[rix].ratecode;
622 	width = (flags & ATH9K_TX_RC_40_MHZ_WIDTH) ? 1 : 0;
623 	half_gi = (flags & ATH9K_TX_RC_SHORT_GI) ? 1 : 0;
624 
625 	if (half_gi)
626 		nsymbols = NUM_SYMBOLS_PER_USEC_HALFGI(mpdudensity);
627 	else
628 		nsymbols = NUM_SYMBOLS_PER_USEC(mpdudensity);
629 
630 	if (nsymbols == 0)
631 		nsymbols = 1;
632 
633 	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
634 	minlen = (nsymbols * nsymbits) / BITS_PER_BYTE;
635 
636 	if (frmlen < minlen) {
637 		mindelim = (minlen - frmlen) / ATH_AGGR_DELIM_SZ;
638 		ndelim = max(mindelim, ndelim);
639 	}
640 
641 	return (ndelim);
642 }
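/*
 * Editorial worked example, assuming the usual 4-byte delimiter size for
 * ATH_AGGR_DELIM_SZ: for an MPDU density of 8 us at MCS 7, 40 MHz, full
 * GI: nsymbols = NUM_SYMBOLS_PER_USEC(8) = 2 and nsymbits =
 * bits_per_symbol[7][1] = 540, so minlen = (2 * 540) / 8 = 135 bytes.
 * A 60-byte subframe then needs mindelim = (135 - 60) / 4 = 18 extra
 * delimiters on top of the standard count.
 */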
643 
644 static enum ATH_AGGR_STATUS
645 arn_tx_form_aggr(struct arn_softc *sc, struct ath_atx_tid *tid,
646     list_t *bf_q)
647 {
648 #define	PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
649 	struct ath_buf *bf, *bf_first, *bf_prev = NULL;
650 	int rl = 0, nframes = 0, ndelim, prev_al = 0;
651 	uint16_t aggr_limit = 0, al = 0, bpad = 0,
652 	    al_delta, h_baw = tid->baw_size / 2;
653 	enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
654 
655 	bf_first = list_head(&tid->buf_q);
656 
657 	do {
658 		bf = list_head(&tid->buf_q);
659 
660 		/* do not step over block-ack window */
661 		if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) {
662 			status = ATH_AGGR_BAW_CLOSED;
663 			break;
664 		}
665 
666 		if (!rl) {
667 			aggr_limit = arn_lookup_rate(sc, bf, tid);
668 			rl = 1;
669 		}
670 
671 		/* do not exceed aggregation limit */
672 		al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen;
673 
674 		if (nframes &&
675 		    (aggr_limit < (al + bpad + al_delta + prev_al))) {
676 			status = ATH_AGGR_LIMITED;
677 			break;
678 		}
679 
680 		/* do not exceed subframe limit */
681 		if (nframes >= min((int)h_baw, ATH_AMPDU_SUBFRAME_DEFAULT)) {
682 			status = ATH_AGGR_LIMITED;
683 			break;
684 		}
685 		nframes++;
686 
687 		/* add padding for previous frame to aggregation length */
688 		al += bpad + al_delta;
689 
690 		/*
691 		 * Get the delimiters needed to meet the MPDU
692 		 * density for this node.
693 		 */
694 		ndelim =
695 		    arn_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen);
696 		bpad = PADBYTES(al_delta) + (ndelim << 2);
697 
698 		bf->bf_next = NULL;
699 		bf->bf_desc->ds_link = 0;
700 
701 		/* link buffers of this frame to the aggregate */
702 		arn_tx_addto_baw(sc, tid, bf);
703 		ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
704 		list_remove(&tid->buf_q, bf);
705 		list_insert_tail(bf_q, bf);
706 		if (bf_prev) {
707 			bf_prev->bf_next = bf;
708 			bf_prev->bf_desc->ds_link = bf->bf_daddr;
709 		}
710 		bf_prev = bf;
711 	} while (!list_empty(&tid->buf_q));
712 
713 	bf_first->bf_al = al;
714 	bf_first->bf_nframes = nframes;
715 
716 	return (status);
717 #undef PADBYTES
718 }
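/*
 * Editorial note on the padding math above: subframes are padded to
 * 4-byte boundaries, so a 1534-byte subframe contributes
 * PADBYTES(1534) = 2 bytes toward the next subframe's offset, in
 * addition to the (ndelim << 2) delimiter bytes.
 */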
719 
720 static void
721 arn_tx_sched_aggr(struct arn_softc *sc, struct ath_txq *txq,
722     struct ath_atx_tid *tid)
723 {
724 	struct ath_buf *bf;
725 	enum ATH_AGGR_STATUS status;
726 	list_t bf_q;
727 
728 	do {
729 		if (list_empty(&tid->buf_q))
730 			return;
731 
732 		/* INIT_LIST_HEAD */
733 		list_create(&bf_q, sizeof (struct ath_buf),
734 		    offsetof(struct ath_buf, bf_node));
735 
736 		status = arn_tx_form_aggr(sc, tid, &bf_q);
737 
738 		/*
739 		 * no frames picked up to be aggregated;
740 		 * block-ack window is not open.
741 		 */
742 		if (list_empty(&bf_q))
743 			break;
744 
745 		bf = list_head(&bf_q);
746 		bf->bf_lastbf = list_object(&bf_q, bf->bf_node.list_prev);
747 
748 		/* if only one frame, send as non-aggregate */
749 		if (bf->bf_nframes == 1) {
750 			bf->bf_state.bf_type &= ~BUF_AGGR;
751 			ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
752 			ath_buf_set_rate(sc, bf);
753 			arn_tx_txqaddbuf(sc, txq, &bf_q);
754 			continue;
755 		}
756 
757 		/* setup first desc of aggregate */
758 		bf->bf_state.bf_type |= BUF_AGGR;
759 		ath_buf_set_rate(sc, bf);
760 		ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al);
761 
762 		/* anchor last desc of aggregate */
763 		ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
764 
765 		txq->axq_aggr_depth++;
766 		arn_tx_txqaddbuf(sc, txq, &bf_q);
767 
768 	} while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH &&
769 	    status != ATH_AGGR_BAW_CLOSED);
770 }
771 
772 int
773 arn_tx_aggr_start(struct arn_softc *sc, struct ieee80211_node *in,
774     uint16_t tid, uint16_t *ssn)
775 {
776 	struct ath_atx_tid *txtid;
777 	struct ath_node *an;
778 
779 	an = ATH_NODE(in);
780 
781 	if (sc->sc_flags & SC_OP_TXAGGR) {
782 		txtid = ATH_AN_2_TID(an, tid);
783 		txtid->state |= AGGR_ADDBA_PROGRESS;
784 		arn_tx_pause_tid(sc, txtid);
785 		*ssn = txtid->seq_start;
786 	}
787 
788 	return (0);
789 }
790 
791 int
792 arn_tx_aggr_stop(struct arn_softc *sc, struct ieee80211_node *in, uint16_t tid)
793 {
794 	struct ath_node *an = ATH_NODE(in);
795 	struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
796 	struct ath_txq *txq = &sc->sc_txq[txtid->ac->qnum];
797 	struct ath_buf *bf;
798 
799 	list_t list;
800 	list_create(&list, sizeof (struct ath_buf),
801 	    offsetof(struct ath_buf, bf_node));
802 
803 	if (txtid->state & AGGR_CLEANUP)
804 		return (0);
805 
806 	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
807 		txtid->addba_exchangeattempts = 0;
808 		return (0);
809 	}
810 
811 	arn_tx_pause_tid(sc, txtid);
812 
813 	/* drop all software retried frames and mark this TID */
814 	mutex_enter(&txq->axq_lock);
815 	while (!list_empty(&txtid->buf_q)) {
816 		/* list_first_entry */
817 		bf = list_head(&txtid->buf_q);
818 		if (!bf_isretried(bf)) {
819 			/*
820 			 * NB: this is based on the assumption that
821 			 * a software-retried frame will always stay
822 			 * at the head of the software queue.
823 			 */
824 			break;
825 		}
826 		list_remove(&txtid->buf_q, bf);
827 		list_insert_tail(&list, bf);
828 		arn_tx_update_baw(sc, txtid, bf->bf_seqno);
829 		/* arn_tx_complete_buf(sc, bf, &list, 0, 0); */ /* to do */
830 	}
831 	mutex_exit(&txq->axq_lock);
832 
833 	if (txtid->baw_head != txtid->baw_tail) {
834 		txtid->state |= AGGR_CLEANUP;
835 	} else {
836 		txtid->state &= ~AGGR_ADDBA_COMPLETE;
837 		txtid->addba_exchangeattempts = 0;
838 		arn_tx_flush_tid(sc, txtid);
839 	}
840 
841 	return (0);
842 }
843 
844 void
845 arn_tx_aggr_resume(struct arn_softc *sc,
846     struct ieee80211_node *in,
847     uint16_t tid)
848 {
849 	struct ath_atx_tid *txtid;
850 	struct ath_node *an;
851 
852 	an = ATH_NODE(in);
853 
854 	if (sc->sc_flags & SC_OP_TXAGGR) {
855 		txtid = ATH_AN_2_TID(an, tid);
856 		txtid->baw_size = (0x8) << sc->sc_ht_conf.ampdu_factor;
857 		txtid->state |= AGGR_ADDBA_COMPLETE;
858 		txtid->state &= ~AGGR_ADDBA_PROGRESS;
859 		arn_tx_resume_tid(sc, txtid);
860 	}
861 }
862 
863 boolean_t
864 arn_tx_aggr_check(struct arn_softc *sc, struct ath_node *an, uint8_t tidno)
865 {
866 	struct ath_atx_tid *txtid;
867 
868 	if (!(sc->sc_flags & SC_OP_TXAGGR))
869 		return (B_FALSE);
870 
871 	txtid = ATH_AN_2_TID(an, tidno);
872 
873 	if (!(txtid->state & AGGR_ADDBA_COMPLETE)) {
874 		if (!(txtid->state & AGGR_ADDBA_PROGRESS) &&
875 		    (txtid->addba_exchangeattempts < ADDBA_EXCHANGE_ATTEMPTS)) {
876 			txtid->addba_exchangeattempts++;
877 			return (B_TRUE);
878 		}
879 	}
880 
881 	return (B_FALSE);
882 }
883 
884 /* Queue Management */
885 
886 static void
887 arn_txq_drain_pending_buffers(struct arn_softc *sc, struct ath_txq *txq)
888 {
889 	struct ath_atx_ac *ac, *ac_tmp;
890 	struct ath_atx_tid *tid, *tid_tmp;
891 
892 	list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq) {
893 		list_remove(&txq->axq_acq, ac);
894 		ac->sched = B_FALSE;
895 		list_for_each_entry_safe(tid, tid_tmp, &ac->tid_q) {
896 			list_remove(&ac->tid_q, tid);
897 			tid->sched = B_FALSE;
898 			arn_tid_drain(sc, txq, tid);
899 		}
900 	}
901 }
902 
903 int
904 arn_tx_get_qnum(struct arn_softc *sc, int qtype, int haltype)
905 {
906 	int qnum;
907 
908 	switch (qtype) {
909 	case ATH9K_TX_QUEUE_DATA:
910 		if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
911 			ARN_DBG((ARN_DBG_FATAL, "arn: arn_tx_get_qnum(): "
912 			    "HAL AC %u out of range, max %zu!\n",
913 			    haltype, ARRAY_SIZE(sc->sc_haltype2q)));
914 			return (-1);
915 		}
916 		qnum = sc->sc_haltype2q[haltype];
917 		break;
918 	case ATH9K_TX_QUEUE_BEACON:
919 		qnum = sc->sc_beaconq;
920 		break;
921 	case ATH9K_TX_QUEUE_CAB:
922 		qnum = sc->sc_cabq->axq_qnum;
923 		break;
924 	default:
925 		qnum = -1;
926 	}
927 	return (qnum);
928 }
929 
930 struct ath_txq *
931 arn_test_get_txq(struct arn_softc *sc, struct ieee80211_node *in,
932     struct ieee80211_frame *wh, uint8_t type)
933 {
934 	struct ieee80211_qosframe *qwh = NULL;
935 	struct ath_txq *txq = NULL;
936 	int tid = -1;
937 	int qos_ac = WME_AC_BE;	/* default when no QoS TID applies */
938 	int qnum;
939 
940 	if (in->in_flags & IEEE80211_NODE_QOS) {
941 
942 		if ((type & IEEE80211_FC0_TYPE_MASK) ==
943 		    IEEE80211_FC0_TYPE_DATA) {
944 
945 			if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
946 				qwh = (struct ieee80211_qosframe *)wh;
947 
948 				tid = qwh->i_qos[0] & IEEE80211_QOS_TID;
949 				switch (tid) {
950 				case 1:
951 				case 2:
952 					qos_ac = WME_AC_BK; break;
953 				case 0:
954 				case 3:
955 					qos_ac = WME_AC_BE; break;
956 				case 4:
957 				case 5:
958 					qos_ac = WME_AC_VI; break;
959 				case 6:
960 				case 7:
961 					qos_ac = WME_AC_VO; break;
962 				}
963 			}
964 		} else {
965 			qos_ac = WME_AC_VO;
966 		}
967 	} else if ((type & IEEE80211_FC0_TYPE_MASK) ==
968 	    IEEE80211_FC0_TYPE_MGT) {
969 			qos_ac = WME_AC_VO;
970 	} else if ((type & IEEE80211_FC0_TYPE_MASK) ==
971 	    IEEE80211_FC0_TYPE_CTL) {
972 			qos_ac = WME_AC_VO;
973 	} else {
974 			qos_ac = WME_AC_BK;
975 	}
976 	qnum = arn_get_hal_qnum(qos_ac, sc);
977 	txq = &sc->sc_txq[qnum];
978 
979 	mutex_enter(&txq->axq_lock);
980 
981 	if (txq->axq_depth >= (ATH_TXBUF - 20)) {
982 		ARN_DBG((ARN_DBG_XMIT,
983 		    "TX queue: %d is full, depth: %d\n",
984 		    qnum, txq->axq_depth));
985 		/* stop the queue */
986 		sc->sc_resched_needed = B_TRUE;
987 		txq->stopped = 1;
988 		mutex_exit(&txq->axq_lock);
989 		return (NULL);
990 	}
991 
992 	mutex_exit(&txq->axq_lock);
993 
994 	return (txq);
995 }
996 
997 /* Called only when tx aggregation is enabled and HT is supported */
998 static void
999 assign_aggr_tid_seqno(struct arn_softc *sc,
1000     struct ath_buf *bf,
1001     struct ieee80211_frame *wh)
1002 {
1003 	struct ath_node *an;
1004 	struct ath_atx_tid *tid;
1005 	struct ieee80211_node *in;
1006 	struct ieee80211_qosframe *qwh = NULL;
1007 	ieee80211com_t *ic = (ieee80211com_t *)sc;
1008 
1009 	in = ieee80211_find_txnode(ic, wh->i_addr1);
1010 	if (in == NULL) {
1011 		arn_problem("assign_aggr_tid_seqno(): "
1012 		    "failed to find tx node\n");
1013 		return;
1014 	}
1015 	an = ATH_NODE(in);
1016 
1017 	/* Get tidno */
1018 	if (in->in_flags & IEEE80211_NODE_QOS) {
1019 		if (wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_QOS) {
1020 			qwh = (struct ieee80211_qosframe *)wh;
1021 			bf->bf_tidno = qwh->i_qos[0] & IEEE80211_QOS_TID;
1022 		}
1023 	}
1024 
1025 	/* Get seqno */
1026 	/*
1027 	 * For HT capable stations, we save tidno for later use.
1028 	 * We also override seqno set by upper layer with the one
1029 	 * in tx aggregation state.
1030 	 *
1031 	 * If fragmentation is on, the sequence number is
1032 	 * not overridden, since it has been
1033 	 * incremented by the fragmentation routine.
1034 	 *
1035 	 * FIXME: check if the fragmentation threshold exceeds
1036 	 * IEEE80211 max.
1037 	 */
1038 	tid = ATH_AN_2_TID(an, bf->bf_tidno);
1039 
1040 	*(uint16_t *)&wh->i_seq[0] =
1041 	    LE_16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
1042 	bf->bf_seqno = tid->seq_next;
1043 	/* LINTED E_CONSTANT_CONDITION */
1044 	INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1045 
1046 	/* release node */
1047 	ieee80211_free_node(in);
1048 }
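/*
 * Editorial example of the sequence-control packing above: with
 * tid->seq_next = 0x123, the field becomes
 * LE_16(0x123 << IEEE80211_SEQ_SEQ_SHIFT), i.e. sequence number 0x123
 * with fragment number 0, and seq_next then wraps modulo
 * IEEE80211_SEQ_MAX via INCR().
 */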
1049 
1050 /* Compute the number of bad frames */
1051 /* ARGSUSED */
1052 static int
1053 arn_tx_num_badfrms(struct arn_softc *sc, struct ath_buf *bf, int txok)
1054 {
1055 	struct ath_buf *bf_last = bf->bf_lastbf;
1056 	struct ath_desc *ds = bf_last->bf_desc;
1057 	uint16_t seq_st = 0;
1058 	uint32_t ba[WME_BA_BMP_SIZE >> 5];
1059 	int ba_index;
1060 	int nbad = 0;
1061 	int isaggr = 0;
1062 
1063 	if (ds->ds_txstat.ts_flags == ATH9K_TX_SW_ABORTED)
1064 		return (0);
1065 
1066 	isaggr = bf_isaggr(bf);
1067 	if (isaggr) {
1068 		seq_st = ATH_DS_BA_SEQ(ds);
1069 		memcpy(ba, ATH_DS_BA_BITMAP(ds), WME_BA_BMP_SIZE >> 3);
1070 	}
1071 
1072 	while (bf) {
1073 		ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1074 		if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1075 			nbad++;
1076 
1077 		bf = bf->bf_next;
1078 	}
1079 
1080 	return (nbad);
1081 }
1082 
1083 static void
1084 arn_tx_send_ht_normal(struct arn_softc *sc,
1085     struct ath_txq *txq,
1086     struct ath_atx_tid *tid,
1087     list_t *list)
1088 {
1089 	struct ath_buf *bf;
1090 
1091 	bf = list_head(list);
1092 	bf->bf_state.bf_type &= ~BUF_AMPDU;
1093 
1094 	/* update starting sequence number for subsequent ADDBA request */
1095 	INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1096 
1097 	bf->bf_nframes = 1;
1098 	bf->bf_lastbf = bf;
1099 	ath_buf_set_rate(sc, bf);
1100 	arn_tx_txqaddbuf(sc, txq, list);
1101 }
1102 
1103 /*
1104  * Insert a chain of ath_buf (descriptors) on a txq and
1105  * assume the descriptors are already chained together by caller.
1106  */
1107 static void
1108 arn_tx_txqaddbuf(struct arn_softc *sc,
1109     struct ath_txq *txq,
1110     list_t *list)
1111 {
1112 	struct ath_buf *bf;
1113 
1114 	/*
1115 	 * Insert the frame on the outbound list and
1116 	 * pass it on to the hardware.
1117 	 */
1118 
1119 	if (list_empty(list))
1120 		return;
1121 
1122 	bf = list_head(list);
1123 
1124 	list_splice_tail_init(list, &txq->axq_q);
1125 
1126 	txq->axq_depth++;
1127 	txq->axq_totalqueued++;
1128 	txq->axq_linkbuf = list_object(list, txq->axq_q.prev);
1129 
1130 	ARN_DBG((ARN_DBG_QUEUE,
1131 	    "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth));
1132 
1133 	if (txq->axq_link == NULL) {
1134 		ath9k_hw_puttxbuf(sc->sc_ah, txq->axq_qnum, bf->bf_daddr);
1135 		ARN_DBG((ARN_DBG_XMIT,
1136 		    "TXDP[%u] = %llx (%p)\n",
1137 		    txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc));
1138 	} else {
1139 		*txq->axq_link = bf->bf_daddr;
1140 		ARN_DBG((ARN_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
1141 		    txq->axq_qnum, txq->axq_link,
1142 		    ito64(bf->bf_daddr), bf->bf_desc));
1143 	}
1144 	txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
1145 	ath9k_hw_txstart(sc->sc_ah, txq->axq_qnum);
1146 }
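/*
 * Editorial note: the queue above is a hardware-visible linked list. The
 * first chain is handed to the chip through the TXDP register via
 * ath9k_hw_puttxbuf(); later chains are appended by patching the previous
 * tail descriptor's ds_link with the new chain's physical address, after
 * which ath9k_hw_txstart() (re)kicks the DMA engine.
 */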
1147 #endif /* ARN_TX_AGGREGATION */
1148 
1149 static struct ath_buf *
1150 arn_tx_get_buffer(struct arn_softc *sc)
1151 {
1152 	struct ath_buf *bf = NULL;
1153 
1154 	mutex_enter(&sc->sc_txbuflock);
1155 	bf = list_head(&sc->sc_txbuf_list);
1156 	/* Check if a tx buffer is available */
1157 	if (bf != NULL)
1158 		list_remove(&sc->sc_txbuf_list, bf);
1159 	if (list_empty(&sc->sc_txbuf_list)) {
1160 		ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx(): "
1161 		    "stop queue\n"));
1162 		sc->sc_stats.ast_tx_qstop++;
1163 	}
1164 	mutex_exit(&sc->sc_txbuflock);
1165 
1166 	return (bf);
1167 }
1168 
1169 static uint32_t
1170 setup_tx_flags(struct arn_softc *sc,
1171     struct ieee80211_frame *wh,
1172     uint32_t pktlen)
1173 {
1174 	int flags = 0;
1175 	ieee80211com_t *ic = (ieee80211com_t *)sc;
1176 
1177 	flags |= ATH9K_TXDESC_CLRDMASK; /* needed for crypto errors */
1178 	flags |= ATH9K_TXDESC_INTREQ;
1179 
1180 	if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
1181 		flags |= ATH9K_TXDESC_NOACK;	/* no ack on broad/multicast */
1182 		sc->sc_stats.ast_tx_noack++;
1183 	}
1184 	if (pktlen > ic->ic_rtsthreshold) {
1185 		flags |= ATH9K_TXDESC_RTSENA;	/* RTS based on frame length */
1186 		sc->sc_stats.ast_tx_rts++;
1187 	}
1188 
1189 	return (flags);
1190 }
1191 
1192 static void
1193 ath_tx_setup_buffer(struct arn_softc *sc, struct ath_buf *bf,
1194     struct ieee80211_node *in, struct ieee80211_frame *wh,
1195     uint32_t pktlen, uint32_t keytype)
1196 {
1197 	ieee80211com_t *ic = (ieee80211com_t *)sc;
1198 	int i;
1199 
1200 	/* Buf reset */
1201 	ATH_TXBUF_RESET(bf);
1202 	for (i = 0; i < 4; i++) {
1203 		bf->rates[i].idx = -1;
1204 		bf->rates[i].flags = 0;
1205 		bf->rates[i].count = 1;
1206 	}
1207 
1208 	bf->bf_in = in;
1209 	/* LINTED E_ASSIGN_NARROW_CONV */
1210 	bf->bf_frmlen = pktlen;
1211 
1212 	/* Frame type */
1213 	IEEE80211_IS_DATA(wh) ?
1214 	    (bf->bf_state.bf_type |= BUF_DATA) :
1215 	    (bf->bf_state.bf_type &= ~BUF_DATA);
1216 	IEEE80211_IS_BACK_REQ(wh) ?
1217 	    (bf->bf_state.bf_type |= BUF_BAR) :
1218 	    (bf->bf_state.bf_type &= ~BUF_BAR);
1219 	IEEE80211_IS_PSPOLL(wh) ?
1220 	    (bf->bf_state.bf_type |= BUF_PSPOLL) :
1221 	    (bf->bf_state.bf_type &= ~BUF_PSPOLL);
1222 	/*
1223 	 * The 802.11 layer marks whether or not we should
1224 	 * use short preamble based on the current mode and
1225 	 * negotiated parameters.
1226 	 */
1227 	((ic->ic_flags & IEEE80211_F_SHPREAMBLE) &&
1228 	    (in->in_capinfo & IEEE80211_CAPINFO_SHORT_PREAMBLE)) ?
1229 	    (bf->bf_state.bf_type |= BUF_SHORT_PREAMBLE) :
1230 	    (bf->bf_state.bf_type &= ~BUF_SHORT_PREAMBLE);
1231 
1232 	bf->bf_flags = setup_tx_flags(sc, wh, pktlen);
1233 
1234 	/* Crypto */
1235 	bf->bf_keytype = keytype;
1236 
1237 	/* Assign seqno, tidno for tx aggregation */
1238 
1239 #ifdef ARN_TX_AGGREGATION
1240 	if (ieee80211_is_data_qos(wh) && (sc->sc_flags & SC_OP_TXAGGR))
1241 		assign_aggr_tid_seqno(sc, bf, wh);
1242 #endif /* ARN_TX_AGGREGATION */
1243 
1244 }
1245 
1246 /*
1247  * ath_pkt_dur - compute packet duration (NB: not NAV)
1248  *
1249  * rix - rate index
1250  * pktlen - total bytes (delims + data + fcs + pads + pad delims)
1251  * width  - 0 for 20 MHz, 1 for 40 MHz
1252  * half_gi - to use 4us v/s 3.6 us for symbol time
1253  */
1254 static uint32_t
1255 ath_pkt_duration(struct arn_softc *sc, uint8_t rix, struct ath_buf *bf,
1256     int width, int half_gi, boolean_t shortPreamble)
1257 {
1258 	struct ath_rate_table *rate_table = sc->sc_currates;
1259 	uint32_t nbits, nsymbits, duration, nsymbols;
1260 	uint8_t rc;
1261 	int streams, pktlen;
1262 
1263 	pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
1264 	rc = rate_table->info[rix].ratecode;
1265 
1266 	/* for legacy rates, use old function to compute packet duration */
1267 	if (!IS_HT_RATE(rc))
1268 		return (ath9k_hw_computetxtime(sc->sc_ah, rate_table, pktlen,
1269 		    rix, shortPreamble));
1270 
1271 	/* find number of symbols: PLCP + data */
1272 	nbits = (pktlen << 3) + OFDM_PLCP_BITS;
1273 	nsymbits = bits_per_symbol[HT_RC_2_MCS(rc)][width];
1274 	nsymbols = (nbits + nsymbits - 1) / nsymbits;
1275 
1276 	if (!half_gi)
1277 		duration = SYMBOL_TIME(nsymbols);
1278 	else
1279 		duration = SYMBOL_TIME_HALFGI(nsymbols);
1280 
1281 	/* add up duration for legacy/HT training and signal fields */
1282 	streams = HT_RC_2_STREAMS(rc);
1283 	duration += L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(streams);
1284 
1285 	return (duration);
1286 }
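/*
 * Editorial example: a single-stream HT frame pays a fixed preamble and
 * training cost of L_STF + L_LTF + L_SIG + HT_SIG + HT_STF + HT_LTF(1) =
 * 8 + 8 + 4 + 8 + 4 + 4 = 36 us on top of the data symbol time computed
 * above.
 */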
1287 
1288 /* Rate module function to set rate related fields in tx descriptor */
1289 static void
1290 ath_buf_set_rate(struct arn_softc *sc,
1291     struct ath_buf *bf,
1292     struct ieee80211_frame *wh)
1293 {
1294 	struct ath_hal *ah = sc->sc_ah;
1295 	struct ath_rate_table *rt;
1296 	struct ath_desc *ds = bf->bf_desc;
1297 	struct ath_desc *lastds = bf->bf_desc; /* temporary workaround */
1298 	struct ath9k_11n_rate_series series[4];
1299 	struct ath9k_tx_rate *rates;
1300 	int i, flags, rtsctsena = 0;
1301 	uint32_t ctsduration = 0;
1302 	uint8_t rix = 0, cix, ctsrate = 0;
1303 
1304 	(void) memset(series, 0, sizeof (struct ath9k_11n_rate_series) * 4);
1305 
1306 	rates = bf->rates;
1307 
1308 	if (IEEE80211_HAS_MOREFRAGS(wh) ||
1309 	    wh->i_seq[0] & IEEE80211_SEQ_FRAG_MASK) {
1310 		rates[1].count = rates[2].count = rates[3].count = 0;
1311 		rates[1].idx = rates[2].idx = rates[3].idx = 0;
1312 		rates[0].count = ATH_TXMAXTRY;
1313 	}
1314 
1315 	/* get the cix for the lowest valid rix */
1316 	rt = sc->sc_currates;
1317 	for (i = 3; i >= 0; i--) {
1318 		if (rates[i].count && (rates[i].idx >= 0)) {
1319 			rix = rates[i].idx;
1320 			break;
1321 		}
1322 	}
1323 
1324 	flags = (bf->bf_flags & (ATH9K_TXDESC_RTSENA | ATH9K_TXDESC_CTSENA));
1325 	cix = rt->info[rix].ctrl_rate;
1326 
1327 	/*
1328 	 * If 802.11g protection is enabled, determine whether to use RTS/CTS or
1329 	 * just CTS.  Note that this is only done for OFDM/HT unicast frames.
1330 	 */
1331 	if (sc->sc_protmode != PROT_M_NONE &&
1332 	    !(bf->bf_flags & ATH9K_TXDESC_NOACK) &&
1333 	    (rt->info[rix].phy == WLAN_RC_PHY_OFDM ||
1334 	    WLAN_RC_PHY_HT(rt->info[rix].phy))) {
1335 		if (sc->sc_protmode == PROT_M_RTSCTS)
1336 			flags = ATH9K_TXDESC_RTSENA;
1337 		else if (sc->sc_protmode == PROT_M_CTSONLY)
1338 			flags = ATH9K_TXDESC_CTSENA;
1339 
1340 		cix = rt->info[sc->sc_protrix].ctrl_rate;
1341 		rtsctsena = 1;
1342 	}
1343 
1344 	/*
1345 	 * For 11n, the default behavior is to enable RTS for hw retried frames.
1346 	 * We enable the global flag here and let rate series flags determine
1347 	 * which rates will actually use RTS.
1348 	 */
1349 	if ((ah->ah_caps.hw_caps & ATH9K_HW_CAP_HT) && bf_isdata(bf)) {
1350 		/* 802.11g protection not needed, use our default behavior */
1351 		if (!rtsctsena)
1352 			flags = ATH9K_TXDESC_RTSENA;
1353 	}
1354 
1355 	/* Set protection if aggregate protection on */
1356 	if (sc->sc_config.ath_aggr_prot &&
1357 	    (!bf_isaggr(bf) || (bf_isaggr(bf) && bf->bf_al < 8192))) {
1358 		flags = ATH9K_TXDESC_RTSENA;
1359 		cix = rt->info[sc->sc_protrix].ctrl_rate;
1360 		rtsctsena = 1;
1361 	}
1362 
1363 	/* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1364 	if (bf_isaggr(bf) && (bf->bf_al > ah->ah_caps.rts_aggr_limit))
1365 		flags &= ~(ATH9K_TXDESC_RTSENA);
1366 
1367 	/*
1368 	 * CTS transmit rate is derived from the transmit rate by looking in the
1369 	 * h/w rate table.  We must also factor in whether or not a short
1370 	 * preamble is to be used. NB: cix is set above where RTS/CTS is enabled
1371 	 */
1372 	ctsrate = rt->info[cix].ratecode |
1373 	    (bf_isshpreamble(bf) ? rt->info[cix].short_preamble : 0);
1374 
1375 	for (i = 0; i < 4; i++) {
1376 		if (!rates[i].count || (rates[i].idx < 0))
1377 			continue;
1378 
1379 		rix = rates[i].idx;
1380 
1381 		series[i].Rate = rt->info[rix].ratecode |
1382 		    (bf_isshpreamble(bf) ?
1383 		    rt->info[rix].short_preamble : 0);
1384 
1385 		series[i].Tries = rates[i].count;
1386 
1387 		series[i].RateFlags =
1388 		    ((rates[i].flags & ATH9K_TX_RC_USE_RTS_CTS) ?
1389 		    ATH9K_RATESERIES_RTS_CTS : 0) |
1390 		    ((rates[i].flags & ATH9K_TX_RC_40_MHZ_WIDTH) ?
1391 		    ATH9K_RATESERIES_2040 : 0) |
1392 		    ((rates[i].flags & ATH9K_TX_RC_SHORT_GI) ?
1393 		    ATH9K_RATESERIES_HALFGI : 0);
1394 
1395 		series[i].PktDuration = ath_pkt_duration(sc, rix, bf,
1396 		    (rates[i].flags & ATH9K_TX_RC_40_MHZ_WIDTH) != 0,
1397 		    (rates[i].flags & ATH9K_TX_RC_SHORT_GI),
1398 		    bf_isshpreamble(bf));
1399 
1400 		series[i].ChSel = sc->sc_tx_chainmask;
1401 
1402 		if (rtsctsena)
1403 			series[i].RateFlags |= ATH9K_RATESERIES_RTS_CTS;
1404 
1405 		ARN_DBG((ARN_DBG_RATE,
1406 		    "series[%d]--flags & ATH9K_TX_RC_USE_RTS_CTS = %08x"
1407 		    "--flags & ATH9K_TX_RC_40_MHZ_WIDTH = %08x"
1408 		    "--flags & ATH9K_TX_RC_SHORT_GI = %08x\n",
1409 		    rates[i].flags & ATH9K_TX_RC_USE_RTS_CTS,
1410 		    rates[i].flags & ATH9K_TX_RC_40_MHZ_WIDTH,
1411 		    rates[i].flags & ATH9K_TX_RC_SHORT_GI));
1412 
1413 		ARN_DBG((ARN_DBG_RATE,
1414 		    "series[%d]:"
1415 		    "dot11rate:%d"
1416 		    "index:%d"
1417 		    "retry count:%d\n",
1418 		    i,
1419 		    (rt->info[rates[i].idx].ratekbps)/1000,
1420 		    rates[i].idx,
1421 		    rates[i].count));
1422 	}
1423 
1424 	/* set dur_update_en for l-sig computation except for PS-Poll frames */
1425 	ath9k_hw_set11n_ratescenario(ah, ds, lastds, !bf_ispspoll(bf),
1426 	    ctsrate, ctsduration,
1427 	    series, 4, flags);
1428 
1429 	if (sc->sc_config.ath_aggr_prot && flags)
1430 		ath9k_hw_set11n_burstduration(ah, ds, 8192);
1431 }
1432 
1433 static void
1434 ath_tx_complete(struct arn_softc *sc, struct ath_buf *bf,
1435     struct ath_xmit_status *tx_status)
1436 {
1437 	boolean_t is_data = bf_isdata(bf);
1438 
1439 	ARN_DBG((ARN_DBG_XMIT, "TX complete\n"));
1440 
1441 	if (tx_status->flags & ATH_TX_BAR)
1442 		tx_status->flags &= ~ATH_TX_BAR;
1443 
1444 	bf->rates[0].count = tx_status->retries + 1;
1445 
1446 	arn_tx_status(sc, bf, is_data);
1447 }
1448 
1449 /* To complete a chain of buffers associated with a frame */
1450 static void
1451 ath_tx_complete_buf(struct arn_softc *sc, struct ath_buf *bf,
1452     int txok, int sendbar)
1453 {
1454 	struct ath_xmit_status tx_status;
1455 
1456 	/*
1457 	 * Set retry information.
1458 	 * NB: Don't use the information in the descriptor, because the frame
1459 	 * could be software retried.
1460 	 */
1461 	tx_status.retries = bf->bf_retries;
1462 	tx_status.flags = 0;
1463 
1464 	if (sendbar)
1465 		tx_status.flags = ATH_TX_BAR;
1466 
1467 	if (!txok) {
1468 		tx_status.flags |= ATH_TX_ERROR;
1469 
1470 		if (bf_isxretried(bf))
1471 			tx_status.flags |= ATH_TX_XRETRY;
1472 	}
1473 
1474 	/* complete this frame */
1475 	ath_tx_complete(sc, bf, &tx_status);
1476 
1477 	/*
1478 	 * Return the list of ath_buf of this mpdu to free queue
1479 	 */
1480 }
1481 
1482 static void
1483 arn_tx_stopdma(struct arn_softc *sc, struct ath_txq *txq)
1484 {
1485 	struct ath_hal *ah = sc->sc_ah;
1486 
1487 	(void) ath9k_hw_stoptxdma(ah, txq->axq_qnum);
1488 
1489 	ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx_stopdma(): "
1490 	    "tx queue [%u] %x, link %p\n",
1491 	    txq->axq_qnum,
1492 	    ath9k_hw_gettxbuf(ah, txq->axq_qnum), txq->axq_link));
1493 
1494 }
1495 
1496 /* Drain only the data queues */
1497 /* ARGSUSED */
1498 static void
1499 arn_drain_txdataq(struct arn_softc *sc, boolean_t retry_tx)
1500 {
1501 	struct ath_hal *ah = sc->sc_ah;
1502 	int i, status, npend = 0;
1503 
1504 	if (!(sc->sc_flags & SC_OP_INVALID)) {
1505 		for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1506 			if (ARN_TXQ_SETUP(sc, i)) {
1507 				arn_tx_stopdma(sc, &sc->sc_txq[i]);
1508 				/*
1509 				 * The TxDMA may not really be stopped.
1510 				 * Double check the hal tx pending count
1511 				 */
1512 				npend += ath9k_hw_numtxpending(ah,
1513 				    sc->sc_txq[i].axq_qnum);
1514 			}
1515 		}
1516 	}
1517 
1518 	if (npend) {
1519 		/* TxDMA not stopped, reset the hal */
1520 		ARN_DBG((ARN_DBG_XMIT, "arn: arn_drain_txdataq(): "
1521 		    "Unable to stop TxDMA. Reset HAL!\n"));
1522 
1523 		if (!ath9k_hw_reset(ah,
1524 		    sc->sc_ah->ah_curchan,
1525 		    sc->tx_chan_width,
1526 		    sc->sc_tx_chainmask, sc->sc_rx_chainmask,
1527 		    sc->sc_ht_extprotspacing, B_TRUE, &status)) {
1528 			ARN_DBG((ARN_DBG_FATAL, "arn: arn_drain_txdataq(): "
1529 			    "unable to reset hardware; hal status %u\n",
1530 			    status));
1531 		}
1532 	}
1533 
1534 	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1535 		if (ARN_TXQ_SETUP(sc, i))
1536 			arn_tx_draintxq(sc, &sc->sc_txq[i]);
1537 	}
1538 }
1539 
1540 /* Setup a h/w transmit queue */
1541 struct ath_txq *
1542 arn_txq_setup(struct arn_softc *sc, int qtype, int subtype)
1543 {
1544 	struct ath_hal *ah = sc->sc_ah;
1545 	struct ath9k_tx_queue_info qi;
1546 	int qnum;
1547 
1548 	(void) memset(&qi, 0, sizeof (qi));
1549 	qi.tqi_subtype = subtype;
1550 	qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
1551 	qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
1552 	qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
1553 	qi.tqi_physCompBuf = 0;
1554 
1555 	/*
1556 	 * Enable interrupts only for EOL and DESC conditions.
1557 	 * We mark tx descriptors to receive a DESC interrupt
1558 	 * when a tx queue gets deep; otherwise we wait for the
1559 	 * EOL to reap descriptors.  Note that this is done to
1560 	 * reduce interrupt load and this only defers reaping
1561 	 * descriptors, never transmitting frames.  Aside from
1562 	 * reducing interrupts this also permits more concurrency.
1563 	 * The only potential downside is if the tx queue backs
1564 	 * up, in which case the top half of the kernel may back up
1565 	 * due to a lack of tx descriptors.
1566 	 *
1567 	 * The UAPSD queue is an exception, since we take a desc-
1568 	 * based intr on the EOSP frames.
1569 	 */
1570 	if (qtype == ATH9K_TX_QUEUE_UAPSD)
1571 		qi.tqi_qflags = TXQ_FLAG_TXDESCINT_ENABLE;
1572 	else
1573 		qi.tqi_qflags = TXQ_FLAG_TXEOLINT_ENABLE |
1574 		    TXQ_FLAG_TXDESCINT_ENABLE;
1575 	qnum = ath9k_hw_setuptxqueue(ah, qtype, &qi);
1576 	if (qnum == -1) {
1577 		/*
1578 		 * NB: don't print a message, this happens
1579 		 * normally on parts with too few tx queues
1580 		 */
1581 		return (NULL);
1582 	}
1583 	if (qnum >= ARRAY_SIZE(sc->sc_txq)) {
1584 		ARN_DBG((ARN_DBG_FATAL, "arn: arn_txq_setup(): "
1585 		    "hal qnum %u out of range, max %u!\n",
1586 		    qnum, (unsigned int)ARRAY_SIZE(sc->sc_txq)));
1587 		(void) ath9k_hw_releasetxqueue(ah, qnum);
1588 		return (NULL);
1589 	}
1590 	if (!ARN_TXQ_SETUP(sc, qnum)) {
1591 		struct ath_txq *txq = &sc->sc_txq[qnum];
1592 
1593 		txq->axq_qnum = qnum;
1594 		txq->axq_intrcnt = 0; /* legacy */
1595 		txq->axq_link = NULL;
1596 
1597 		list_create(&txq->axq_list, sizeof (struct ath_buf),
1598 		    offsetof(struct ath_buf, bf_node));
1599 		list_create(&txq->axq_acq, sizeof (struct ath_buf),
1600 		    offsetof(struct ath_buf, bf_node));
1601 		mutex_init(&txq->axq_lock, NULL, MUTEX_DRIVER, NULL);
1602 
1603 		txq->axq_depth = 0;
1604 		txq->axq_aggr_depth = 0;
1605 		txq->axq_totalqueued = 0;
1606 		txq->axq_linkbuf = NULL;
1607 		sc->sc_txqsetup |= 1<<qnum;
1608 	}
1609 	return (&sc->sc_txq[qnum]);
1610 }
1611 
1612 /* Reclaim resources for a setup queue */
1613 
1614 void
1615 arn_tx_cleanupq(struct arn_softc *sc, struct ath_txq *txq)
1616 {
1617 	(void) ath9k_hw_releasetxqueue(sc->sc_ah, txq->axq_qnum);
1618 	sc->sc_txqsetup &= ~(1<<txq->axq_qnum);
1619 }
1620 
1621 /*
1622  * Setup a hardware data transmit queue for the specified
1623  * access category.  The hal may not support all requested
1624  * queues in which case it will return a reference to a
1625  * previously setup queue.  We record the mapping from ac's
1626  * to h/w queues for use by arn_tx_start and also track
1627  * the set of h/w queues being used to optimize work in the
1628  * transmit interrupt handler and related routines.
1629  */
1630 
1631 int
1632 arn_tx_setup(struct arn_softc *sc, int haltype)
1633 {
1634 	struct ath_txq *txq;
1635 
1636 	if (haltype >= ARRAY_SIZE(sc->sc_haltype2q)) {
1637 		ARN_DBG((ARN_DBG_FATAL, "arn: arn_tx_setup(): "
1638 		    "HAL AC %u out of range, max %zu!\n",
1639 		    haltype, ARRAY_SIZE(sc->sc_haltype2q)));
1640 		return (0);
1641 	}
1642 	txq = arn_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
1643 	if (txq != NULL) {
1644 		sc->sc_haltype2q[haltype] = txq->axq_qnum;
1645 		return (1);
1646 	} else
1647 		return (0);
1648 }
1649 
1650 void
1651 arn_tx_draintxq(struct arn_softc *sc, struct ath_txq *txq)
1652 {
1653 	struct ath_buf *bf;
1654 
1655 	/*
1656 	 * This assumes output has been stopped.
1657 	 */
1658 	for (;;) {
1659 		mutex_enter(&txq->axq_lock);
1660 		bf = list_head(&txq->axq_list);
1661 		if (bf == NULL) {
1662 			txq->axq_link = NULL;
1663 			mutex_exit(&txq->axq_lock);
1664 			break;
1665 		}
1666 		list_remove(&txq->axq_list, bf);
1667 		mutex_exit(&txq->axq_lock);
1668 		bf->bf_in = NULL;
1669 		mutex_enter(&sc->sc_txbuflock);
1670 		list_insert_tail(&sc->sc_txbuf_list, bf);
1671 		mutex_exit(&sc->sc_txbuflock);
1672 	}
1673 }
1674 
1675 /* Drain the transmit queues and reclaim resources */
1676 
1677 void
1678 arn_draintxq(struct arn_softc *sc, boolean_t retry_tx)
1679 {
1680 	/*
1681 	 * stop beacon queue. The beacon will be freed when
1682 	 * we go to INIT state
1683 	 */
1684 	if (!(sc->sc_flags & SC_OP_INVALID)) {
1685 		(void) ath9k_hw_stoptxdma(sc->sc_ah, sc->sc_beaconq);
1686 		ARN_DBG((ARN_DBG_XMIT, "arn: arn_draintxq(): "
1687 		    "beacon queue %x\n",
1688 		    ath9k_hw_gettxbuf(sc->sc_ah, sc->sc_beaconq)));
1689 	}
1690 
1691 	arn_drain_txdataq(sc, retry_tx);
1692 }
1693 
1694 uint32_t
1695 arn_txq_depth(struct arn_softc *sc, int qnum)
1696 {
1697 	return (sc->sc_txq[qnum].axq_depth);
1698 }
1699 
1700 uint32_t
1701 arn_txq_aggr_depth(struct arn_softc *sc, int qnum)
1702 {
1703 	return (sc->sc_txq[qnum].axq_aggr_depth);
1704 }
1705 
1706 /* Update parameters for a transmit queue */
1707 int
1708 arn_txq_update(struct arn_softc *sc, int qnum,
1709     struct ath9k_tx_queue_info *qinfo)
1710 {
1711 	struct ath_hal *ah = sc->sc_ah;
1712 	int error = 0;
1713 	struct ath9k_tx_queue_info qi;
1714 
1715 	if (qnum == sc->sc_beaconq) {
1716 		/*
1717 		 * XXX: for beacon queue, we just save the parameter.
1718 		 * It will be picked up by arn_beaconq_config() when
1719 		 * it's necessary.
1720 		 */
1721 		sc->sc_beacon_qi = *qinfo;
1722 		return (0);
1723 	}
1724 
1725 	ASSERT(sc->sc_txq[qnum].axq_qnum == qnum);
1726 
1727 	(void) ath9k_hw_get_txq_props(ah, qnum, &qi);
1728 	qi.tqi_aifs = qinfo->tqi_aifs;
1729 	qi.tqi_cwmin = qinfo->tqi_cwmin;
1730 	qi.tqi_cwmax = qinfo->tqi_cwmax;
1731 	qi.tqi_burstTime = qinfo->tqi_burstTime;
1732 	qi.tqi_readyTime = qinfo->tqi_readyTime;
1733 
1734 	if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
1735 		ARN_DBG((ARN_DBG_FATAL,
1736 		    "Unable to update hardware queue %u!\n", qnum));
1737 		error = -EIO;
1738 	} else {
1739 		(void) ath9k_hw_resettxqueue(ah, qnum); /* push to h/w */
1740 	}
1741 
1742 	return (error);
1743 }
1744 
1745 int
1746 ath_cabq_update(struct arn_softc *sc)
1747 {
1748 	struct ath9k_tx_queue_info qi;
1749 	int qnum = sc->sc_cabq->axq_qnum;
1750 	struct ath_beacon_config conf;
1751 
1752 	(void) ath9k_hw_get_txq_props(sc->sc_ah, qnum, &qi);
1753 	/*
1754 	 * Ensure the readytime % is within the bounds.
1755 	 */
1756 	if (sc->sc_config.cabqReadytime < ATH9K_READY_TIME_LO_BOUND)
1757 		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_LO_BOUND;
1758 	else if (sc->sc_config.cabqReadytime > ATH9K_READY_TIME_HI_BOUND)
1759 		sc->sc_config.cabqReadytime = ATH9K_READY_TIME_HI_BOUND;
1760 
1761 	arn_get_beaconconfig(sc, &conf);
1762 	qi.tqi_readyTime =
1763 	    (conf.beacon_interval * sc->sc_config.cabqReadytime) / 100;
1764 	(void) arn_txq_update(sc, qnum, &qi);
1765 
1766 	return (0);
1767 }
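/*
 * Editorial example: with conf.beacon_interval = 100 and cabqReadytime
 * clamped to 10 (percent), tqi_readyTime becomes (100 * 10) / 100 = 10,
 * i.e. the CAB queue drains for roughly 10% of each beacon interval.
 */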
1768 
1769 static uint32_t
1770 arn_tx_get_keytype(const struct ieee80211_cipher *cip)
1771 {
1772 	uint32_t index;
1773 	static const uint8_t ciphermap[] = {
1774 	    ATH9K_CIPHER_WEP,		/* IEEE80211_CIPHER_WEP */
1775 	    ATH9K_CIPHER_TKIP,		/* IEEE80211_CIPHER_TKIP */
1776 	    ATH9K_CIPHER_AES_OCB,	/* IEEE80211_CIPHER_AES_OCB */
1777 	    ATH9K_CIPHER_AES_CCM,	/* IEEE80211_CIPHER_AES_CCM */
1778 	    ATH9K_CIPHER_CKIP,		/* IEEE80211_CIPHER_CKIP */
1779 	    ATH9K_CIPHER_CLR,		/* IEEE80211_CIPHER_NONE */
1780 	};
1781 
1782 	ASSERT(cip->ic_cipher < ARRAY_SIZE(ciphermap));
1783 	index = cip->ic_cipher;
1784 
1785 	if (ciphermap[index] == ATH9K_CIPHER_WEP)
1786 		return (ATH9K_KEY_TYPE_WEP);
1787 	else if (ciphermap[index] == ATH9K_CIPHER_TKIP)
1788 		return (ATH9K_KEY_TYPE_TKIP);
1789 	else if (ciphermap[index] == ATH9K_CIPHER_AES_CCM)
1790 		return (ATH9K_KEY_TYPE_AES);
1791 
1792 	return (ATH9K_KEY_TYPE_CLEAR);
1793 
1794 }
1795 
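/*
 * Note that ciphers without a dedicated hardware key type above
 * (AES-OCB, CKIP and IEEE80211_CIPHER_NONE) all fall through to
 * ATH9K_KEY_TYPE_CLEAR.
 */
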
/* Display buffer */
void
arn_dump_line(unsigned char *p, uint32_t len, boolean_t isaddress,
    uint32_t group)
{
	char *pnumeric = "0123456789ABCDEF";
	char hex[((2 + 1) * 16) + 1];
	char *phex = hex;
	char ascii[16 + 1];
	char *pascii = ascii;
	uint32_t grouped = 0;

	if (isaddress) {
		arn_problem("arn: %p: ", (void *)p);
	} else {
		arn_problem("arn: ");
	}

	while (len) {
		*phex++ = pnumeric[((uint8_t)*p) / 16];
		*phex++ = pnumeric[((uint8_t)*p) % 16];
		if (++grouped >= group) {
			*phex++ = ' ';
			grouped = 0;
		}

		*pascii++ = (*p >= 32 && *p < 128) ? *p : '.';

		++p;
		--len;
	}

	*phex = '\0';
	*pascii = '\0';

	arn_problem("%-*s|%-*s|\n", (2 * 16) +
	    (16 / group), hex, 16, ascii);
}

void
arn_dump_pkg(unsigned char *p, uint32_t len, boolean_t isaddress,
    uint32_t group)
{
	uint32_t perline;

	while (len) {
		perline = (len < 16) ? len : 16;
		arn_dump_line(p, perline, isaddress, group);
		len -= perline;
		p += perline;
	}
}

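/*
 * A minimal usage sketch (a hypothetical debugging call, not part of
 * the driver's normal path): dump the first 64 bytes of a frame in
 * 16-byte rows, one byte per group, prefixed with addresses:
 *
 *	arn_dump_pkg((unsigned char *)bf->bf_dma.mem_va, 64, B_TRUE, 1);
 */
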
/*
 * The input parameter mp carries the following assumptions:
 * for data packets, the GLDv3 mac_wifi plugin allocates and fills
 * the ieee80211 header, while for management packets net80211
 * allocates and fills it. In both cases, enough space is left in
 * the header for the encryption option.
 */
static int32_t
arn_tx_start(struct arn_softc *sc, struct ieee80211_node *in,
    struct ath_buf *bf, mblk_t *mp)
{
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ieee80211_frame *wh = (struct ieee80211_frame *)mp->b_rptr;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_node *an;
	struct ath_desc *ds;
	struct ath_txq *txq;
	struct ath_rate_table *rt;
	enum ath9k_pkt_type atype;
	boolean_t shortPreamble = B_FALSE, is_padding = B_FALSE;
	uint32_t subtype, keytype = ATH9K_KEY_TYPE_CLEAR;
	int32_t keyix, iswep, hdrlen, pktlen, mblen, mbslen;
	caddr_t dest;

	/*
	 * The CRC is added by the hardware, not encapsulated by the
	 * driver, but it must be counted in the packet length.
	 */
	pktlen = IEEE80211_CRC_LEN;
	iswep = wh->i_fc[1] & IEEE80211_FC1_WEP;
	keyix = ATH9K_TXKEYIX_INVALID;
	hdrlen = ieee80211_hdrspace(ic, mp->b_rptr);
	if (hdrlen == 28)
		is_padding = B_TRUE;

	if (iswep != 0) {
		const struct ieee80211_cipher *cip;
		struct ieee80211_key *k;

		/*
		 * Construct the 802.11 header+trailer for an encrypted
		 * frame. The only reason this can fail is because of an
		 * unknown or unsupported cipher/key type.
		 */
		k = ieee80211_crypto_encap(ic, mp);
		if (k == NULL) {
			ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx_start "
			    "crypto_encap failed\n"));
			/*
			 * This can happen when the key is yanked after the
			 * frame was queued.  Just discard the frame; the
			 * 802.11 layer counts failures and provides
			 * debugging/diagnostics.
			 */
			return (EIO);
		}
		cip = k->wk_cipher;

		keytype = arn_tx_get_keytype(cip);

		/*
		 * Adjust the packet + header lengths for the crypto
		 * additions and calculate the h/w key index.  When
		 * a s/w MIC is done the frame will have had the MIC
		 * added to it prior to entry, so the mblk lengths
		 * already account for it. Otherwise we need to add it
		 * to the packet length.
		 */
		hdrlen += cip->ic_header;
		pktlen += cip->ic_trailer;
		if ((k->wk_flags & IEEE80211_KEY_SWMIC) == 0)
			pktlen += cip->ic_miclen;

		keyix = k->wk_keyix;

		/* packet header may have moved, reset our local pointer */
		wh = (struct ieee80211_frame *)mp->b_rptr;
	}

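	/*
	 * Gather the (possibly chained) mblks into the buffer's
	 * contiguous DMA area; mbslen ends up as the total number of
	 * bytes copied.
	 */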
	dest = bf->bf_dma.mem_va;
	for (; mp != NULL; mp = mp->b_cont) {
		mblen = MBLKL(mp);
		bcopy(mp->b_rptr, dest, mblen);
		dest += mblen;
	}
	mbslen = (uintptr_t)dest - (uintptr_t)bf->bf_dma.mem_va;
	pktlen += mbslen;
	if (is_padding && (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) ==
	    IEEE80211_FC0_TYPE_DATA)
		pktlen -= 2; /* remove the 2-byte pad; real pkt len */

	/* buf setup */
	ath_tx_setup_buffer(sc, bf, in, wh, pktlen, keytype);

	/* setup descriptors */
	ds = bf->bf_desc;
	rt = sc->sc_currates;
	ASSERT(rt != NULL);

	arn_get_rate(sc, bf, wh);
	an = (struct ath_node *)(in);

	/*
	 * Calculate the Atheros packet type from the IEEE80211 packet
	 * header and set up for rate calculations.
	 */
	switch (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_MGT:
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
		if (subtype == IEEE80211_FC0_SUBTYPE_BEACON)
			atype = ATH9K_PKT_TYPE_BEACON;
		else if (subtype == IEEE80211_FC0_SUBTYPE_PROBE_RESP)
			atype = ATH9K_PKT_TYPE_PROBE_RESP;
		else if (subtype == IEEE80211_FC0_SUBTYPE_ATIM)
			atype = ATH9K_PKT_TYPE_ATIM;
		else
			atype = ATH9K_PKT_TYPE_NORMAL;

		/* force all management frames to the highest queue */
		txq = &sc->sc_txq[arn_get_hal_qnum(WME_AC_VO, sc)];
		break;
	case IEEE80211_FC0_TYPE_CTL:
		atype = ATH9K_PKT_TYPE_PSPOLL;
		subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;

		/* force all ctl frames to the highest queue */
		txq = &sc->sc_txq[arn_get_hal_qnum(WME_AC_VO, sc)];
		break;
	case IEEE80211_FC0_TYPE_DATA:
		// arn_dump_pkg((unsigned char *)bf->bf_dma.mem_va,
		//    pktlen, 1, 1);
		atype = ATH9K_PKT_TYPE_NORMAL;

		/* always use the best-effort queue */
		txq = &sc->sc_txq[arn_get_hal_qnum(WME_AC_BE, sc)];
		break;
	default:
		/* unknown 802.11 frame */
		sc->sc_stats.ast_tx_invalid++;
		return (1);
	}

	/* setup descriptor */
	ds->ds_link = 0;
	ds->ds_data = bf->bf_dma.cookie.dmac_address;

	/*
	 * Formulate the first tx descriptor with tx controls.
	 */
	ath9k_hw_set11n_txdesc(ah, ds,
	    pktlen, /* packet length */
	    atype, /* Atheros packet type */
	    MAX_RATE_POWER, /* max tx power */
	    keyix, /* key cache index, or ATH9K_TXKEYIX_INVALID */
	    keytype, /* key type, ATH9K_KEY_TYPE_CLEAR if no crypto */
	    bf->bf_flags /* flags */);

	/* LINTED E_BAD_PTR_CAST_ALIGN */
	ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx_start(): to %s totlen=%d "
	    "an->an_tx_rate1sp=%d tx_rate2sp=%d tx_rate3sp=%d "
	    "qnum=%d sht=%d dur = %d\n",
	    ieee80211_macaddr_sprintf(wh->i_addr1), mbslen, an->an_tx_rate1sp,
	    an->an_tx_rate2sp, an->an_tx_rate3sp,
	    txq->axq_qnum, shortPreamble, *(uint16_t *)wh->i_dur));

	(void) ath9k_hw_filltxdesc(ah, ds,
	    mbslen,		/* segment length */
	    B_TRUE,		/* first segment */
	    B_TRUE,		/* last segment */
	    ds);		/* first descriptor */

	/* set rate related fields in tx descriptor */
	ath_buf_set_rate(sc, bf, wh);

	ARN_DMA_SYNC(bf->bf_dma, DDI_DMA_SYNC_FORDEV);

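	/*
	 * Append the frame to the h/w transmit queue: if the queue is
	 * idle (axq_link == NULL), hand the buffer address straight to
	 * the hardware; otherwise patch it into the previous
	 * descriptor's link word so the DMA engine chains to it.
	 */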
	mutex_enter(&txq->axq_lock);
	list_insert_tail(&txq->axq_list, bf);
	if (txq->axq_link == NULL) {
		(void) ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
	} else {
		*txq->axq_link = bf->bf_daddr;
	}
	txq->axq_link = &ds->ds_link;
	mutex_exit(&txq->axq_lock);

	// arn_dump_pkg((unsigned char *)bf->bf_dma.mem_va, pktlen, 1, 1);

	(void) ath9k_hw_txstart(ah, txq->axq_qnum);

	ic->ic_stats.is_tx_frags++;
	ic->ic_stats.is_tx_bytes += pktlen;

	return (0);
}

/*
 * Transmit a frame. Note that management frames come directly
 * from the 802.11 layer and do not honor the send queue flow control.
 */
/*
 * Upon failure the caller must free mp; this function frees it only
 * on success, or always for non-data frames.
 */
int
arn_tx(ieee80211com_t *ic, mblk_t *mp, uint8_t type)
{
	struct arn_softc *sc = (struct arn_softc *)ic;
	struct ath_hal *ah = sc->sc_ah;
	struct ieee80211_node *in = NULL;
	struct ath_buf *bf = NULL;
	struct ieee80211_frame *wh;
	int error = 0;

	ASSERT(mp->b_next == NULL);
	/* should check later */
	if (sc->sc_flags & SC_OP_INVALID) {
		if ((type & IEEE80211_FC0_TYPE_MASK) !=
		    IEEE80211_FC0_TYPE_DATA) {
			freemsg(mp);
		}
		return (ENXIO);
	}

	/* Grab a TX buffer */
	bf = arn_tx_get_buffer(sc);
	if (bf == NULL) {
		ARN_DBG((ARN_DBG_XMIT, "arn: arn_tx(): discard, "
		    "no xmit buf\n"));
		ic->ic_stats.is_tx_nobuf++;
		if ((type & IEEE80211_FC0_TYPE_MASK) ==
		    IEEE80211_FC0_TYPE_DATA) {
			sc->sc_stats.ast_tx_nobuf++;
			mutex_enter(&sc->sc_resched_lock);
			sc->sc_resched_needed = B_TRUE;
			mutex_exit(&sc->sc_resched_lock);
		} else {
			sc->sc_stats.ast_tx_nobufmgt++;
			freemsg(mp);
		}
		return (ENOMEM);
	}

	wh = (struct ieee80211_frame *)mp->b_rptr;

	/* Locate node */
	in = ieee80211_find_txnode(ic, wh->i_addr1);
	if (in == NULL) {
		error = EIO;
		goto bad;
	}

	in->in_inact = 0;
	switch (type & IEEE80211_FC0_TYPE_MASK) {
	case IEEE80211_FC0_TYPE_DATA:
		(void) ieee80211_encap(ic, mp, in);
		break;
	default:
		if ((wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK) ==
		    IEEE80211_FC0_SUBTYPE_PROBE_RESP) {
			/* fill in the time stamp */
			uint64_t tsf;
			uint32_t *tstamp;

			tsf = ath9k_hw_gettsf64(ah);
			/* adjust for a 100us delay until xmit */
			tsf += 100;
			/* LINTED E_BAD_PTR_CAST_ALIGN */
			tstamp = (uint32_t *)&wh[1];
			tstamp[0] = LE_32(tsf & 0xffffffff);
			tstamp[1] = LE_32(tsf >> 32);
		}
		sc->sc_stats.ast_tx_mgmt++;
		break;
	}

	error = arn_tx_start(sc, in, bf, mp);

	if (error != 0) {
bad:
		ic->ic_stats.is_tx_failed++;
		if (bf != NULL) {
			mutex_enter(&sc->sc_txbuflock);
			list_insert_tail(&sc->sc_txbuf_list, bf);
			mutex_exit(&sc->sc_txbuflock);
		}
	}
	if (in != NULL)
		ieee80211_free_node(in);
	if ((type & IEEE80211_FC0_TYPE_MASK) != IEEE80211_FC0_TYPE_DATA ||
	    error == 0) {
		freemsg(mp);
	}

	return (error);
}

static void
arn_printtxbuf(struct ath_buf *bf, int done)
{
	struct ath_desc *ds = bf->bf_desc;
	const struct ath_tx_status *ts = &ds->ds_txstat;

	ARN_DBG((ARN_DBG_XMIT, "arn: T(%p %p) %08x %08x %08x %08x %08x"
	    " %08x %08x %08x %c\n",
	    ds, bf->bf_daddr,
	    ds->ds_link, ds->ds_data,
	    ds->ds_ctl0, ds->ds_ctl1,
	    ds->ds_hw[0], ds->ds_hw[1], ds->ds_hw[2], ds->ds_hw[3],
	    !done ? ' ' : (ts->ts_status == 0) ? '*' : '!'));
}

/* ARGSUSED */
static void
ath_tx_rc_status(struct ath_buf *bf,
    struct ath_desc *ds,
    int nbad,
    int txok,
    boolean_t update_rc)
{
	struct ath_tx_info_priv *tx_info_priv =
	    (struct ath_tx_info_priv *)&bf->tx_info_priv;

	tx_info_priv->update_rc = B_FALSE;

	if ((ds->ds_txstat.ts_status & ATH9K_TXERR_FILT) == 0 &&
	    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
		if (bf_isdata(bf)) {
			(void) memcpy(&tx_info_priv->tx, &ds->ds_txstat,
			    sizeof (tx_info_priv->tx));
			tx_info_priv->n_frames = bf->bf_nframes;
			tx_info_priv->n_bad_frames = nbad;
			tx_info_priv->update_rc = B_TRUE;
		}
	}
}

/* Process completed xmit descriptors from the specified queue */
static int
arn_tx_processq(struct arn_softc *sc, struct ath_txq *txq)
{
	ieee80211com_t *ic = (ieee80211com_t *)sc;
	struct ath_hal *ah = sc->sc_ah;
	struct ath_buf *bf;
	struct ath_desc *ds;
	struct ieee80211_node *in;
	struct ath_tx_status *ts;
	struct ath_node *an;
	int32_t sr, lr, nacked = 0;
	int txok, nbad = 0;
	int status;

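	/*
	 * Walk the queue's descriptor list and retire each completed
	 * frame; stop at the first descriptor the hardware is still
	 * working on (EINPROGRESS).
	 */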
	for (;;) {
		mutex_enter(&txq->axq_lock);
		bf = list_head(&txq->axq_list);
		if (bf == NULL) {
			txq->axq_link = NULL;
			/* txq->axq_linkbuf = NULL; */
			mutex_exit(&txq->axq_lock);
			break;
		}
		ds = bf->bf_desc;	/* last descriptor */
		ts = &ds->ds_txstat;
		status = ath9k_hw_txprocdesc(ah, ds);

#ifdef DEBUG
		arn_printtxbuf(bf, status == 0);
#endif

		if (status == EINPROGRESS) {
			mutex_exit(&txq->axq_lock);
			break;
		}
		list_remove(&txq->axq_list, bf);
		mutex_exit(&txq->axq_lock);
		in = bf->bf_in;
		if (in != NULL) {
			an = ATH_NODE(in);
			/* Successful transmission */
			if (ts->ts_status == 0) {
				an->an_tx_ok++;
				an->an_tx_antenna = ts->ts_antenna;
				sc->sc_stats.ast_tx_rssidelta =
				    ts->ts_rssi - sc->sc_stats.ast_tx_rssi;
				sc->sc_stats.ast_tx_rssi = ts->ts_rssi;
			} else {
				an->an_tx_err++;
				if (ts->ts_status & ATH9K_TXERR_XRETRY) {
					sc->sc_stats.ast_tx_xretries++;
				}
				if (ts->ts_status & ATH9K_TXERR_FIFO) {
					sc->sc_stats.ast_tx_fifoerr++;
				}
				if (ts->ts_status & ATH9K_TXERR_FILT) {
					sc->sc_stats.ast_tx_filtered++;
				}
				an->an_tx_antenna = 0;	/* invalidate */
			}
			sr = ts->ts_shortretry;
			lr = ts->ts_longretry;
			sc->sc_stats.ast_tx_shortretry += sr;
			sc->sc_stats.ast_tx_longretry += lr;
			/*
			 * Hand the descriptor to the rate control algorithm.
			 */
			if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
			    (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0) {
				/*
				 * If the frame was ack'd, update the last rx
				 * time used to work around phantom bmiss
				 * interrupts.
				 */
				if (ts->ts_status == 0) {
					nacked++;
					an->an_tx_ok++;
				} else {
					an->an_tx_err++;
				}
				an->an_tx_retr += sr + lr;
			}
		}

		txok = (ds->ds_txstat.ts_status == 0);
		if (!bf_isampdu(bf)) {
			/*
			 * This frame is sent out as a single frame.
			 * Use hardware retry status for this frame.
			 */
			bf->bf_retries = ds->ds_txstat.ts_longretry;
			if (ds->ds_txstat.ts_status & ATH9K_TXERR_XRETRY)
				bf->bf_state.bf_type |= BUF_XRETRY;
			nbad = 0;
		}
		ath_tx_rc_status(bf, ds, nbad, txok, B_TRUE);

		ath_tx_complete_buf(sc, bf, txok, 0);

		// arn_dump_pkg((unsigned char *)bf->bf_dma.mem_va,
		//    bf->bf_frmlen, 1, 1);

		bf->bf_in = NULL;
		mutex_enter(&sc->sc_txbuflock);
		list_insert_tail(&sc->sc_txbuf_list, bf);
		mutex_exit(&sc->sc_txbuflock);

		/*
		 * Reschedule stalled outbound packets
		 */
		mutex_enter(&sc->sc_resched_lock);
		if (sc->sc_resched_needed) {
			sc->sc_resched_needed = B_FALSE;
			mac_tx_update(ic->ic_mach);
		}
		mutex_exit(&sc->sc_resched_lock);
	}

	return (nacked);
}

static void
arn_tx_handler(struct arn_softc *sc)
{
	int i;
	int nacked = 0;
	uint32_t qcumask = ((1 << ATH9K_NUM_TX_QUEUES) - 1);

	ath9k_hw_gettxintrtxqs(sc->sc_ah, &qcumask);

	/*
	 * Process each active queue.
	 */
	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ARN_TXQ_SETUP(sc, i) && (qcumask & (1 << i))) {
			nacked += arn_tx_processq(sc, &sc->sc_txq[i]);
		}
	}

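	/*
	 * An ack'd frame is proof the link is alive; record the time
	 * to suppress phantom beacon-miss interrupts (see the comment
	 * in arn_tx_processq()).
	 */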
	if (nacked)
		sc->sc_lastrx = ath9k_hw_gettsf64(sc->sc_ah);
}

/* Deferred processing of transmit interrupt */

void
arn_tx_int_proc(void *arg)
{
	struct arn_softc *sc = arg;

	arn_tx_handler(sc);
}

/* Node init & cleanup functions */

#ifdef ARN_TX_AGGREGATION
void
arn_tx_node_init(struct arn_softc *sc, struct ath_node *an)
{
	struct ath_atx_tid *tid;
	struct ath_atx_ac *ac;
	int tidno, acno;

	for (tidno = 0, tid = &an->tid[tidno]; tidno < WME_NUM_TID;
	    tidno++, tid++) {
		tid->an = an;
		tid->tidno = tidno;
		tid->seq_start = tid->seq_next = 0;
		tid->baw_size  = WME_MAX_BA;
		tid->baw_head  = tid->baw_tail = 0;
		tid->sched = B_FALSE;
		tid->paused = B_FALSE;
		tid->state &= ~AGGR_CLEANUP;
		list_create(&tid->buf_q, sizeof (struct ath_buf),
		    offsetof(struct ath_buf, bf_node));
		acno = TID_TO_WME_AC(tidno);
		tid->ac = &an->ac[acno];
		tid->state &= ~AGGR_ADDBA_COMPLETE;
		tid->state &= ~AGGR_ADDBA_PROGRESS;
		tid->addba_exchangeattempts = 0;
	}

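	/*
	 * Bind each WME access category's scheduling state to the
	 * hardware data queue set up for it.
	 */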
	for (acno = 0, ac = &an->ac[acno]; acno < WME_NUM_AC; acno++, ac++) {
		ac->sched = B_FALSE;
		list_create(&ac->tid_q, sizeof (struct ath_atx_tid),
		    offsetof(struct ath_atx_tid, list));

		switch (acno) {
		case WME_AC_BE:
			ac->qnum = arn_tx_get_qnum(sc,
			    ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
			break;
		case WME_AC_BK:
			ac->qnum = arn_tx_get_qnum(sc,
			    ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BK);
			break;
		case WME_AC_VI:
			ac->qnum = arn_tx_get_qnum(sc,
			    ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VI);
			break;
		case WME_AC_VO:
			ac->qnum = arn_tx_get_qnum(sc,
			    ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_VO);
			break;
		}
	}
}

void
arn_tx_node_cleanup(struct arn_softc *sc, struct ieee80211_node *in)
{
	int i;
	struct ath_atx_ac *ac, *ac_tmp;
	struct ath_atx_tid *tid, *tid_tmp;
	struct ath_buf *bf;
	struct ath_txq *txq;
	struct ath_node *an = ATH_NODE(in);

	for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
		if (ARN_TXQ_SETUP(sc, i)) {
			txq = &sc->sc_txq[i];

			mutex_enter(&txq->axq_lock);

			list_for_each_entry_safe(ac, ac_tmp, &txq->axq_acq) {
				tid = list_head(&ac->tid_q);
				if (tid && tid->an != an)
					continue;
				list_remove(&txq->axq_acq, ac);
				ac->sched = B_FALSE;

				list_for_each_entry_safe(tid, tid_tmp,
				    &ac->tid_q) {
					list_remove(&ac->tid_q, tid);
					/*
					 * Drop this node's reference from
					 * any buffer still pending on the
					 * TID queue.
					 */
					bf = list_head(&tid->buf_q);
					while (bf != NULL) {
						if (bf->bf_in == in)
							bf->bf_in = NULL;
						bf = list_next(&tid->buf_q,
						    bf);
					}
					tid->sched = B_FALSE;
					arn_tid_drain(sc, txq, tid);
					tid->state &= ~AGGR_ADDBA_COMPLETE;
					tid->addba_exchangeattempts = 0;
					tid->state &= ~AGGR_CLEANUP;
				}
			}

			mutex_exit(&txq->axq_lock);
		}
	}
}
#endif /* ARN_TX_AGGREGATION */