package/mac80211/patches/563-ath9k_simplify_tx_locking.patch
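
ath9k: simplify TX locking. Instead of each helper dropping and re-taking
txq->axq_lock around frame-completion calls, the callers now take the lock
once and the helpers run entirely under it: ath_tx_aggr_stop() releases the
lock only after ath_tx_flush_tid(), ath_tx_start() holds it across
ath_tx_start_dma(), and the __releases/__acquires sparse annotations on
ath_drain_txq_list() and ath_tx_process_buffer() go away along with the
redundant unlock/lock pairs.

The pattern being removed, as a rough sketch (only the ath9k identifiers are
taken from the diff below; the surrounding variables and elided bodies are
illustrative, not literal driver code):

        /* before: a helper bounces the queue lock around every completion */
        spin_lock_bh(&txq->axq_lock);
        /* ... dequeue a frame ... */
        spin_unlock_bh(&txq->axq_lock);
        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);   /* ran unlocked */
        spin_lock_bh(&txq->axq_lock);
        /* ... next frame ... */
        spin_unlock_bh(&txq->axq_lock);

        /* after: the caller takes axq_lock once and completion runs under it */
        spin_lock_bh(&txq->axq_lock);
        /* ... dequeue a frame ... */
        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);   /* under axq_lock */
        /* ... next frame ... */
        spin_unlock_bh(&txq->axq_lock);
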
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -169,13 +169,11 @@ static void ath_tx_flush_tid(struct ath_
        INIT_LIST_HEAD(&bf_head);
 
        memset(&ts, 0, sizeof(ts));
-       spin_lock_bh(&txq->axq_lock);
 
        while ((skb = __skb_dequeue(&tid->buf_q))) {
                fi = get_frame_info(skb);
                bf = fi->bf;
 
-               spin_unlock_bh(&txq->axq_lock);
                if (bf && fi->retries) {
                        list_add_tail(&bf->list, &bf_head);
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
@@ -184,7 +182,6 @@ static void ath_tx_flush_tid(struct ath_
                } else {
                        ath_tx_send_normal(sc, txq, NULL, skb);
                }
-               spin_lock_bh(&txq->axq_lock);
        }
 
        if (tid->baw_head == tid->baw_tail) {
@@ -192,8 +189,6 @@ static void ath_tx_flush_tid(struct ath_
                tid->state &= ~AGGR_CLEANUP;
        }
 
-       spin_unlock_bh(&txq->axq_lock);
-
        if (sendbar)
                ath_send_bar(tid, tid->seq_start);
 }
@@ -254,9 +249,7 @@ static void ath_tid_drain(struct ath_sof
                bf = fi->bf;
 
                if (!bf) {
-                       spin_unlock(&txq->axq_lock);
                        ath_tx_complete(sc, skb, ATH_TX_ERROR, txq);
-                       spin_lock(&txq->axq_lock);
                        continue;
                }
 
@@ -265,9 +258,7 @@ static void ath_tid_drain(struct ath_sof
                if (fi->retries)
                        ath_tx_update_baw(sc, tid, bf->bf_state.seqno);
 
-               spin_unlock(&txq->axq_lock);
                ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
-               spin_lock(&txq->axq_lock);
        }
 
        tid->seq_next = tid->seq_start;
@@ -525,9 +516,7 @@ static void ath_tx_complete_aggr(struct
                         * complete the acked-ones/xretried ones; update
                         * block-ack window
                         */
-                       spin_lock_bh(&txq->axq_lock);
                        ath_tx_update_baw(sc, tid, seqno);
-                       spin_unlock_bh(&txq->axq_lock);
 
                        if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
                                memcpy(tx_info->control.rates, rates, sizeof(rates));
@@ -550,9 +539,7 @@ static void ath_tx_complete_aggr(struct
                                 * run out of tx buf.
                                 */
                                if (!tbf) {
-                                       spin_lock_bh(&txq->axq_lock);
                                        ath_tx_update_baw(sc, tid, seqno);
-                                       spin_unlock_bh(&txq->axq_lock);
 
                                        ath_tx_complete_buf(sc, bf, txq,
                                                            &bf_head, ts, 0);
@@ -582,7 +569,6 @@ static void ath_tx_complete_aggr(struct
                if (an->sleeping)
                        ieee80211_sta_set_buffered(sta, tid->tidno, true);
 
-               spin_lock_bh(&txq->axq_lock);
                skb_queue_splice(&bf_pending, &tid->buf_q);
                if (!an->sleeping) {
                        ath_tx_queue_tid(txq, tid);
@@ -590,7 +576,6 @@ static void ath_tx_complete_aggr(struct
                        if (ts->ts_status & ATH9K_TXERR_FILT)
                                tid->ac->clear_ps_filter = true;
                }
-               spin_unlock_bh(&txq->axq_lock);
        }
 
        if (tid->state & AGGR_CLEANUP)
@@ -1190,9 +1175,9 @@ void ath_tx_aggr_stop(struct ath_softc *
                txtid->state |= AGGR_CLEANUP;
        else
                txtid->state &= ~AGGR_ADDBA_COMPLETE;
-       spin_unlock_bh(&txq->axq_lock);
 
        ath_tx_flush_tid(sc, txtid);
+       spin_unlock_bh(&txq->axq_lock);
 }
 
 void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
@@ -1434,8 +1419,6 @@ static bool bf_is_ampdu_not_probing(stru
 
 static void ath_drain_txq_list(struct ath_softc *sc, struct ath_txq *txq,
                               struct list_head *list, bool retry_tx)
-       __releases(txq->axq_lock)
-       __acquires(txq->axq_lock)
 {
        struct ath_buf *bf, *lastbf;
        struct list_head bf_head;
@@ -1462,13 +1445,11 @@ static void ath_drain_txq_list(struct at
                if (bf_is_ampdu_not_probing(bf))
                        txq->axq_ampdu_depth--;
 
-               spin_unlock_bh(&txq->axq_lock);
                if (bf_isampdu(bf))
                        ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
                                             retry_tx);
                else
                        ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0);
-               spin_lock_bh(&txq->axq_lock);
        }
 }
 
@@ -1847,8 +1828,6 @@ static void ath_tx_start_dma(struct ath_
        struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
        struct ath_buf *bf;
 
-       spin_lock_bh(&txctl->txq->axq_lock);
-
        if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
                /*
                 * Try aggregation if it's a unicast data frame
@@ -1858,7 +1837,7 @@ static void ath_tx_start_dma(struct ath_
        } else {
                bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb);
                if (!bf)
-                       goto out;
+                       return;
 
                bf->bf_state.bfs_paprd = txctl->paprd;
 
@@ -1867,9 +1846,6 @@ static void ath_tx_start_dma(struct ath_
 
                ath_tx_send_normal(sc, txctl->txq, tid, skb);
        }
-
-out:
-       spin_unlock_bh(&txctl->txq->axq_lock);
 }
 
 /* Upon failure caller should free skb */
@@ -1949,15 +1925,19 @@ int ath_tx_start(struct ieee80211_hw *hw
         */
 
        q = skb_get_queue_mapping(skb);
+
        spin_lock_bh(&txq->axq_lock);
+
        if (txq == sc->tx.txq_map[q] &&
            ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
                ieee80211_stop_queue(sc->hw, q);
                txq->stopped = 1;
        }
-       spin_unlock_bh(&txq->axq_lock);
 
        ath_tx_start_dma(sc, skb, txctl, tid);
+
+       spin_unlock_bh(&txq->axq_lock);
+
        return 0;
 }
 
@@ -2003,7 +1983,6 @@ static void ath_tx_complete(struct ath_s
 
        q = skb_get_queue_mapping(skb);
        if (txq == sc->tx.txq_map[q]) {
-               spin_lock_bh(&txq->axq_lock);
                if (WARN_ON(--txq->pending_frames < 0))
                        txq->pending_frames = 0;
 
@@ -2011,7 +1990,6 @@ static void ath_tx_complete(struct ath_s
                        ieee80211_wake_queue(sc->hw, q);
                        txq->stopped = 0;
                }
-               spin_unlock_bh(&txq->axq_lock);
        }
 
        ieee80211_tx_status(hw, skb);
@@ -2117,8 +2095,6 @@ static void ath_tx_rc_status(struct ath_
 static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq,
                                  struct ath_tx_status *ts, struct ath_buf *bf,
                                  struct list_head *bf_head)
-       __releases(txq->axq_lock)
-       __acquires(txq->axq_lock)
 {
        int txok;
 
@@ -2128,16 +2104,12 @@ static void ath_tx_process_buffer(struct
        if (bf_is_ampdu_not_probing(bf))
                txq->axq_ampdu_depth--;
 
-       spin_unlock_bh(&txq->axq_lock);
-
        if (!bf_isampdu(bf)) {
                ath_tx_rc_status(sc, bf, ts, 1, txok ? 0 : 1, txok);
                ath_tx_complete_buf(sc, bf, txq, bf_head, ts, txok);
        } else
                ath_tx_complete_aggr(sc, txq, bf, bf_head, ts, txok, true);
 
-       spin_lock_bh(&txq->axq_lock);
-
        if (sc->sc_flags & SC_OP_TXAGGR)
                ath_txq_schedule(sc, txq);
 }