summaryrefslogtreecommitdiff
path: root/net/mac802154/util.c
blob: ebc9a8521765faf90201f7804d0d231c1b708f5f (plain) (blame)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Authors:
 * Alexander Aring <aar@pengutronix.de>
 *
 * Based on: net/mac80211/util.c
 */

#include "ieee802154_i.h"
#include "driver-ops.h"

/* privid for wpan_phys to determine whether they belong to us or not.
 * The constant points at itself, so its address is guaranteed unique to
 * this module and can be compared against wpan_phy->privid.
 */
const void *const mac802154_wpan_phy_privid = &mac802154_wpan_phy_privid;

/**
 * ieee802154_wake_queue - wake ieee802154 queue
 * @hw: main hardware object
 *
 * Transceivers usually have either one transmit framebuffer or one framebuffer
 * for both transmitting and receiving. Hence, the core currently only handles
 * one frame at a time for each phy, which means we had to stop the queue to
 * avoid new skb to come during the transmission. The queue then needs to be
 * woken up after the operation.
 */
static void ieee802154_wake_queue(struct ieee802154_hw *hw)
{
	struct ieee802154_local *local = hw_to_local(hw);
	struct ieee802154_sub_if_data *sdata;

	rcu_read_lock();
	/* Drop the "stopped" marker first so concurrent observers of the phy
	 * flags see the queue as runnable before the netdevs wake up.
	 */
	clear_bit(WPAN_PHY_FLAG_STATE_QUEUE_STOPPED, &local->phy->flags);
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		/* Interfaces without a netdev have no queue to wake. */
		if (sdata->dev)
			netif_wake_queue(sdata->dev);
	}
	rcu_read_unlock();
}

/**
 * ieee802154_stop_queue - stop ieee802154 queue
 * @hw: main hardware object
 *
 * Transceivers usually have either one transmit framebuffer or one framebuffer
 * for both transmitting and receiving. Hence, the core currently only handles
 * one frame at a time for each phy, which means we need to tell upper layers to
 * stop giving us new skbs while we are busy with the transmitted one. The queue
 * must then be stopped before transmitting.
 */
static void ieee802154_stop_queue(struct ieee802154_hw *hw)
{
	struct ieee802154_local *local = hw_to_local(hw);
	struct ieee802154_sub_if_data *sdata;

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		/* Interfaces without a netdev have no queue to stop. */
		if (sdata->dev)
			netif_stop_queue(sdata->dev);
	}
	rcu_read_unlock();
}

/* Take one hold reference on the phy's tx path; the first holder actually
 * stops the netif queues. The queue_lock serializes the counter test with
 * the stop/wake operation so holds and releases cannot interleave badly.
 */
void ieee802154_hold_queue(struct ieee802154_local *local)
{
	unsigned long flags;

	spin_lock_irqsave(&local->phy->queue_lock, flags);
	/* atomic_fetch_inc() returns the old value: only the 0 -> 1
	 * transition performs the real stop.
	 */
	if (!atomic_fetch_inc(&local->phy->hold_txs))
		ieee802154_stop_queue(&local->hw);
	spin_unlock_irqrestore(&local->phy->queue_lock, flags);
}

/* Drop one hold reference on the phy's tx path; the last release wakes the
 * netif queues again. Must pair 1:1 with ieee802154_hold_queue().
 */
void ieee802154_release_queue(struct ieee802154_local *local)
{
	unsigned long flags;

	spin_lock_irqsave(&local->phy->queue_lock, flags);
	/* Only the 1 -> 0 transition of hold_txs restarts the queues. */
	if (atomic_dec_and_test(&local->phy->hold_txs))
		ieee802154_wake_queue(&local->hw);
	spin_unlock_irqrestore(&local->phy->queue_lock, flags);
}

/* Permanently disable tx on every interface backed by this phy, waiting for
 * any tx currently running in another CPU's ndo_start_xmit to finish
 * (netif_tx_disable takes the tx locks), unlike the softer stop_queue path.
 */
void ieee802154_disable_queue(struct ieee802154_local *local)
{
	struct ieee802154_sub_if_data *sdata;

	rcu_read_lock();
	list_for_each_entry_rcu(sdata, &local->interfaces, list) {
		/* Skip interfaces that never registered a netdev. */
		if (sdata->dev)
			netif_tx_disable(sdata->dev);
	}
	rcu_read_unlock();
}

/* hrtimer callback: the interframe spacing (SIFS/LIFS) armed by
 * ieee802154_xmit_complete() has elapsed, so release the queue hold that
 * was taken for the transmission. One-shot timer, never re-armed here.
 */
enum hrtimer_restart ieee802154_xmit_ifs_timer(struct hrtimer *timer)
{
	struct ieee802154_local *local;

	local = container_of(timer, struct ieee802154_local, ifs_timer);
	ieee802154_release_queue(local);

	return HRTIMER_NORESTART;
}

/* Driver callback for a successful transmission: record the result, enforce
 * the interframe spacing (or release the queue right away when the driver
 * handles IFS itself), consume the skb and wake any synchronous tx waiter.
 */
void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
			      bool ifs_handling)
{
	struct ieee802154_local *local = hw_to_local(hw);

	local->tx_result = IEEE802154_SUCCESS;

	if (ifs_handling) {
		u8 max_sifs_size = IEEE802154_MAX_SIFS_FRAME_SIZE;
		ktime_t period;

		/* If transceiver sets CRC on his own we need to use lifs
		 * threshold len above 16 otherwise 18, because it's not
		 * part of skb->len.
		 */
		if (hw->flags & IEEE802154_HW_TX_OMIT_CKSUM)
			max_sifs_size -= IEEE802154_FCS_LEN;

		/* Frames longer than the SIFS threshold require the long
		 * interframe spacing; the queue is released from the timer.
		 */
		if (skb->len > max_sifs_size)
			period = hw->phy->lifs_period * NSEC_PER_USEC;
		else
			period = hw->phy->sifs_period * NSEC_PER_USEC;
		hrtimer_start(&local->ifs_timer, period, HRTIMER_MODE_REL);
	} else {
		ieee802154_release_queue(local);
	}

	dev_consume_skb_any(skb);
	/* Last in-flight tx on this phy: wake synchronous waiters. */
	if (atomic_dec_and_test(&hw->phy->ongoing_txs))
		wake_up(&hw->phy->sync_txq);
}
EXPORT_SYMBOL(ieee802154_xmit_complete);

/* Driver callback for a failed transmission: record @reason as the tx
 * result, release the queue hold taken for this frame, free the skb (kfree,
 * not consume, since it was never delivered) and wake any synchronous tx
 * waiter once no transmissions remain in flight.
 */
void ieee802154_xmit_error(struct ieee802154_hw *hw, struct sk_buff *skb,
			   int reason)
{
	struct ieee802154_local *local = hw_to_local(hw);

	local->tx_result = reason;
	ieee802154_release_queue(local);
	dev_kfree_skb_any(skb);
	/* Last in-flight tx on this phy: wake synchronous waiters. */
	if (atomic_dec_and_test(&hw->phy->ongoing_txs))
		wake_up(&hw->phy->sync_txq);
}
EXPORT_SYMBOL(ieee802154_xmit_error);

/* Convenience wrapper for drivers reporting an unspecified hardware tx
 * failure: forwards to ieee802154_xmit_error() with IEEE802154_SYSTEM_ERROR.
 */
void ieee802154_xmit_hw_error(struct ieee802154_hw *hw, struct sk_buff *skb)
{
	ieee802154_xmit_error(hw, skb, IEEE802154_SYSTEM_ERROR);
}
EXPORT_SYMBOL(ieee802154_xmit_hw_error);

/* Quiesce the device: drain pending work, cancel the interframe-spacing
 * timer, then stop the driver. The order matters — queued work and the IFS
 * timer may still touch the hardware, so both must be gone before drv_stop().
 */
void ieee802154_stop_device(struct ieee802154_local *local)
{
	flush_workqueue(local->workqueue);
	hrtimer_cancel(&local->ifs_timer);
	drv_stop(local);
}