/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2014 Intel Mobile Communications GmbH
 * Copyright (C) 2015 Intel Deutschland GmbH
 */
#ifndef __iwl_op_mode_h__
#define __iwl_op_mode_h__

#include <linux/netdevice.h>
#include <linux/debugfs.h>
#include "iwl-dbg-tlv.h"

struct iwl_op_mode;
struct iwl_trans;
struct sk_buff;
struct iwl_device_cmd;
struct iwl_rx_cmd_buffer;
struct iwl_fw;
struct iwl_cfg;

/**
 * DOC: Operational mode - what is it?
 *
 * The operational mode (a.k.a. op_mode) is the layer that implements
 * mac80211's handlers. It knows two APIs: mac80211's and the fw's. It uses
 * the transport API to access the HW. The op_mode doesn't need to know how
 * the underlying HW works, since the transport layer takes care of that.
 *
 * There can be several op_modes: for example, two different fw APIs will
 * require two different op_modes. This is why the op_mode is virtualized.
 */
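
/*
 * A rough layering sketch (illustrative; not part of the original header):
 *
 *	mac80211
 *	    |
 *	op_mode		<-- this layer: an implementation of these ops
 *	    |
 *	transport	<-- iwl_trans: HW access details
 *	    |
 *	    HW
 */
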
/**
 * DOC: Life cycle of the Operational mode
 *
 * The operational mode has a very simple life cycle.
 *
 * 1) The driver layer (iwl-drv.c) chooses the op_mode based on the
 *    capabilities advertised by the fw file (in TLV format).
 * 2) The driver layer starts the op_mode (ops->start)
 * 3) The op_mode registers mac80211
 * 4) The op_mode is governed by mac80211
 * 5) The driver layer stops the op_mode
 */
/**
 * struct iwl_op_mode_ops - op_mode specific operations
 *
 * The op_mode exports its ops so that external components can start it and
 * interact with it. The driver layer typically calls the start and stop
 * handlers; the transport layer calls the others.
 *
 * All the handlers MUST be implemented, except @rx_rss, which can be left
 * out *iff* the opmode will never run on hardware with multi-queue
 * capability, and @time_point, which may be left unimplemented if the
 * op_mode doesn't collect debug data.
 *
 * @start: start the op_mode. The transport layer is already allocated.
 *	May sleep
 * @stop: stop the op_mode. Must free all the memory allocated.
 *	May sleep
 * @rx: Rx notification to the op_mode. rxb is the Rx buffer itself. Cmd is
 *	the HCMD this Rx responds to. Can't sleep.
 * @rx_rss: data queue RX notification to the op_mode, for (data)
 *	notifications received on the RSS queue(s). The queue parameter
 *	indicates which of the RSS queues received this frame; it will
 *	always be non-zero. This method must not sleep.
 * @queue_full: notifies that a HW queue is full.
 *	Must be atomic and called with BH disabled.
 * @queue_not_full: notifies that a HW queue is not full any more.
 *	Must be atomic and called with BH disabled.
 * @hw_rf_kill: notifies of a change in the HW rf kill switch. True means
 *	that the radio is killed. Return %true if the device should be
 *	stopped by the transport immediately after the call. May sleep.
 *	Note that this must not return %true for newer devices using gen2
 *	PCIe transport.
 * @free_skb: allows the transport layer to free skbs that haven't been
 *	reclaimed by the op_mode. This can happen when the driver is freed
 *	and there are Tx packets pending in the transport layer.
 *	Must be atomic
 * @nic_error: error notification. Must be atomic and must be called with
 *	BH disabled, unless the sync parameter is true.
 * @cmd_queue_full: Called when the command queue gets full. Must be atomic
 *	and called with BH disabled.
 * @nic_config: configure NIC, called before firmware is started.
 *	May sleep
 * @wimax_active: invoked when WiMax becomes active. May sleep
 * @time_point: called when the transport layer wants to collect debug data
 */
struct iwl_op_mode_ops {
	struct iwl_op_mode *(*start)(struct iwl_trans *trans,
				     const struct iwl_cfg *cfg,
				     const struct iwl_fw *fw,
				     struct dentry *dbgfs_dir);
	void (*stop)(struct iwl_op_mode *op_mode);
	void (*rx)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		   struct iwl_rx_cmd_buffer *rxb);
	void (*rx_rss)(struct iwl_op_mode *op_mode, struct napi_struct *napi,
		       struct iwl_rx_cmd_buffer *rxb, unsigned int queue);
	void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
	void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
	bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
	void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
	void (*nic_error)(struct iwl_op_mode *op_mode, bool sync);
	void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
	void (*nic_config)(struct iwl_op_mode *op_mode);
	void (*wimax_active)(struct iwl_op_mode *op_mode);
	void (*time_point)(struct iwl_op_mode *op_mode,
			   enum iwl_fw_ini_time_point tp_id,
			   union iwl_dbg_tlv_tp_data *tp_data);
};

int iwl_opmode_register(const char *name, const struct iwl_op_mode_ops *ops);
void iwl_opmode_deregister(const char *name);
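
/*
 * Registration sketch (assumption: "my_opmode" and the my_opmode_* names
 * are illustrative, not part of this API): an op_mode implementation
 * typically registers its ops table from its module init handler and
 * deregisters it on exit:
 *
 *	static const struct iwl_op_mode_ops my_opmode_ops = {
 *		.start = my_opmode_start,
 *		.stop = my_opmode_stop,
 *		.rx = my_opmode_rx,
 *		// ... all mandatory handlers must be set
 *	};
 *
 *	static int __init my_opmode_init(void)
 *	{
 *		return iwl_opmode_register("my_opmode", &my_opmode_ops);
 *	}
 *
 *	static void __exit my_opmode_exit(void)
 *	{
 *		iwl_opmode_deregister("my_opmode");
 *	}
 */
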
/**
 * struct iwl_op_mode - operational mode
 * @ops: pointer to its own ops
 *
 * This holds an implementation of the mac80211 / fw API.
 */
struct iwl_op_mode {
	const struct iwl_op_mode_ops *ops;

	char op_mode_specific[] __aligned(sizeof(void *));
};
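
/*
 * The trailing op_mode_specific[] area holds the implementation's private
 * state. A sketch of the usual embedding pattern (assumption: struct
 * my_priv is illustrative):
 *
 *	struct iwl_op_mode *op_mode;
 *	struct my_priv *priv;
 *
 *	op_mode = kzalloc(sizeof(*op_mode) + sizeof(*priv), GFP_KERNEL);
 *	if (!op_mode)
 *		return NULL;
 *	op_mode->ops = &my_opmode_ops;
 *	priv = (void *)op_mode->op_mode_specific;
 *
 * The __aligned(sizeof(void *)) attribute keeps that cast safe for any
 * pointer-aligned private struct.
 */
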
static inline void iwl_op_mode_stop(struct iwl_op_mode *op_mode)
{
	might_sleep();

	op_mode->ops->stop(op_mode);
}

static inline void iwl_op_mode_rx(struct iwl_op_mode *op_mode,
				  struct napi_struct *napi,
				  struct iwl_rx_cmd_buffer *rxb)
{
	op_mode->ops->rx(op_mode, napi, rxb);
}

static inline void iwl_op_mode_rx_rss(struct iwl_op_mode *op_mode,
				      struct napi_struct *napi,
				      struct iwl_rx_cmd_buffer *rxb,
				      unsigned int queue)
{
	op_mode->ops->rx_rss(op_mode, napi, rxb, queue);
}

static inline void iwl_op_mode_queue_full(struct iwl_op_mode *op_mode,
					  int queue)
{
	op_mode->ops->queue_full(op_mode, queue);
}

static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
					      int queue)
{
	op_mode->ops->queue_not_full(op_mode, queue);
}

static inline bool __must_check
iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
{
	might_sleep();

	return op_mode->ops->hw_rf_kill(op_mode, state);
}

static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
					struct sk_buff *skb)
{
	/* the op_mode may already be gone when the transport frees skbs */
	if (WARN_ON_ONCE(!op_mode))
		return;
	op_mode->ops->free_skb(op_mode, skb);
}

static inline void iwl_op_mode_nic_error(struct iwl_op_mode *op_mode, bool sync)
{
	op_mode->ops->nic_error(op_mode, sync);
}

static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode)
{
	op_mode->ops->cmd_queue_full(op_mode);
}

static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode)
{
	might_sleep();

	op_mode->ops->nic_config(op_mode);
}

static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode)
{
	might_sleep();

	op_mode->ops->wimax_active(op_mode);
}

static inline void iwl_op_mode_time_point(struct iwl_op_mode *op_mode,
					  enum iwl_fw_ini_time_point tp_id,
					  union iwl_dbg_tlv_tp_data *tp_data)
{
	/* @time_point is optional, see struct iwl_op_mode_ops */
	if (!op_mode || !op_mode->ops || !op_mode->ops->time_point)
		return;
	op_mode->ops->time_point(op_mode, tp_id, tp_data);
}
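
/*
 * Transport-side usage sketch (assumption: the trans->op_mode naming is
 * illustrative here; see the transport layer for the real dispatch): the
 * transport delivers RX buffers through these wrappers rather than calling
 * the ops table directly, e.g.
 *
 *	if (queue)
 *		iwl_op_mode_rx_rss(trans->op_mode, napi, rxb, queue);
 *	else
 *		iwl_op_mode_rx(trans->op_mode, napi, rxb);
 */
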
#endif /* __iwl_op_mode_h__ */