FreeBSD kernel iwm device code
if_iwm.c
1/* $OpenBSD: if_iwm.c,v 1.167 2017/04/04 00:40:52 claudio Exp $ */
2
3/*
4 * Copyright (c) 2014 genua mbh <info@genua.de>
5 * Copyright (c) 2014 Fixup Software Ltd.
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20/*-
21 * Based on BSD-licensed source modules in the Linux iwlwifi driver,
22 * which were used as the reference documentation for this implementation.
23 *
24 * Driver version we are currently based off of is
25 * Linux 3.14.3 (tag id a2df521e42b1d9a23f620ac79dbfe8655a8391dd)
26 *
27 ***********************************************************************
28 *
29 * This file is provided under a dual BSD/GPLv2 license. When using or
30 * redistributing this file, you may do so under either license.
31 *
32 * GPL LICENSE SUMMARY
33 *
34 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
35 *
36 * This program is free software; you can redistribute it and/or modify
37 * it under the terms of version 2 of the GNU General Public License as
38 * published by the Free Software Foundation.
39 *
40 * This program is distributed in the hope that it will be useful, but
41 * WITHOUT ANY WARRANTY; without even the implied warranty of
42 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
43 * General Public License for more details.
44 *
45 * You should have received a copy of the GNU General Public License
46 * along with this program; if not, write to the Free Software
47 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
48 * USA
49 *
50 * The full GNU General Public License is included in this distribution
51 * in the file called COPYING.
52 *
53 * Contact Information:
54 * Intel Linux Wireless <ilw@linux.intel.com>
55 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
56 *
57 *
58 * BSD LICENSE
59 *
60 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
61 * All rights reserved.
62 *
63 * Redistribution and use in source and binary forms, with or without
64 * modification, are permitted provided that the following conditions
65 * are met:
66 *
67 * * Redistributions of source code must retain the above copyright
68 * notice, this list of conditions and the following disclaimer.
69 * * Redistributions in binary form must reproduce the above copyright
70 * notice, this list of conditions and the following disclaimer in
71 * the documentation and/or other materials provided with the
72 * distribution.
73 * * Neither the name Intel Corporation nor the names of its
74 * contributors may be used to endorse or promote products derived
75 * from this software without specific prior written permission.
76 *
77 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
78 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
79 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
80 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
81 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
82 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
83 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
84 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
85 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
86 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
87 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
88 */
89
90/*-
91 * Copyright (c) 2007-2010 Damien Bergamini <damien.bergamini@free.fr>
92 *
93 * Permission to use, copy, modify, and distribute this software for any
94 * purpose with or without fee is hereby granted, provided that the above
95 * copyright notice and this permission notice appear in all copies.
96 *
97 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
98 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
99 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
100 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
101 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
102 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
103 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
104 */
105#include <sys/cdefs.h>
106__FBSDID("$FreeBSD$");
107
108#include "opt_wlan.h"
109#include "opt_iwm.h"
110
111#include <sys/param.h>
112#include <sys/bus.h>
113#include <sys/conf.h>
114#include <sys/endian.h>
115#include <sys/firmware.h>
116#include <sys/kernel.h>
117#include <sys/malloc.h>
118#include <sys/mbuf.h>
119#include <sys/mutex.h>
120#include <sys/module.h>
121#include <sys/proc.h>
122#include <sys/rman.h>
123#include <sys/socket.h>
124#include <sys/sockio.h>
125#include <sys/sysctl.h>
126#include <sys/linker.h>
127
128#include <machine/bus.h>
129#include <machine/endian.h>
130#include <machine/resource.h>
131
132#include <dev/pci/pcivar.h>
133#include <dev/pci/pcireg.h>
134
135#include <net/bpf.h>
136
137#include <net/if.h>
138#include <net/if_var.h>
139#include <net/if_arp.h>
140#include <net/if_dl.h>
141#include <net/if_media.h>
142#include <net/if_types.h>
143
144#include <netinet/in.h>
145#include <netinet/in_systm.h>
146#include <netinet/if_ether.h>
147#include <netinet/ip.h>
148
149#include <net80211/ieee80211_var.h>
150#include <net80211/ieee80211_regdomain.h>
151#include <net80211/ieee80211_ratectl.h>
152#include <net80211/ieee80211_radiotap.h>
153
154#include <dev/iwm/if_iwmreg.h>
155#include <dev/iwm/if_iwmvar.h>
156#include <dev/iwm/if_iwm_config.h>
157#include <dev/iwm/if_iwm_debug.h>
158#include <dev/iwm/if_iwm_notif_wait.h>
159#include <dev/iwm/if_iwm_util.h>
160#include <dev/iwm/if_iwm_binding.h>
161#include <dev/iwm/if_iwm_phy_db.h>
162#include <dev/iwm/if_iwm_mac_ctxt.h>
163#include <dev/iwm/if_iwm_phy_ctxt.h>
164#include <dev/iwm/if_iwm_time_event.h>
165#include <dev/iwm/if_iwm_power.h>
166#include <dev/iwm/if_iwm_scan.h>
167#include <dev/iwm/if_iwm_sf.h>
168#include <dev/iwm/if_iwm_sta.h>
169
170#include <dev/iwm/if_iwm_pcie_trans.h>
171#include <dev/iwm/if_iwm_led.h>
172#include <dev/iwm/if_iwm_fw.h>
173
174/* From DragonflyBSD */
175#define mtodoff(m, t, off) ((t)((m)->m_data + (off)))
176
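/*
 * Illustrative sketch (not part of the driver): mtodoff() is just mtod()
 * with a byte offset, e.g. for stepping over a fixed-size header inside an
 * mbuf.  The helper below is hypothetical and only demonstrates the usage.
 */
#if 0
static inline uint8_t *
iwm_example_payload(struct mbuf *m, size_t hdrlen)
{
	/* equivalent to (uint8_t *)m->m_data + hdrlen */
	return mtodoff(m, uint8_t *, hdrlen);
}
#endif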
177const uint8_t iwm_nvm_channels[] = {
178 /* 2.4 GHz */
179 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
180 /* 5 GHz */
181 36, 40, 44, 48, 52, 56, 60, 64,
182 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
183 149, 153, 157, 161, 165
184};
186 "IWM_NUM_CHANNELS is too small");
187
188const uint8_t iwm_nvm_channels_8000[] = {
189 /* 2.4 GHz */
190 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
191 /* 5 GHz */
192 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
193 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
194 149, 153, 157, 161, 165, 169, 173, 177, 181
195};
197 "IWM_NUM_CHANNELS_8000 is too small");
198
199#define IWM_NUM_2GHZ_CHANNELS 14
200#define IWM_N_HW_ADDR_MASK 0xF
201
202/*
203 * XXX For now, there's simply a fixed set of rate table entries
204 * that are populated.
205 */
206const struct iwm_rate {
207 uint8_t rate;
208 uint8_t plcp;
209} iwm_rates[] = {
210 { 2, IWM_RATE_1M_PLCP },
211 { 4, IWM_RATE_2M_PLCP },
212 { 11, IWM_RATE_5M_PLCP },
213 { 22, IWM_RATE_11M_PLCP },
214 { 12, IWM_RATE_6M_PLCP },
215 { 18, IWM_RATE_9M_PLCP },
216 { 24, IWM_RATE_12M_PLCP },
217 { 36, IWM_RATE_18M_PLCP },
218 { 48, IWM_RATE_24M_PLCP },
219 { 72, IWM_RATE_36M_PLCP },
220 { 96, IWM_RATE_48M_PLCP },
221 { 108, IWM_RATE_54M_PLCP },
222};
223#define IWM_RIDX_CCK 0
224#define IWM_RIDX_OFDM 4
225#define IWM_RIDX_MAX (nitems(iwm_rates)-1)
226#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
227#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
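/*
 * Illustrative sketch (not part of the driver): iwm_rates[] maps a rate
 * index to its rate code (in 500 kbit/s units, so 2 == 1 Mbit/s and
 * 108 == 54 Mbit/s) and the PLCP value the firmware expects; the
 * IWM_RIDX_* macros classify an index as CCK (entries 0-3) or OFDM.
 * The helper below is hypothetical.
 */
#if 0
static uint8_t
iwm_example_plcp_for_ridx(int ridx)
{
	if (ridx < IWM_RIDX_CCK || ridx > IWM_RIDX_MAX)
		return iwm_rates[IWM_RIDX_CCK].plcp;	/* fall back to 1 Mbit/s */
	return iwm_rates[ridx].plcp;
}
#endif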
228
229struct iwm_nvm_section {
230 uint16_t length;
231 uint8_t *data;
232};
233
234#define IWM_UCODE_ALIVE_TIMEOUT hz
235#define IWM_UCODE_CALIB_TIMEOUT (2*hz)
236
237struct iwm_alive_data {
238 int valid;
239 uint32_t scd_base_addr;
240};
241
242static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
243static int iwm_firmware_store_section(struct iwm_softc *,
244 enum iwm_ucode_type,
245 const uint8_t *, size_t);
246static int iwm_set_default_calib(struct iwm_softc *, const void *);
247static void iwm_fw_info_free(struct iwm_fw_info *);
248static int iwm_read_firmware(struct iwm_softc *);
249static int iwm_alloc_fwmem(struct iwm_softc *);
250static int iwm_alloc_sched(struct iwm_softc *);
251static int iwm_alloc_kw(struct iwm_softc *);
252static int iwm_alloc_ict(struct iwm_softc *);
253static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
254static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
255static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *);
256static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *,
257 int);
258static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
259static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *);
260static void iwm_enable_interrupts(struct iwm_softc *);
261static void iwm_restore_interrupts(struct iwm_softc *);
262static void iwm_disable_interrupts(struct iwm_softc *);
263static void iwm_ict_reset(struct iwm_softc *);
264static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *);
265static void iwm_stop_device(struct iwm_softc *);
266static void iwm_nic_config(struct iwm_softc *);
267static int iwm_nic_rx_init(struct iwm_softc *);
268static int iwm_nic_tx_init(struct iwm_softc *);
269static int iwm_nic_init(struct iwm_softc *);
270static int iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t);
271static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
272 uint16_t, uint8_t *, uint16_t *);
273static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
274 uint16_t *, uint32_t);
275static uint32_t iwm_eeprom_channel_flags(uint16_t);
276static void iwm_add_channel_band(struct iwm_softc *,
277 struct ieee80211_channel[], int, int *, int, size_t,
278 const uint8_t[]);
279static void iwm_init_channel_map(struct ieee80211com *, int, int *,
280 struct ieee80211_channel[]);
281static struct iwm_nvm_data *
282 iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
283 const uint16_t *, const uint16_t *,
284 const uint16_t *, const uint16_t *,
285 const uint16_t *);
286static void iwm_free_nvm_data(struct iwm_nvm_data *);
287static void iwm_set_hw_address_family_8000(struct iwm_softc *,
288 struct iwm_nvm_data *,
289 const uint16_t *,
290 const uint16_t *);
291static int iwm_get_sku(const struct iwm_softc *, const uint16_t *,
292 const uint16_t *);
293static int iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
294static int iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
295 const uint16_t *);
296static int iwm_get_n_hw_addrs(const struct iwm_softc *,
297 const uint16_t *);
298static void iwm_set_radio_cfg(const struct iwm_softc *,
299 struct iwm_nvm_data *, uint32_t);
300static struct iwm_nvm_data *
301 iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *);
302static int iwm_nvm_init(struct iwm_softc *);
303static int iwm_pcie_load_section(struct iwm_softc *, uint8_t,
304 const struct iwm_fw_desc *);
305static int iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t,
306 bus_addr_t, uint32_t);
307static int iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
308 const struct iwm_fw_img *,
309 int, int *);
310static int iwm_pcie_load_cpu_sections(struct iwm_softc *,
311 const struct iwm_fw_img *,
312 int, int *);
313static int iwm_pcie_load_given_ucode_8000(struct iwm_softc *,
314 const struct iwm_fw_img *);
315static int iwm_pcie_load_given_ucode(struct iwm_softc *,
316 const struct iwm_fw_img *);
317static int iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *);
318static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
319static int iwm_send_phy_cfg_cmd(struct iwm_softc *);
320static int iwm_load_ucode_wait_alive(struct iwm_softc *,
321 enum iwm_ucode_type);
322static int iwm_run_init_ucode(struct iwm_softc *, int);
323static int iwm_config_ltr(struct iwm_softc *sc);
324static int iwm_rx_addbuf(struct iwm_softc *, int, int);
325static void iwm_rx_rx_phy_cmd(struct iwm_softc *,
326 struct iwm_rx_packet *);
327static int iwm_get_noise(struct iwm_softc *,
328 const struct iwm_statistics_rx_non_phy *);
329static void iwm_handle_rx_statistics(struct iwm_softc *,
330 struct iwm_rx_packet *);
331static bool iwm_rx_mpdu(struct iwm_softc *, struct mbuf *,
332 uint32_t, bool);
333static int iwm_rx_tx_cmd_single(struct iwm_softc *,
334 struct iwm_rx_packet *,
335 struct iwm_node *);
336static void iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *);
337static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *);
338#if 0
339static void iwm_update_sched(struct iwm_softc *, int, int, uint8_t,
340 uint16_t);
341#endif
342static const struct iwm_rate *
343 iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *,
344 struct mbuf *, struct iwm_tx_cmd *);
345static int iwm_tx(struct iwm_softc *, struct mbuf *,
346 struct ieee80211_node *, int);
347static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
348 const struct ieee80211_bpf_params *);
349static int iwm_update_quotas(struct iwm_softc *, struct iwm_vap *);
350static int iwm_auth(struct ieee80211vap *, struct iwm_softc *);
351static struct ieee80211_node *
352 iwm_node_alloc(struct ieee80211vap *,
353 const uint8_t[IEEE80211_ADDR_LEN]);
354static uint8_t iwm_rate_from_ucode_rate(uint32_t);
355static int iwm_rate2ridx(struct iwm_softc *, uint8_t);
356static void iwm_setrates(struct iwm_softc *, struct iwm_node *, int);
357static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
358static void iwm_endscan_cb(void *, int);
359static int iwm_send_bt_init_conf(struct iwm_softc *);
360static boolean_t iwm_is_lar_supported(struct iwm_softc *);
361static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *);
362static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
363static void iwm_tt_tx_backoff(struct iwm_softc *, uint32_t);
364static int iwm_init_hw(struct iwm_softc *);
365static void iwm_init(struct iwm_softc *);
366static void iwm_start(struct iwm_softc *);
367static void iwm_stop(struct iwm_softc *);
368static void iwm_watchdog(void *);
369static void iwm_parent(struct ieee80211com *);
370#ifdef IWM_DEBUG
371static const char *
372 iwm_desc_lookup(uint32_t);
373static void iwm_nic_error(struct iwm_softc *);
374static void iwm_nic_umac_error(struct iwm_softc *);
375#endif
376static void iwm_handle_rxb(struct iwm_softc *, struct mbuf *);
377static void iwm_notif_intr(struct iwm_softc *);
378static void iwm_intr(void *);
379static int iwm_attach(device_t);
380static int iwm_is_valid_ether_addr(uint8_t *);
381static void iwm_preinit(void *);
382static int iwm_detach_local(struct iwm_softc *sc, int);
383static void iwm_init_task(void *);
384static void iwm_radiotap_attach(struct iwm_softc *);
385static struct ieee80211vap *
386 iwm_vap_create(struct ieee80211com *,
387 const char [IFNAMSIZ], int,
388 enum ieee80211_opmode, int,
389 const uint8_t [IEEE80211_ADDR_LEN],
390 const uint8_t [IEEE80211_ADDR_LEN]);
391static void iwm_vap_delete(struct ieee80211vap *);
392static void iwm_xmit_queue_drain(struct iwm_softc *);
393static void iwm_scan_start(struct ieee80211com *);
394static void iwm_scan_end(struct ieee80211com *);
395static void iwm_update_mcast(struct ieee80211com *);
396static void iwm_set_channel(struct ieee80211com *);
397static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long);
398static void iwm_scan_mindwell(struct ieee80211_scan_state *);
399static int iwm_detach(device_t);
400
401static int iwm_lar_disable = 0;
402TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable);
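/*
 * Example usage (not in the original source): TUNABLE_INT() makes the knob
 * above a boot-time tunable, so location-aware regulatory (LAR) can be
 * disabled from /boot/loader.conf with:
 *
 *	hw.iwm.lar.disable="1"
 */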
403
404/*
405 * Firmware parser.
406 */
407
408static int
409iwm_store_cscheme(struct iwm_softc *sc, const uint8_t *data, size_t dlen)
410{
411 const struct iwm_fw_cscheme_list *l = (const void *)data;
412
413 if (dlen < sizeof(*l) ||
414 dlen < sizeof(l->size) + l->size * sizeof(*l->cs))
415 return EINVAL;
416
417 /* we don't actually store anything for now, always use s/w crypto */
418
419 return 0;
420}
421
422static int
423iwm_firmware_store_section(struct iwm_softc *sc,
424 enum iwm_ucode_type type, const uint8_t *data, size_t dlen)
425{
426 struct iwm_fw_img *fws;
427 struct iwm_fw_desc *fwone;
428
429 if (type >= IWM_UCODE_TYPE_MAX)
430 return EINVAL;
431 if (dlen < sizeof(uint32_t))
432 return EINVAL;
433
434 fws = &sc->sc_fw.img[type];
435 if (fws->fw_count >= IWM_UCODE_SECTION_MAX)
436 return EINVAL;
437
438 fwone = &fws->sec[fws->fw_count];
439
440 /* first 32bit are device load offset */
441 memcpy(&fwone->offset, data, sizeof(uint32_t));
442
443 /* rest is data */
444 fwone->data = data + sizeof(uint32_t);
445 fwone->len = dlen - sizeof(uint32_t);
446
447 fws->fw_count++;
448
449 return 0;
450}
451
452#define IWM_DEFAULT_SCAN_CHANNELS 40
453
454/* iwlwifi: iwl-drv.c */
455struct iwm_tlv_calib_data {
456 uint32_t ucode_type;
457 struct iwm_tlv_calib_ctrl calib;
458} __packed;
459
460static int
461iwm_set_default_calib(struct iwm_softc *sc, const void *data)
462{
463 const struct iwm_tlv_calib_data *def_calib = data;
464 uint32_t ucode_type = le32toh(def_calib->ucode_type);
465
466 if (ucode_type >= IWM_UCODE_TYPE_MAX) {
467 device_printf(sc->sc_dev,
468 "Wrong ucode_type %u for default "
469 "calibration.\n", ucode_type);
470 return EINVAL;
471 }
472
473 sc->sc_default_calib[ucode_type].flow_trigger =
474 def_calib->calib.flow_trigger;
475 sc->sc_default_calib[ucode_type].event_trigger =
476 def_calib->calib.event_trigger;
477
478 return 0;
479}
480
481static int
482iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data,
483 struct iwm_ucode_capabilities *capa)
484{
485 const struct iwm_ucode_api *ucode_api = (const void *)data;
486 uint32_t api_index = le32toh(ucode_api->api_index);
487 uint32_t api_flags = le32toh(ucode_api->api_flags);
488 int i;
489
490 if (api_index >= howmany(IWM_NUM_UCODE_TLV_API, 32)) {
491 device_printf(sc->sc_dev,
492 "api flags index %d larger than supported by driver\n",
493 api_index);
494 /* don't return an error so we can load FW that has more bits */
495 return 0;
496 }
497
498 for (i = 0; i < 32; i++) {
499 if (api_flags & (1U << i))
500 setbit(capa->enabled_api, i + 32 * api_index);
501 }
502
503 return 0;
504}
505
506static int
507iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data,
508 struct iwm_ucode_capabilities *capa)
509{
510 const struct iwm_ucode_capa *ucode_capa = (const void *)data;
511 uint32_t api_index = le32toh(ucode_capa->api_index);
512 uint32_t api_flags = le32toh(ucode_capa->api_capa);
513 int i;
514
515 if (api_index >= howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
516 device_printf(sc->sc_dev,
517 "capa flags index %d larger than supported by driver\n",
518 api_index);
519 /* don't return an error so we can load FW that has more bits */
520 return 0;
521 }
522
523 for (i = 0; i < 32; i++) {
524 if (api_flags & (1U << i))
525 setbit(capa->enabled_capa, i + 32 * api_index);
526 }
527
528 return 0;
529}
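/*
 * Illustrative sketch (not part of the driver): the two TLV parsers above
 * record each advertised API/capability bit with setbit(); consumers would
 * query the same bitmaps with isset(), roughly as below.  The helper name
 * is hypothetical.
 */
#if 0
static inline int
iwm_example_fw_has_api(const struct iwm_ucode_capabilities *capa, int api)
{
	return isset(capa->enabled_api, api);
}
#endif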
530
531static void
532iwm_fw_info_free(struct iwm_fw_info *fw)
533{
534 firmware_put(fw->fw_fp, FIRMWARE_UNLOAD);
535 fw->fw_fp = NULL;
536 memset(fw->img, 0, sizeof(fw->img));
537}
538
539static int
540iwm_read_firmware(struct iwm_softc *sc)
541{
542 struct iwm_fw_info *fw = &sc->sc_fw;
543 const struct iwm_tlv_ucode_header *uhdr;
544 const struct iwm_ucode_tlv *tlv;
545 struct iwm_ucode_capabilities *capa = &sc->sc_fw.ucode_capa;
546 enum iwm_ucode_tlv_type tlv_type;
547 const struct firmware *fwp;
548 const uint8_t *data;
549 uint32_t tlv_len;
550 uint32_t usniffer_img;
551 const uint8_t *tlv_data;
552 uint32_t paging_mem_size;
553 int num_of_cpus;
554 int error = 0;
555 size_t len;
556
557 /*
558 * Load firmware into driver memory.
559 * fw_fp will be set.
560 */
561 fwp = firmware_get(sc->cfg->fw_name);
562 if (fwp == NULL) {
563 device_printf(sc->sc_dev,
564 "could not read firmware %s (error %d)\n",
565 sc->cfg->fw_name, error);
566 goto out;
567 }
568 fw->fw_fp = fwp;
569
570 /* (Re-)Initialize default values. */
571 capa->flags = 0;
572 capa->max_probe_length = IWM_DEFAULT_MAX_PROBE_LENGTH;
573 capa->n_scan_channels = IWM_DEFAULT_SCAN_CHANNELS;
574 memset(capa->enabled_capa, 0, sizeof(capa->enabled_capa));
575 memset(capa->enabled_api, 0, sizeof(capa->enabled_api));
576 memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
577
578 /*
579 * Parse firmware contents
580 */
581
582 uhdr = (const void *)fw->fw_fp->data;
583 if (*(const uint32_t *)fw->fw_fp->data != 0
584 || le32toh(uhdr->magic) != IWM_TLV_UCODE_MAGIC) {
585 device_printf(sc->sc_dev, "invalid firmware %s\n",
586 sc->cfg->fw_name);
587 error = EINVAL;
588 goto out;
589 }
590
591 snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%u.%u (API ver %u)",
592 IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
593 IWM_UCODE_MINOR(le32toh(uhdr->ver)),
594 IWM_UCODE_API(le32toh(uhdr->ver)));
595 data = uhdr->data;
596 len = fw->fw_fp->datasize - sizeof(*uhdr);
597
598 while (len >= sizeof(*tlv)) {
599 len -= sizeof(*tlv);
600 tlv = (const void *)data;
601
602 tlv_len = le32toh(tlv->length);
603 tlv_type = le32toh(tlv->type);
604 tlv_data = tlv->data;
605
606 if (len < tlv_len) {
607 device_printf(sc->sc_dev,
608 "firmware too short: %zu bytes\n",
609 len);
610 error = EINVAL;
611 goto parse_out;
612 }
613 len -= roundup2(tlv_len, 4);
614 data += sizeof(*tlv) + roundup2(tlv_len, 4);
615
616 switch ((int)tlv_type) {
617 case IWM_UCODE_TLV_PROBE_MAX_LEN:
618 if (tlv_len != sizeof(uint32_t)) {
619 device_printf(sc->sc_dev,
620 "%s: PROBE_MAX_LEN (%u) != sizeof(uint32_t)\n",
621 __func__, tlv_len);
622 error = EINVAL;
623 goto parse_out;
624 }
625 capa->max_probe_length =
626 le32_to_cpup((const uint32_t *)tlv_data);
627 /* limit it to something sensible */
628 if (capa->max_probe_length >
629 IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
630 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
631 "%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
632 "ridiculous\n", __func__);
633 error = EINVAL;
634 goto parse_out;
635 }
636 break;
637 case IWM_UCODE_TLV_PAN:
638 if (tlv_len) {
639 device_printf(sc->sc_dev,
640 "%s: IWM_UCODE_TLV_PAN: tlv_len (%u) > 0\n",
641 __func__, tlv_len);
642 error = EINVAL;
643 goto parse_out;
644 }
645 capa->flags |= IWM_UCODE_TLV_FLAGS_PAN;
646 break;
647 case IWM_UCODE_TLV_FLAGS:
648 if (tlv_len < sizeof(uint32_t)) {
649 device_printf(sc->sc_dev,
650 "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) < sizeof(uint32_t)\n",
651 __func__, tlv_len);
652 error = EINVAL;
653 goto parse_out;
654 }
655 if (tlv_len % sizeof(uint32_t)) {
656 device_printf(sc->sc_dev,
657 "%s: IWM_UCODE_TLV_FLAGS: tlv_len (%u) %% sizeof(uint32_t)\n",
658 __func__, tlv_len);
659 error = EINVAL;
660 goto parse_out;
661 }
662 /*
663 * Apparently there can be many flags, but Linux driver
664 * parses only the first one, and so do we.
665 *
666 * XXX: why does this override IWM_UCODE_TLV_PAN?
667 * Intentional or a bug? Observations from
668 * current firmware file:
669 * 1) TLV_PAN is parsed first
670 * 2) TLV_FLAGS contains TLV_FLAGS_PAN
671 * ==> this resets TLV_PAN to itself... hnnnk
672 */
673 capa->flags = le32_to_cpup((const uint32_t *)tlv_data);
674 break;
675 case IWM_UCODE_TLV_CS_TABLE:
676 if ((error = iwm_store_cscheme(sc,
677 tlv_data, tlv_len)) != 0) {
678 device_printf(sc->sc_dev,
679 "%s: iwm_store_cscheme(): returned %d\n",
680 __func__, error);
681 goto parse_out;
682 }
683 break;
684 case IWM_UCODE_TLV_NUM_OF_CPU:
685 if (tlv_len != sizeof(uint32_t)) {
686 device_printf(sc->sc_dev,
687 "%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%u) != sizeof(uint32_t)\n",
688 __func__, tlv_len);
689 error = EINVAL;
690 goto parse_out;
691 }
692 num_of_cpus = le32_to_cpup((const uint32_t *)tlv_data);
693 if (num_of_cpus == 2) {
694 fw->img[IWM_UCODE_REGULAR].is_dual_cpus =
695 TRUE;
696 fw->img[IWM_UCODE_INIT].is_dual_cpus =
697 TRUE;
698 fw->img[IWM_UCODE_WOWLAN].is_dual_cpus =
699 TRUE;
700 } else if ((num_of_cpus > 2) || (num_of_cpus < 1)) {
701 device_printf(sc->sc_dev,
702 "%s: Driver supports only 1 or 2 CPUs\n",
703 __func__);
704 error = EINVAL;
705 goto parse_out;
706 }
707 break;
708 case IWM_UCODE_TLV_SEC_RT:
709 if ((error = iwm_firmware_store_section(sc,
710 IWM_UCODE_REGULAR, tlv_data, tlv_len)) != 0) {
711 device_printf(sc->sc_dev,
712 "%s: IWM_UCODE_REGULAR: iwm_firmware_store_section() failed; %d\n",
713 __func__, error);
714 goto parse_out;
715 }
716 break;
717 case IWM_UCODE_TLV_SEC_INIT:
718 if ((error = iwm_firmware_store_section(sc,
719 IWM_UCODE_INIT, tlv_data, tlv_len)) != 0) {
720 device_printf(sc->sc_dev,
721 "%s: IWM_UCODE_INIT: iwm_firmware_store_section() failed; %d\n",
722 __func__, error);
723 goto parse_out;
724 }
725 break;
726 case IWM_UCODE_TLV_SEC_WOWLAN:
727 if ((error = iwm_firmware_store_section(sc,
728 IWM_UCODE_WOWLAN, tlv_data, tlv_len)) != 0) {
729 device_printf(sc->sc_dev,
730 "%s: IWM_UCODE_WOWLAN: iwm_firmware_store_section() failed; %d\n",
731 __func__, error);
732 goto parse_out;
733 }
734 break;
735 case IWM_UCODE_TLV_DEF_CALIB:
736 if (tlv_len != sizeof(struct iwm_tlv_calib_data)) {
737 device_printf(sc->sc_dev,
738 "%s: IWM_UCODE_TLV_DEV_CALIB: tlv_len (%u) < sizeof(iwm_tlv_calib_data) (%zu)\n",
739 __func__, tlv_len,
740 sizeof(struct iwm_tlv_calib_data));
741 error = EINVAL;
742 goto parse_out;
743 }
744 if ((error = iwm_set_default_calib(sc, tlv_data)) != 0) {
745 device_printf(sc->sc_dev,
746 "%s: iwm_set_default_calib() failed: %d\n",
747 __func__, error);
748 goto parse_out;
749 }
750 break;
751 case IWM_UCODE_TLV_PHY_SKU:
752 if (tlv_len != sizeof(uint32_t)) {
753 error = EINVAL;
754 device_printf(sc->sc_dev,
755 "%s: IWM_UCODE_TLV_PHY_SKU: tlv_len (%u) < sizeof(uint32_t)\n",
756 __func__, tlv_len);
757 goto parse_out;
758 }
759 sc->sc_fw.phy_config =
760 le32_to_cpup((const uint32_t *)tlv_data);
761 sc->sc_fw.valid_tx_ant = (sc->sc_fw.phy_config &
762 IWM_FW_PHY_CFG_TX_CHAIN) >>
763 IWM_FW_PHY_CFG_TX_CHAIN_POS;
764 sc->sc_fw.valid_rx_ant = (sc->sc_fw.phy_config &
765 IWM_FW_PHY_CFG_RX_CHAIN) >>
766 IWM_FW_PHY_CFG_RX_CHAIN_POS;
767 break;
768
769 case IWM_UCODE_TLV_API_CHANGES_SET: {
770 if (tlv_len != sizeof(struct iwm_ucode_api)) {
771 error = EINVAL;
772 goto parse_out;
773 }
774 if (iwm_set_ucode_api_flags(sc, tlv_data, capa)) {
775 error = EINVAL;
776 goto parse_out;
777 }
778 break;
779 }
780
781 case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
782 if (tlv_len != sizeof(struct iwm_ucode_capa)) {
783 error = EINVAL;
784 goto parse_out;
785 }
786 if (iwm_set_ucode_capabilities(sc, tlv_data, capa)) {
787 error = EINVAL;
788 goto parse_out;
789 }
790 break;
791 }
792
796 /* ignore, not used by current driver */
797 break;
798
799 case IWM_UCODE_TLV_SEC_RT_USNIFFER:
800 if ((error = iwm_firmware_store_section(sc,
801 IWM_UCODE_REGULAR_USNIFFER, tlv_data,
802 tlv_len)) != 0)
803 goto parse_out;
804 break;
805
806 case IWM_UCODE_TLV_PAGING:
807 if (tlv_len != sizeof(uint32_t)) {
808 error = EINVAL;
809 goto parse_out;
810 }
811 paging_mem_size = le32_to_cpup((const uint32_t *)tlv_data);
812
813 IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
814 "%s: Paging: paging enabled (size = %u bytes)\n",
815 __func__, paging_mem_size);
816 if (paging_mem_size > IWM_MAX_PAGING_IMAGE_SIZE) {
817 device_printf(sc->sc_dev,
818 "%s: Paging: driver supports up to %u bytes for paging image\n",
819 __func__, IWM_MAX_PAGING_IMAGE_SIZE);
820 error = EINVAL;
821 goto out;
822 }
823 if (paging_mem_size & (IWM_FW_PAGING_SIZE - 1)) {
824 device_printf(sc->sc_dev,
825 "%s: Paging: image isn't multiple %u\n",
826 __func__, IWM_FW_PAGING_SIZE);
827 error = EINVAL;
828 goto out;
829 }
830
831 sc->sc_fw.img[IWM_UCODE_REGULAR].paging_mem_size =
832 paging_mem_size;
833 usniffer_img = IWM_UCODE_REGULAR_USNIFFER;
834 sc->sc_fw.img[usniffer_img].paging_mem_size =
835 paging_mem_size;
836 break;
837
838 case IWM_UCODE_TLV_N_SCAN_CHANNELS:
839 if (tlv_len != sizeof(uint32_t)) {
840 error = EINVAL;
841 goto parse_out;
842 }
843 capa->n_scan_channels =
844 le32_to_cpup((const uint32_t *)tlv_data);
845 break;
846
847 case IWM_UCODE_TLV_FW_VERSION:
848 if (tlv_len != sizeof(uint32_t) * 3) {
849 error = EINVAL;
850 goto parse_out;
851 }
852 snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
853 "%u.%u.%u",
854 le32toh(((const uint32_t *)tlv_data)[0]),
855 le32toh(((const uint32_t *)tlv_data)[1]),
856 le32toh(((const uint32_t *)tlv_data)[2]));
857 break;
858
859 case IWM_UCODE_TLV_FW_MEM_SEG:
860 break;
861
862 default:
863 device_printf(sc->sc_dev,
864 "%s: unknown firmware section %d, abort\n",
865 __func__, tlv_type);
866 error = EINVAL;
867 goto parse_out;
868 }
869 }
870
871 KASSERT(error == 0, ("unhandled error"));
872
873 parse_out:
874 if (error) {
875 device_printf(sc->sc_dev, "firmware parse error %d, "
876 "section type %d\n", error, tlv_type);
877 }
878
879 out:
880 if (error) {
881 if (fw->fw_fp != NULL)
882 iwm_fw_info_free(fw);
883 }
884
885 return error;
886}
887
888/*
889 * DMA resource routines
890 */
891
892/* fwmem is used to load firmware onto the card */
893static int
894iwm_alloc_fwmem(struct iwm_softc *sc)
895{
896 /* Must be aligned on a 16-byte boundary. */
897 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->fw_dma,
898 IWM_FH_MEM_TB_MAX_LENGTH, 16);
899}
900
901/* tx scheduler rings. not used? */
902static int
903iwm_alloc_sched(struct iwm_softc *sc)
904{
905 /* TX scheduler rings must be aligned on a 1KB boundary. */
906 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
907 nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
908}
909
910/* keep-warm page is used internally by the card. see iwl-fh.h for more info */
911static int
912iwm_alloc_kw(struct iwm_softc *sc)
913{
914 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->kw_dma, 4096, 4096);
915}
916
917/* interrupt cause table */
918static int
919iwm_alloc_ict(struct iwm_softc *sc)
920{
921 return iwm_dma_contig_alloc(sc->sc_dmat, &sc->ict_dma,
922 IWM_ICT_SIZE, 1<<IWM_ICT_PADDR_SHIFT);
923}
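/*
 * Illustrative sketch (not part of the driver): each helper above follows
 * the same pattern - a single physically contiguous DMA allocation via
 * iwm_dma_contig_alloc(tag, dma, size, alignment), sized and aligned as the
 * hardware requires, and released again with iwm_dma_contig_free().  The
 * helper and the 4096-byte size/alignment below are only an example.
 */
#if 0
static int
iwm_example_alloc_page(struct iwm_softc *sc, struct iwm_dma_info *dma)
{
	return iwm_dma_contig_alloc(sc->sc_dmat, dma, 4096, 4096);
}
#endif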
924
925static int
926iwm_alloc_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
927{
928 bus_size_t size;
929 size_t descsz;
930 int count, i, error;
931
932 ring->cur = 0;
933 if (sc->cfg->mqrx_supported) {
934 count = IWM_RX_MQ_RING_COUNT;
935 descsz = sizeof(uint64_t);
936 } else {
938 descsz = sizeof(uint32_t);
939 }
940
941 /* Allocate RX descriptors (256-byte aligned). */
942 size = count * descsz;
943 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->free_desc_dma, size,
944 256);
945 if (error != 0) {
946 device_printf(sc->sc_dev,
947 "could not allocate RX ring DMA memory\n");
948 goto fail;
949 }
950 ring->desc = ring->free_desc_dma.vaddr;
951
952 /* Allocate RX status area (16-byte aligned). */
953 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->stat_dma,
954 sizeof(*ring->stat), 16);
955 if (error != 0) {
956 device_printf(sc->sc_dev,
957 "could not allocate RX status DMA memory\n");
958 goto fail;
959 }
960 ring->stat = ring->stat_dma.vaddr;
961
962 if (sc->cfg->mqrx_supported) {
963 size = count * sizeof(uint32_t);
964 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->used_desc_dma,
965 size, 256);
966 if (error != 0) {
967 device_printf(sc->sc_dev,
968 "could not allocate RX ring DMA memory\n");
969 goto fail;
970 }
971 }
972
973 /* Create RX buffer DMA tag. */
974 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
975 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
976 IWM_RBUF_SIZE, 1, IWM_RBUF_SIZE, 0, NULL, NULL, &ring->data_dmat);
977 if (error != 0) {
978 device_printf(sc->sc_dev,
979 "%s: could not create RX buf DMA tag, error %d\n",
980 __func__, error);
981 goto fail;
982 }
983
984 /* Allocate spare bus_dmamap_t for iwm_rx_addbuf() */
985 error = bus_dmamap_create(ring->data_dmat, 0, &ring->spare_map);
986 if (error != 0) {
987 device_printf(sc->sc_dev,
988 "%s: could not create RX buf DMA map, error %d\n",
989 __func__, error);
990 goto fail;
991 }
992
993 /*
994 * Allocate and map RX buffers.
995 */
996 for (i = 0; i < count; i++) {
997 struct iwm_rx_data *data = &ring->data[i];
998 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
999 if (error != 0) {
1000 device_printf(sc->sc_dev,
1001 "%s: could not create RX buf DMA map, error %d\n",
1002 __func__, error);
1003 goto fail;
1004 }
1005 data->m = NULL;
1006
1007 if ((error = iwm_rx_addbuf(sc, IWM_RBUF_SIZE, i)) != 0) {
1008 goto fail;
1009 }
1010 }
1011 return 0;
1012
1013fail: iwm_free_rx_ring(sc, ring);
1014 return error;
1015}
1016
1017static void
1018iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1019{
1020 /* Reset the ring state */
1021 ring->cur = 0;
1022
1023 /*
1024 * The hw rx ring index in shared memory must also be cleared,
1025 * otherwise the discrepancy can cause reprocessing chaos.
1026 */
1027 if (sc->rxq.stat)
1028 memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
1029}
1030
1031static void
1032iwm_free_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
1033{
1034 int count, i;
1035
1039
1040 count = sc->cfg->mqrx_supported ? IWM_RX_MQ_RING_COUNT :
1042
1043 for (i = 0; i < count; i++) {
1044 struct iwm_rx_data *data = &ring->data[i];
1045
1046 if (data->m != NULL) {
1047 bus_dmamap_sync(ring->data_dmat, data->map,
1048 BUS_DMASYNC_POSTREAD);
1049 bus_dmamap_unload(ring->data_dmat, data->map);
1050 m_freem(data->m);
1051 data->m = NULL;
1052 }
1053 if (data->map != NULL) {
1054 bus_dmamap_destroy(ring->data_dmat, data->map);
1055 data->map = NULL;
1056 }
1057 }
1058 if (ring->spare_map != NULL) {
1059 bus_dmamap_destroy(ring->data_dmat, ring->spare_map);
1060 ring->spare_map = NULL;
1061 }
1062 if (ring->data_dmat != NULL) {
1063 bus_dma_tag_destroy(ring->data_dmat);
1064 ring->data_dmat = NULL;
1065 }
1066}
1067
1068static int
1069iwm_alloc_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring, int qid)
1070{
1071 bus_addr_t paddr;
1072 bus_size_t size;
1073 size_t maxsize;
1074 int nsegments;
1075 int i, error;
1076
1077 ring->qid = qid;
1078 ring->queued = 0;
1079 ring->cur = 0;
1080
1081 /* Allocate TX descriptors (256-byte aligned). */
1082 size = IWM_TX_RING_COUNT * sizeof (struct iwm_tfd);
1083 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->desc_dma, size, 256);
1084 if (error != 0) {
1085 device_printf(sc->sc_dev,
1086 "could not allocate TX ring DMA memory\n");
1087 goto fail;
1088 }
1089 ring->desc = ring->desc_dma.vaddr;
1090
1091 /*
1092 * We only use rings 0 through 9 (4 EDCA + cmd) so there is no need
1093 * to allocate commands space for other rings.
1094 */
1095 if (qid > IWM_CMD_QUEUE)
1096 return 0;
1097
1098 size = IWM_TX_RING_COUNT * sizeof(struct iwm_device_cmd);
1099 error = iwm_dma_contig_alloc(sc->sc_dmat, &ring->cmd_dma, size, 4);
1100 if (error != 0) {
1101 device_printf(sc->sc_dev,
1102 "could not allocate TX cmd DMA memory\n");
1103 goto fail;
1104 }
1105 ring->cmd = ring->cmd_dma.vaddr;
1106
1107 /* FW commands may require more mapped space than packets. */
1108 if (qid == IWM_CMD_QUEUE) {
1109 maxsize = IWM_RBUF_SIZE;
1110 nsegments = 1;
1111 } else {
1112 maxsize = MCLBYTES;
1113 nsegments = IWM_MAX_SCATTER - 2;
1114 }
1115
1116 error = bus_dma_tag_create(sc->sc_dmat, 1, 0,
1117 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, maxsize,
1118 nsegments, maxsize, 0, NULL, NULL, &ring->data_dmat);
1119 if (error != 0) {
1120 device_printf(sc->sc_dev, "could not create TX buf DMA tag\n");
1121 goto fail;
1122 }
1123
1124 paddr = ring->cmd_dma.paddr;
1125 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1126 struct iwm_tx_data *data = &ring->data[i];
1127
1128 data->cmd_paddr = paddr;
1129 data->scratch_paddr = paddr + sizeof(struct iwm_cmd_header)
1130 + offsetof(struct iwm_tx_cmd, scratch);
1131 paddr += sizeof(struct iwm_device_cmd);
1132
1133 error = bus_dmamap_create(ring->data_dmat, 0, &data->map);
1134 if (error != 0) {
1135 device_printf(sc->sc_dev,
1136 "could not create TX buf DMA map\n");
1137 goto fail;
1138 }
1139 }
1140 KASSERT(paddr == ring->cmd_dma.paddr + size,
1141 ("invalid physical address"));
1142 return 0;
1143
1144fail: iwm_free_tx_ring(sc, ring);
1145 return error;
1146}
1147
1148static void
1149iwm_reset_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1150{
1151 int i;
1152
1153 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1154 struct iwm_tx_data *data = &ring->data[i];
1155
1156 if (data->m != NULL) {
1157 bus_dmamap_sync(ring->data_dmat, data->map,
1158 BUS_DMASYNC_POSTWRITE);
1159 bus_dmamap_unload(ring->data_dmat, data->map);
1160 m_freem(data->m);
1161 data->m = NULL;
1162 }
1163 }
1164 /* Clear TX descriptors. */
1165 memset(ring->desc, 0, ring->desc_dma.size);
1166 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
1167 BUS_DMASYNC_PREWRITE);
1168 sc->qfullmsk &= ~(1 << ring->qid);
1169 ring->queued = 0;
1170 ring->cur = 0;
1171
1172 if (ring->qid == IWM_CMD_QUEUE && sc->cmd_hold_nic_awake)
1174}
1175
1176static void
1177iwm_free_tx_ring(struct iwm_softc *sc, struct iwm_tx_ring *ring)
1178{
1179 int i;
1180
1183
1184 for (i = 0; i < IWM_TX_RING_COUNT; i++) {
1185 struct iwm_tx_data *data = &ring->data[i];
1186
1187 if (data->m != NULL) {
1188 bus_dmamap_sync(ring->data_dmat, data->map,
1189 BUS_DMASYNC_POSTWRITE);
1190 bus_dmamap_unload(ring->data_dmat, data->map);
1191 m_freem(data->m);
1192 data->m = NULL;
1193 }
1194 if (data->map != NULL) {
1195 bus_dmamap_destroy(ring->data_dmat, data->map);
1196 data->map = NULL;
1197 }
1198 }
1199 if (ring->data_dmat != NULL) {
1200 bus_dma_tag_destroy(ring->data_dmat);
1201 ring->data_dmat = NULL;
1202 }
1203}
1204
1205/*
1206 * High-level hardware frobbing routines
1207 */
1208
1209static void
1210iwm_enable_interrupts(struct iwm_softc *sc)
1211{
1214}
1215
1216static void
1217iwm_restore_interrupts(struct iwm_softc *sc)
1218{
1220}
1221
1222static void
1223iwm_disable_interrupts(struct iwm_softc *sc)
1224{
1225 /* disable interrupts */
1227
1228 /* acknowledge all interrupts */
1229 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1231}
1232
1233static void
1234iwm_ict_reset(struct iwm_softc *sc)
1235{
1237
1238 /* Reset ICT table. */
1239 memset(sc->ict_dma.vaddr, 0, IWM_ICT_SIZE);
1240 sc->ict_cur = 0;
1241
1242 /* Set physical address of ICT table (4KB aligned). */
1248
1249 /* Switch to ICT interrupt mode in driver. */
1251
1252 /* Re-enable interrupts. */
1253 IWM_WRITE(sc, IWM_CSR_INT, ~0);
1255}
1256
1257/* iwlwifi pcie/trans.c */
1258
1259/*
1260 * Since this .. hard-resets things, it's time to actually
1261 * mark the first vap (if any) as having no mac context.
1262 * It's annoying, but since the driver is potentially being
1263 * stop/start'ed whilst active (thanks openbsd port!) we
1264 * have to correctly track this.
1265 */
1266static void
1267iwm_stop_device(struct iwm_softc *sc)
1268{
1269 struct ieee80211com *ic = &sc->sc_ic;
1270 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
1271 int chnl, qid;
1272 uint32_t mask = 0;
1273
1274 /* tell the device to stop sending interrupts */
1276
1277 /*
1278 * FreeBSD-local: mark the first vap as not-uploaded,
1279 * so the next transition through auth/assoc
1280 * will correctly populate the MAC context.
1281 */
1282 if (vap) {
1283 struct iwm_vap *iv = IWM_VAP(vap);
1284 iv->phy_ctxt = NULL;
1285 iv->is_uploaded = 0;
1286 }
1287 sc->sc_firmware_state = 0;
1288 sc->sc_flags &= ~IWM_FLAG_TE_ACTIVE;
1289
1290 /* device going down, Stop using ICT table */
1291 sc->sc_flags &= ~IWM_FLAG_USE_ICT;
1292
1293 /* stop tx and rx. tx and rx bits, as usual, are from if_iwn */
1294
1295 if (iwm_nic_lock(sc)) {
1297
1298 /* Stop each Tx DMA channel */
1299 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1300 IWM_WRITE(sc,
1303 }
1304
1305 /* Wait for DMA channels to be idle */
1306 if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
1307 5000)) {
1308 device_printf(sc->sc_dev,
1309 "Failing on timeout while stopping DMA channel: [0x%08x]\n",
1311 }
1312 iwm_nic_unlock(sc);
1313 }
1314 iwm_pcie_rx_stop(sc);
1315
1316 /* Stop RX ring. */
1317 iwm_reset_rx_ring(sc, &sc->rxq);
1318
1319 /* Reset all TX rings. */
1320 for (qid = 0; qid < nitems(sc->txq); qid++)
1321 iwm_reset_tx_ring(sc, &sc->txq[qid]);
1322
1324 /* Power-down device's busmaster DMA clocks */
1325 if (iwm_nic_lock(sc)) {
1328 iwm_nic_unlock(sc);
1329 }
1330 DELAY(5);
1331 }
1332
1333 /* Make sure (redundant) we've released our request to stay awake */
1336
1337 /* Stop the device, and put it in low power state */
1338 iwm_apm_stop(sc);
1339
1340 /* stop and reset the on-board processor */
1342 DELAY(5000);
1343
1344 /*
1345 * Upon stop, the APM issues an interrupt if HW RF kill is set.
1346 */
1348
1349 /*
1350 * Even if we stop the HW, we still want the RF kill
1351 * interrupt
1352 */
1354 iwm_check_rfkill(sc);
1355
1357}
1358
1359/* iwlwifi: mvm/ops.c */
1360static void
1361iwm_nic_config(struct iwm_softc *sc)
1362{
1363 uint8_t radio_cfg_type, radio_cfg_step, radio_cfg_dash;
1364 uint32_t reg_val = 0;
1365 uint32_t phy_config = iwm_get_phy_config(sc);
1366
1367 radio_cfg_type = (phy_config & IWM_FW_PHY_CFG_RADIO_TYPE) >>
1369 radio_cfg_step = (phy_config & IWM_FW_PHY_CFG_RADIO_STEP) >>
1371 radio_cfg_dash = (phy_config & IWM_FW_PHY_CFG_RADIO_DASH) >>
1373
1374 /* SKU control */
1375 reg_val |= IWM_CSR_HW_REV_STEP(sc->sc_hw_rev) <<
1377 reg_val |= IWM_CSR_HW_REV_DASH(sc->sc_hw_rev) <<
1379
1380 /* radio configuration */
1381 reg_val |= radio_cfg_type << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE;
1382 reg_val |= radio_cfg_step << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP;
1383 reg_val |= radio_cfg_dash << IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH;
1384
1393 reg_val);
1394
1395 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1396 "Radio type=0x%x-0x%x-0x%x\n", radio_cfg_type,
1397 radio_cfg_step, radio_cfg_dash);
1398
1399 /*
1400 * W/A : NIC is stuck in a reset state after Early PCIe power off
1401 * (PCIe power is lost before PERST# is asserted), causing ME FW
1402 * to lose ownership and not being able to obtain it back.
1403 */
1408 }
1409}
1410
1411static int
1412iwm_nic_rx_mq_init(struct iwm_softc *sc)
1413{
1414 int enabled;
1415
1416 if (!iwm_nic_lock(sc))
1417 return EBUSY;
1418
1419 /* Stop RX DMA. */
1421 /* Disable RX used and free queue operation. */
1423
1425 sc->rxq.free_desc_dma.paddr);
1427 sc->rxq.used_desc_dma.paddr);
1429 sc->rxq.stat_dma.paddr);
1433
1434 /* We configure only queue 0 for now. */
1435 enabled = ((1 << 0) << 16) | (1 << 0);
1436
1437 /* Enable RX DMA, 4KB buffer size. */
1444
1445 /* Enable RX DMA snooping. */
1451
1452 /* Enable the configured queue(s). */
1454
1455 iwm_nic_unlock(sc);
1456
1458
1460
1461 return (0);
1462}
1463
1464static int
1465iwm_nic_rx_legacy_init(struct iwm_softc *sc)
1466{
1467
1468 /* Stop Rx DMA */
1469 iwm_pcie_rx_stop(sc);
1470
1471 if (!iwm_nic_lock(sc))
1472 return EBUSY;
1473
1474 /* reset and flush pointers */
1479
1480 /* Set physical address of RX ring (256-byte aligned). */
1481 IWM_WRITE(sc,
1483 sc->rxq.free_desc_dma.paddr >> 8);
1484
1485 /* Set physical address of RX status (16-byte aligned). */
1486 IWM_WRITE(sc,
1488
1489 /* Enable Rx DMA
1490 * XXX 5000 HW isn't supported by the iwm(4) driver.
1491 * IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
1492 * the credit mechanism in 5000 HW RX FIFO
1493 * Direct rx interrupts to hosts
1494 * Rx buffer size 4 or 8k or 12k
1495 * RB timeout 0x10
1496 * 256 RBDs
1497 */
1505
1507
1508 /* W/A for interrupt coalescing bug in 7260 and 3160 */
1511
1512 iwm_nic_unlock(sc);
1513
1515
1516 return 0;
1517}
1518
1519static int
1520iwm_nic_rx_init(struct iwm_softc *sc)
1521{
1522 if (sc->cfg->mqrx_supported)
1523 return iwm_nic_rx_mq_init(sc);
1524 else
1525 return iwm_nic_rx_legacy_init(sc);
1526}
1527
1528static int
1529iwm_nic_tx_init(struct iwm_softc *sc)
1530{
1531 int qid;
1532
1533 if (!iwm_nic_lock(sc))
1534 return EBUSY;
1535
1536 /* Deactivate TX scheduler. */
1538
1539 /* Set physical address of "keep warm" page (16-byte aligned). */
1541
1542 /* Initialize TX rings. */
1543 for (qid = 0; qid < nitems(sc->txq); qid++) {
1544 struct iwm_tx_ring *txq = &sc->txq[qid];
1545
1546 /* Set physical address of TX ring (256-byte aligned). */
1548 txq->desc_dma.paddr >> 8);
1549 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
1550 "%s: loading ring %d descriptors (%p) at %lx\n",
1551 __func__,
1552 qid, txq->desc,
1553 (unsigned long) (txq->desc_dma.paddr >> 8));
1554 }
1555
1559
1560 iwm_nic_unlock(sc);
1561
1562 return 0;
1563}
1564
1565static int
1566iwm_nic_init(struct iwm_softc *sc)
1567{
1568 int error;
1569
1570 iwm_apm_init(sc);
1572 iwm_set_pwr(sc);
1573
1574 iwm_nic_config(sc);
1575
1576 if ((error = iwm_nic_rx_init(sc)) != 0)
1577 return error;
1578
1579 /*
1580 * Ditto for TX, from iwn
1581 */
1582 if ((error = iwm_nic_tx_init(sc)) != 0)
1583 return error;
1584
1585 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
1586 "%s: shadow registers enabled\n", __func__);
1588
1589 return 0;
1590}
1591
1592int
1593iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
1594{
1595 int qmsk;
1596
1597 qmsk = 1 << qid;
1598
1599 if (!iwm_nic_lock(sc)) {
1600 device_printf(sc->sc_dev, "%s: cannot enable txq %d\n",
1601 __func__, qid);
1602 return EBUSY;
1603 }
1604
1605 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
1606
1607 if (qid == IWM_CMD_QUEUE) {
1608 /* Disable the scheduler. */
1610
1611 /* Stop the TX queue prior to configuration. */
1615
1616 iwm_nic_unlock(sc);
1617
1618 /* Disable aggregations for this queue. */
1620
1621 if (!iwm_nic_lock(sc)) {
1622 device_printf(sc->sc_dev,
1623 "%s: cannot enable txq %d\n", __func__, qid);
1624 return EBUSY;
1625 }
1627 iwm_nic_unlock(sc);
1628
1629 iwm_write_mem32(sc,
1631 /* Set scheduler window size and frame limit. */
1632 iwm_write_mem32(sc,
1634 sizeof(uint32_t),
1639
1640 if (!iwm_nic_lock(sc)) {
1641 device_printf(sc->sc_dev,
1642 "%s: cannot enable txq %d\n", __func__, qid);
1643 return EBUSY;
1644 }
1650
1651 /* Enable the scheduler for this queue. */
1653 } else {
1654 struct iwm_scd_txq_cfg_cmd cmd;
1655 int error;
1656
1657 iwm_nic_unlock(sc);
1658
1659 memset(&cmd, 0, sizeof(cmd));
1660 cmd.scd_queue = qid;
1661 cmd.enable = 1;
1662 cmd.sta_id = sta_id;
1663 cmd.tx_fifo = fifo;
1664 cmd.aggregate = 0;
1665 cmd.window = IWM_FRAME_LIMIT;
1666
1668 sizeof(cmd), &cmd);
1669 if (error) {
1670 device_printf(sc->sc_dev,
1671 "cannot enable txq %d\n", qid);
1672 return error;
1673 }
1674
1675 if (!iwm_nic_lock(sc))
1676 return EBUSY;
1677 }
1678
1679 iwm_nic_unlock(sc);
1680
1681 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
1682 __func__, qid, fifo);
1683
1684 return 0;
1685}
1686
1687static int
1688iwm_trans_pcie_fw_alive(struct iwm_softc *sc, uint32_t scd_base_addr)
1689{
1690 int error, chnl;
1691
1692 int clear_dwords = (IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND -
1693 IWM_SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(uint32_t);
1694
1695 if (!iwm_nic_lock(sc))
1696 return EBUSY;
1697
1698 iwm_ict_reset(sc);
1699
1701 if (scd_base_addr != 0 &&
1702 scd_base_addr != sc->scd_base_addr) {
1703 device_printf(sc->sc_dev,
1704 "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
1705 __func__, sc->scd_base_addr, scd_base_addr);
1706 }
1707
1708 iwm_nic_unlock(sc);
1709
1710 /* reset context data, TX status and translation data */
1711 error = iwm_write_mem(sc,
1713 NULL, clear_dwords);
1714 if (error)
1715 return EBUSY;
1716
1717 if (!iwm_nic_lock(sc))
1718 return EBUSY;
1719
1720 /* Set physical address of TX scheduler rings (1KB aligned). */
1722
1724
1725 iwm_nic_unlock(sc);
1726
1727 /* enable command channel */
1728 error = iwm_enable_txq(sc, 0 /* unused */, IWM_CMD_QUEUE, 7);
1729 if (error)
1730 return error;
1731
1732 if (!iwm_nic_lock(sc))
1733 return EBUSY;
1734
1735 iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
1736
1737 /* Enable DMA channels. */
1738 for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
1742 }
1743
1746
1747 iwm_nic_unlock(sc);
1748
1749 /* Enable L1-Active */
1753 }
1754
1755 return error;
1756}
1757
1758/*
1759 * NVM read access and content parsing. We do not support
1760 * external NVM or writing NVM.
1761 * iwlwifi/mvm/nvm.c
1762 */
1763
1764/* Default NVM size to read */
1765#define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
1766
1767#define IWM_NVM_WRITE_OPCODE 1
1768#define IWM_NVM_READ_OPCODE 0
1769
1770/* load nvm chunk response */
1771enum {
1772 IWM_READ_NVM_CHUNK_SUCCEED = 0,
1773 IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
1774};
1775
1776static int
1777iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
1778 uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
1779{
1780 struct iwm_nvm_access_cmd nvm_access_cmd = {
1781 .offset = htole16(offset),
1782 .length = htole16(length),
1783 .type = htole16(section),
1784 .op_code = IWM_NVM_READ_OPCODE,
1785 };
1786 struct iwm_nvm_access_resp *nvm_resp;
1787 struct iwm_rx_packet *pkt;
1788 struct iwm_host_cmd cmd = {
1791 .data = { &nvm_access_cmd, },
1792 };
1793 int ret, bytes_read, offset_read;
1794 uint8_t *resp_data;
1795
1796 cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
1797
1798 ret = iwm_send_cmd(sc, &cmd);
1799 if (ret) {
1800 device_printf(sc->sc_dev,
1801 "Could not send NVM_ACCESS command (error=%d)\n", ret);
1802 return ret;
1803 }
1804
1805 pkt = cmd.resp_pkt;
1806
1807 /* Extract NVM response */
1808 nvm_resp = (void *)pkt->data;
1809 ret = le16toh(nvm_resp->status);
1810 bytes_read = le16toh(nvm_resp->length);
1811 offset_read = le16toh(nvm_resp->offset);
1812 resp_data = nvm_resp->data;
1813 if (ret) {
1814 if ((offset != 0) &&
1815 (ret == IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
1816 /*
1817 * meaning of NOT_VALID_ADDRESS:
1818 * driver try to read chunk from address that is
1819 * multiple of 2K and got an error since addr is empty.
1820 * meaning of (offset != 0): driver already
1821 * read valid data from another chunk so this case
1822 * is not an error.
1823 */
1824 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1825 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
1826 offset);
1827 *len = 0;
1828 ret = 0;
1829 } else {
1830 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1831 "NVM access command failed with status %d\n", ret);
1832 ret = EIO;
1833 }
1834 goto exit;
1835 }
1836
1837 if (offset_read != offset) {
1838 device_printf(sc->sc_dev,
1839 "NVM ACCESS response with invalid offset %d\n",
1840 offset_read);
1841 ret = EINVAL;
1842 goto exit;
1843 }
1844
1845 if (bytes_read > length) {
1846 device_printf(sc->sc_dev,
1847 "NVM ACCESS response with too much data "
1848 "(%d bytes requested, %d bytes received)\n",
1849 length, bytes_read);
1850 ret = EINVAL;
1851 goto exit;
1852 }
1853
1854 /* Write data to NVM */
1855 memcpy(data + offset, resp_data, bytes_read);
1856 *len = bytes_read;
1857
1858 exit:
1859 iwm_free_resp(sc, &cmd);
1860 return ret;
1861}
1862
1863/*
1864 * Reads an NVM section completely.
1865 * NICs prior to 7000 family don't have a real NVM, but just read
1866 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
1867 * by uCode, we need to manually check in this case that we don't
1868 * overflow and try to read more than the EEPROM size.
1869 * For 7000 family NICs, we supply the maximal size we can read, and
1870 * the uCode fills the response with as much data as we can,
1871 * without overflowing, so no check is needed.
1872 */
1873static int
1874iwm_nvm_read_section(struct iwm_softc *sc,
1875 uint16_t section, uint8_t *data, uint16_t *len, uint32_t size_read)
1876{
1877 uint16_t seglen, length, offset = 0;
1878 int ret;
1879
1880 /* Set nvm section read length */
1882
1883 seglen = length;
1884
1885 /* Read the NVM until exhausted (reading less than requested) */
1886 while (seglen == length) {
1887 /* Check no memory assumptions fail and cause an overflow */
1888 if ((size_read + offset + length) >
1889 sc->cfg->eeprom_size) {
1890 device_printf(sc->sc_dev,
1891 "EEPROM size is too small for NVM\n");
1892 return ENOBUFS;
1893 }
1894
1895 ret = iwm_nvm_read_chunk(sc, section, offset, length, data, &seglen);
1896 if (ret) {
1897 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1898 "Cannot read NVM from section %d offset %d, length %d\n",
1899 section, offset, length);
1900 return ret;
1901 }
1902 offset += seglen;
1903 }
1904
1905 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
1906 "NVM section %d read completed\n", section);
1907 *len = offset;
1908 return 0;
1909}
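/*
 * Illustrative sketch (not part of the driver): a caller reads one whole NVM
 * section into a buffer of at least sc->cfg->eeprom_size bytes and gets the
 * number of bytes actually read back in *len.  The helper is hypothetical.
 */
#if 0
static int
iwm_example_read_sw_section(struct iwm_softc *sc, uint8_t *buf, uint16_t *len)
{
	return iwm_nvm_read_section(sc, IWM_NVM_SECTION_TYPE_SW, buf, len, 0);
}
#endif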
1910
1911/*
1912 * BEGIN IWM_NVM_PARSE
1913 */
1914
1915/* iwlwifi/iwl-nvm-parse.c */
1916
1917/*
1918 * Translate EEPROM flags to net80211.
1919 */
1920static uint32_t
1921iwm_eeprom_channel_flags(uint16_t ch_flags)
1922{
1923 uint32_t nflags;
1924
1925 nflags = 0;
1926 if ((ch_flags & IWM_NVM_CHANNEL_ACTIVE) == 0)
1927 nflags |= IEEE80211_CHAN_PASSIVE;
1928 if ((ch_flags & IWM_NVM_CHANNEL_IBSS) == 0)
1929 nflags |= IEEE80211_CHAN_NOADHOC;
1930 if (ch_flags & IWM_NVM_CHANNEL_RADAR) {
1931 nflags |= IEEE80211_CHAN_DFS;
1932 /* Just in case. */
1933 nflags |= IEEE80211_CHAN_NOADHOC;
1934 }
1935
1936 return (nflags);
1937}
1938
1939static void
1940iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
1941 int maxchans, int *nchans, int ch_idx, size_t ch_num,
1942 const uint8_t bands[])
1943{
1944 const uint16_t * const nvm_ch_flags = sc->nvm_data->nvm_ch_flags;
1945 uint32_t nflags;
1946 uint16_t ch_flags;
1947 uint8_t ieee;
1948 int error;
1949
1950 for (; ch_idx < ch_num; ch_idx++) {
1951 ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
1952 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
1953 ieee = iwm_nvm_channels[ch_idx];
1954 else
1955 ieee = iwm_nvm_channels_8000[ch_idx];
1956
1957 if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
1958 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1959 "Ch. %d Flags %x [%sGHz] - No traffic\n",
1960 ieee, ch_flags,
1961 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1962 "5.2" : "2.4");
1963 continue;
1964 }
1965
1966 nflags = iwm_eeprom_channel_flags(ch_flags);
1967 error = ieee80211_add_channel(chans, maxchans, nchans,
1968 ieee, 0, 0, nflags, bands);
1969 if (error != 0)
1970 break;
1971
1972 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
1973 "Ch. %d Flags %x [%sGHz] - Added\n",
1974 ieee, ch_flags,
1975 (ch_idx >= IWM_NUM_2GHZ_CHANNELS) ?
1976 "5.2" : "2.4");
1977 }
1978}
1979
1980static void
1981iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
1982 struct ieee80211_channel chans[])
1983{
1984 struct iwm_softc *sc = ic->ic_softc;
1985 struct iwm_nvm_data *data = sc->nvm_data;
1986 uint8_t bands[IEEE80211_MODE_BYTES];
1987 size_t ch_num;
1988
1989 memset(bands, 0, sizeof(bands));
1990 /* 1-13: 11b/g channels. */
1991 setbit(bands, IEEE80211_MODE_11B);
1992 setbit(bands, IEEE80211_MODE_11G);
1993 iwm_add_channel_band(sc, chans, maxchans, nchans, 0,
1994 IWM_NUM_2GHZ_CHANNELS - 1, bands);
1995
1996 /* 14: 11b channel only. */
1997 clrbit(bands, IEEE80211_MODE_11G);
1998 iwm_add_channel_band(sc, chans, maxchans, nchans,
1999 IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
2000
2001 if (data->sku_cap_band_52GHz_enable) {
2002 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2003 ch_num = nitems(iwm_nvm_channels);
2004 else
2005 ch_num = nitems(iwm_nvm_channels_8000);
2006 memset(bands, 0, sizeof(bands));
2007 setbit(bands, IEEE80211_MODE_11A);
2008 iwm_add_channel_band(sc, chans, maxchans, nchans,
2009 IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
2010 }
2011}
2012
2013static void
2014iwm_set_hw_address_family_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
2015 const uint16_t *mac_override, const uint16_t *nvm_hw)
2016{
2017 const uint8_t *hw_addr;
2018
2019 if (mac_override) {
2020 static const uint8_t reserved_mac[] = {
2021 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
2022 };
2023
2024 hw_addr = (const uint8_t *)(mac_override +
2026
2027 /*
2028 * Store the MAC address from MAO section.
2029 * No byte swapping is required in MAO section
2030 */
2031 IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
2032
2033 /*
2034 * Force the use of the OTP MAC address in case of reserved MAC
2035 * address in the NVM, or if address is given but invalid.
2036 */
2037 if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
2038 !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
2040 !IEEE80211_IS_MULTICAST(data->hw_addr))
2041 return;
2042
2043 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2044 "%s: mac address from nvm override section invalid\n",
2045 __func__);
2046 }
2047
2048 if (nvm_hw) {
2049 /* read the mac address from WFMP registers */
2050 uint32_t mac_addr0 =
2051 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
2052 uint32_t mac_addr1 =
2053 htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
2054
2055 hw_addr = (const uint8_t *)&mac_addr0;
2056 data->hw_addr[0] = hw_addr[3];
2057 data->hw_addr[1] = hw_addr[2];
2058 data->hw_addr[2] = hw_addr[1];
2059 data->hw_addr[3] = hw_addr[0];
2060
2061 hw_addr = (const uint8_t *)&mac_addr1;
2062 data->hw_addr[4] = hw_addr[1];
2063 data->hw_addr[5] = hw_addr[0];
2064
2065 return;
2066 }
2067
2068 device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
2069 memset(data->hw_addr, 0, sizeof(data->hw_addr));
2070}
2071
2072static int
2073iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2074 const uint16_t *phy_sku)
2075{
2076 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2077 return le16_to_cpup(nvm_sw + IWM_SKU);
2078
2079 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
2080}
2081
2082static int
2083iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2084{
2085 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2086 return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
2087 else
2088 return le32_to_cpup((const uint32_t *)(nvm_sw +
2089 IWM_NVM_VERSION_8000));
2090}
2091
2092static int
2093iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
2094 const uint16_t *phy_sku)
2095{
2096 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2097 return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
2098
2099 return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
2100}
2101
2102static int
2103iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
2104{
2105 int n_hw_addr;
2106
2107 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
2108 return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
2109
2110 n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
2111
2112 return n_hw_addr & IWM_N_HW_ADDR_MASK;
2113}
2114
2115static void
2116iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
2117 uint32_t radio_cfg)
2118{
2119 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2120 data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
2121 data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
2122 data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
2123 data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
2124 return;
2125 }
2126
2127 /* set the radio configuration for family 8000 */
2134}
2135
2136static int
2137iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data,
2138 const uint16_t *nvm_hw, const uint16_t *mac_override)
2139{
2140#ifdef notyet /* for FAMILY 9000 */
2141 if (cfg->mac_addr_from_csr) {
2142 iwm_set_hw_address_from_csr(sc, data);
2143 } else
2144#endif
2145 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2146 const uint8_t *hw_addr = (const uint8_t *)(nvm_hw + IWM_HW_ADDR);
2147
2148 /* The byte order is little endian 16 bit, meaning 214365 */
2149 data->hw_addr[0] = hw_addr[1];
2150 data->hw_addr[1] = hw_addr[0];
2151 data->hw_addr[2] = hw_addr[3];
2152 data->hw_addr[3] = hw_addr[2];
2153 data->hw_addr[4] = hw_addr[5];
2154 data->hw_addr[5] = hw_addr[4];
2155 } else {
2156 iwm_set_hw_address_family_8000(sc, data, mac_override, nvm_hw);
2157 }
2158
2159 if (!iwm_is_valid_ether_addr(data->hw_addr)) {
2160 device_printf(sc->sc_dev, "no valid mac address was found\n");
2161 return EINVAL;
2162 }
2163
2164 return 0;
2165}
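/*
 * Worked example (not in the original source): the NVM HW section stores the
 * MAC address as three little-endian 16-bit words, so raw bytes
 * 00 11 22 33 44 55 are swapped pairwise by iwm_set_hw_address() above,
 * yielding 11:00:33:22:55:44.
 */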
2166
2167static struct iwm_nvm_data *
2168iwm_parse_nvm_data(struct iwm_softc *sc,
2169 const uint16_t *nvm_hw, const uint16_t *nvm_sw,
2170 const uint16_t *nvm_calib, const uint16_t *mac_override,
2171 const uint16_t *phy_sku, const uint16_t *regulatory)
2172{
2173 struct iwm_nvm_data *data;
2174 uint32_t sku, radio_cfg;
2175 uint16_t lar_config;
2176
2177 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2178 data = malloc(sizeof(*data) +
2179 IWM_NUM_CHANNELS * sizeof(uint16_t),
2180 M_DEVBUF, M_NOWAIT | M_ZERO);
2181 } else {
2182 data = malloc(sizeof(*data) +
2183 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t),
2184 M_DEVBUF, M_NOWAIT | M_ZERO);
2185 }
2186 if (!data)
2187 return NULL;
2188
2189 data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
2190
2191 radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
2192 iwm_set_radio_cfg(sc, data, radio_cfg);
2193
2194 sku = iwm_get_sku(sc, nvm_sw, phy_sku);
2195 data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
2196 data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
2197 data->sku_cap_11n_enable = 0;
2198
2199 data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
2200
2201 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2202 /* TODO: use IWL_NVM_EXT */
2203 uint16_t lar_offset = data->nvm_version < 0xE39 ?
2204 IWM_NVM_LAR_OFFSET_8000_OLD :
2205 IWM_NVM_LAR_OFFSET_8000;
2206
2207 lar_config = le16_to_cpup(regulatory + lar_offset);
2208 data->lar_enabled = !!(lar_config &
2209 IWM_NVM_LAR_ENABLED_8000);
2210 }
2211
2212 /* If no valid mac address was found - bail out */
2213 if (iwm_set_hw_address(sc, data, nvm_hw, mac_override)) {
2214 free(data, M_DEVBUF);
2215 return NULL;
2216 }
2217
2218 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2219 memcpy(data->nvm_ch_flags, sc->cfg->nvm_type == IWM_NVM_SDP ?
2220 &regulatory[0] : &nvm_sw[IWM_NVM_CHANNELS],
2221 IWM_NUM_CHANNELS * sizeof(uint16_t));
2222 } else {
2223 memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
2224 IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
2225 }
2226
2227 return data;
2228}
2229
2230static void
2231iwm_free_nvm_data(struct iwm_nvm_data *data)
2232{
2233 if (data != NULL)
2234 free(data, M_DEVBUF);
2235}
2236
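/*
 * Validate that the sections required for the device family are
 * present, then hand the individual section pointers to
 * iwm_parse_nvm_data().
 */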
2237static struct iwm_nvm_data *
2238iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
2239{
2240 const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
2241
2242 /* Checking for required sections */
2243 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000) {
2244 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2245 !sections[sc->cfg->nvm_hw_section_num].data) {
2246 device_printf(sc->sc_dev,
2247 "Can't parse empty OTP/NVM sections\n");
2248 return NULL;
2249 }
2250 } else if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2251 /* SW and REGULATORY sections are mandatory */
2252 if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
2253 !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
2254 device_printf(sc->sc_dev,
2255 "Can't parse empty OTP/NVM sections\n");
2256 return NULL;
2257 }
2258 /* MAC_OVERRIDE or at least HW section must exist */
2259 if (!sections[sc->cfg->nvm_hw_section_num].data &&
2260 !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
2261 device_printf(sc->sc_dev,
2262 "Can't parse mac_address, empty sections\n");
2263 return NULL;
2264 }
2265
2266 /* PHY_SKU section is mandatory in B0 */
2267 if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
2268 device_printf(sc->sc_dev,
2269 "Can't parse phy_sku in B0, empty sections\n");
2270 return NULL;
2271 }
2272 } else {
2273 panic("unknown device family %d\n", sc->cfg->device_family);
2274 }
2275
2276 hw = (const uint16_t *) sections[sc->cfg->nvm_hw_section_num].data;
2277 sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
2278 calib = (const uint16_t *)
2279 sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
2280 regulatory = sc->cfg->nvm_type == IWM_NVM_SDP ?
2281 (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY_SDP].data :
2282 (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
2283 mac_override = (const uint16_t *)
2284 sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
2285 phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
2286
2287 return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
2288 phy_sku, regulatory);
2289}
2290
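/*
 * Read every NVM/OTP section from the device into a private copy and
 * parse the result into sc->nvm_data. The temporary per-section
 * buffers are freed once parsing is done.
 */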
2291static int
2292iwm_nvm_init(struct iwm_softc *sc)
2293{
2294 struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
2295 int i, ret, section;
2296 uint32_t size_read = 0;
2297 uint8_t *nvm_buffer, *temp;
2298 uint16_t len;
2299
2300 memset(nvm_sections, 0, sizeof(nvm_sections));
2301
2302 if (sc->cfg->nvm_hw_section_num >= IWM_NVM_NUM_OF_SECTIONS)
2303 return EINVAL;
2304
2305 /* load NVM values from nic */
2306 /* Read From FW NVM */
2307 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM, "Read from NVM\n");
2308
2309 nvm_buffer = malloc(sc->cfg->eeprom_size, M_DEVBUF, M_NOWAIT | M_ZERO);
2310 if (!nvm_buffer)
2311 return ENOMEM;
2312 for (section = 0; section < IWM_NVM_NUM_OF_SECTIONS; section++) {
2313 /* we override the constness for initial read */
2314 ret = iwm_nvm_read_section(sc, section, nvm_buffer,
2315 &len, size_read);
2316 if (ret)
2317 continue;
2318 size_read += len;
2319 temp = malloc(len, M_DEVBUF, M_NOWAIT);
2320 if (!temp) {
2321 ret = ENOMEM;
2322 break;
2323 }
2324 memcpy(temp, nvm_buffer, len);
2325
2326 nvm_sections[section].data = temp;
2327 nvm_sections[section].length = len;
2328 }
2329 if (!size_read)
2330 device_printf(sc->sc_dev, "OTP is blank\n");
2331 free(nvm_buffer, M_DEVBUF);
2332
2333 sc->nvm_data = iwm_parse_nvm_sections(sc, nvm_sections);
2334 if (!sc->nvm_data)
2335 return EINVAL;
2336 IWM_DPRINTF(sc, IWM_DEBUG_EEPROM | IWM_DEBUG_RESET,
2337 "nvm version = %x\n", sc->nvm_data->nvm_version);
2338
2339 for (i = 0; i < IWM_NVM_NUM_OF_SECTIONS; i++) {
2340 if (nvm_sections[i].data != NULL)
2341 free(nvm_sections[i].data, M_DEVBUF);
2342 }
2343
2344 return 0;
2345}
2346
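/*
 * Upload one firmware section to the device, at most
 * IWM_FH_MEM_TB_MAX_LENGTH bytes at a time: each chunk is copied into
 * the fw_dma bounce buffer and handed to the FH DMA engine via
 * iwm_pcie_load_firmware_chunk().
 */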
2347static int
2348iwm_pcie_load_section(struct iwm_softc *sc, uint8_t section_num,
2349 const struct iwm_fw_desc *section)
2350{
2351 struct iwm_dma_info *dma = &sc->fw_dma;
2352 uint8_t *v_addr;
2353 bus_addr_t p_addr;
2354 uint32_t offset, chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, section->len);
2355 int ret = 0;
2356
2357 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2358 "%s: [%d] uCode section being loaded...\n",
2359 __func__, section_num);
2360
2361 v_addr = dma->vaddr;
2362 p_addr = dma->paddr;
2363
2364 for (offset = 0; offset < section->len; offset += chunk_sz) {
2365 uint32_t copy_size, dst_addr;
2366 int extended_addr = FALSE;
2367
2368 copy_size = MIN(chunk_sz, section->len - offset);
2369 dst_addr = section->offset + offset;
2370
2371 if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
2372 dst_addr <= IWM_FW_MEM_EXTENDED_END)
2373 extended_addr = TRUE;
2374
2375 if (extended_addr)
2376 iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
2377 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2378
2379 memcpy(v_addr, (const uint8_t *)section->data + offset,
2380 copy_size);
2381 bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
2382 ret = iwm_pcie_load_firmware_chunk(sc, dst_addr, p_addr,
2383 copy_size);
2384
2385 if (extended_addr)
2386 iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
2387 IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
2388
2389 if (ret) {
2390 device_printf(sc->sc_dev,
2391 "%s: Could not load the [%d] uCode section\n",
2392 __func__, section_num);
2393 break;
2394 }
2395 }
2396
2397 return ret;
2398}
2399
2400/*
2401 * ucode
2402 */
2403static int
2404iwm_pcie_load_firmware_chunk(struct iwm_softc *sc, uint32_t dst_addr,
2405 bus_addr_t phy_addr, uint32_t byte_cnt)
2406{
2407 sc->sc_fw_chunk_done = 0;
2408
2409 if (!iwm_nic_lock(sc))
2410 return EBUSY;
2411
2414
2416 dst_addr);
2417
2420
2422 (iwm_get_dma_hi_addr(phy_addr)
2424
2429
2434
2435 iwm_nic_unlock(sc);
2436
2437 /* wait up to 5s for this segment to load */
2438 msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz * 5);
2439
2440 if (!sc->sc_fw_chunk_done) {
2441 device_printf(sc->sc_dev,
2442 "fw chunk addr 0x%x len %d failed to load\n",
2443 dst_addr, byte_cnt);
2444 return ETIMEDOUT;
2445 }
2446
2447 return 0;
2448}
2449
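/*
 * Load the firmware sections belonging to one CPU on 8000-family
 * devices. After each section the firmware is told which sections
 * have been loaded by shifting a growing bitmask into the
 * FH_UCODE_LOAD_STATUS register (low half for CPU1, high half for
 * CPU2).
 */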
2450static int
2451iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc,
2452 const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2453{
2454 int shift_param;
2455 int i, ret = 0, sec_num = 0x1;
2456 uint32_t val, last_read_idx = 0;
2457
2458 if (cpu == 1) {
2459 shift_param = 0;
2460 *first_ucode_section = 0;
2461 } else {
2462 shift_param = 16;
2463 (*first_ucode_section)++;
2464 }
2465
2466 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2467 last_read_idx = i;
2468
2469 /*
2470 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
2471 * sections from the CPU2 sections.
2472 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
2473 * non-paged sections from the CPU2 paging sections.
2474 */
2475 if (!image->sec[i].data ||
2476 image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2477 image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2478 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2479 "Break since Data not valid or Empty section, sec = %d\n",
2480 i);
2481 break;
2482 }
2483 ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2484 if (ret)
2485 return ret;
2486
2487 /* Notify the ucode of the loaded section number and status */
2488 if (iwm_nic_lock(sc)) {
2489 val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
2490 val = val | (sec_num << shift_param);
2491 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
2492 sec_num = (sec_num << 1) | 0x1;
2493 iwm_nic_unlock(sc);
2494 }
2495 }
2496
2497 *first_ucode_section = last_read_idx;
2498
2499 iwm_enable_interrupts(sc);
2500
2501 if (iwm_nic_lock(sc)) {
2502 if (cpu == 1)
2503 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
2504 else
2505 IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
2506 iwm_nic_unlock(sc);
2507 }
2508
2509 return 0;
2510}
2511
2512static int
2513iwm_pcie_load_cpu_sections(struct iwm_softc *sc,
2514 const struct iwm_fw_img *image, int cpu, int *first_ucode_section)
2515{
2516 int i, ret = 0;
2517 uint32_t last_read_idx = 0;
2518
2519 if (cpu == 1) {
2520 *first_ucode_section = 0;
2521 } else {
2522 (*first_ucode_section)++;
2523 }
2524
2525 for (i = *first_ucode_section; i < IWM_UCODE_SECTION_MAX; i++) {
2526 last_read_idx = i;
2527
2528 /*
2529 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
2530 * sections from the CPU2 sections.
2531 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
2532 * non-paged sections from the CPU2 paging sections.
2533 */
2534 if (!image->sec[i].data ||
2535 image->sec[i].offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
2536 image->sec[i].offset == IWM_PAGING_SEPARATOR_SECTION) {
2537 IWM_DPRINTF(sc, IWM_DEBUG_RESET,
2538 "Break since Data not valid or Empty section, sec = %d\n",
2539 i);
2540 break;
2541 }
2542
2543 ret = iwm_pcie_load_section(sc, i, &image->sec[i]);
2544 if (ret)
2545 return ret;
2546 }
2547
2548 *first_ucode_section = last_read_idx;
2549
2550 return 0;
2551
2552}
2553
2554static int
2555iwm_pcie_load_given_ucode(struct iwm_softc *sc, const struct iwm_fw_img *image)
2556{
2557 int ret = 0;
2558 int first_ucode_section;
2559
2560 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2561 image->is_dual_cpus ? "Dual" : "Single");
2562
2563 /* load to FW the binary non secured sections of CPU1 */
2564 ret = iwm_pcie_load_cpu_sections(sc, image, 1, &first_ucode_section);
2565 if (ret)
2566 return ret;
2567
2568 if (image->is_dual_cpus) {
2569 /* set CPU2 header address */
2570 if (iwm_nic_lock(sc)) {
2571 iwm_write_prph(sc,
2572 IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
2573 IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE);
2574 iwm_nic_unlock(sc);
2575 }
2576
2577 /* load to FW the binary sections of CPU2 */
2578 ret = iwm_pcie_load_cpu_sections(sc, image, 2,
2579 &first_ucode_section);
2580 if (ret)
2581 return ret;
2582 }
2583
2584 iwm_enable_interrupts(sc);
2585
2586 /* release CPU reset */
2587 IWM_WRITE(sc, IWM_CSR_RESET, 0);
2588
2589 return 0;
2590}
2591
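/*
 * 8000-family (secured) image load: the CPU reset is released first,
 * then the CPU1 and CPU2 sections are loaded through
 * iwm_pcie_load_cpu_sections_8000().
 */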
2592int
2593iwm_pcie_load_given_ucode_8000(struct iwm_softc *sc,
2594 const struct iwm_fw_img *image)
2595{
2596 int ret = 0;
2597 int first_ucode_section;
2598
2599 IWM_DPRINTF(sc, IWM_DEBUG_RESET, "working with %s CPU\n",
2600 image->is_dual_cpus ? "Dual" : "Single");
2601
2602 /* configure the ucode to be ready to get the secured image */
2603 /* release CPU reset */
2604 if (iwm_nic_lock(sc)) {
2605 iwm_write_prph(sc, IWM_RELEASE_CPU_RESET,
2606 IWM_RELEASE_CPU_RESET_BIT);
2607 iwm_nic_unlock(sc);
2608 }
2609
2610 /* load to FW the binary Secured sections of CPU1 */
2611 ret = iwm_pcie_load_cpu_sections_8000(sc, image, 1,
2612 &first_ucode_section);
2613 if (ret)
2614 return ret;
2615
2616 /* load to FW the binary sections of CPU2 */
2617 return iwm_pcie_load_cpu_sections_8000(sc, image, 2,
2618 &first_ucode_section);
2619}
2620
2621/* XXX Get rid of this definition */
2622static inline void
2623iwm_enable_fw_load_int(struct iwm_softc *sc)
2624{
2625 IWM_DPRINTF(sc, IWM_DEBUG_INTR, "Enabling FW load interrupt\n");
2626 sc->sc_intmask = IWM_CSR_INT_BIT_FH_TX;
2627 IWM_WRITE(sc, IWM_CSR_INT_MASK, sc->sc_intmask);
2628}
2629
2630/* XXX Add proper rfkill support code */
2631static int
2632iwm_start_fw(struct iwm_softc *sc, const struct iwm_fw_img *fw)
2633{
2634 int ret;
2635
2636 /* This may fail if AMT took ownership of the device */
2637 if (iwm_prepare_card_hw(sc)) {
2638 device_printf(sc->sc_dev,
2639 "%s: Exit HW not ready\n", __func__);
2640 ret = EIO;
2641 goto out;
2642 }
2643
2644 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2645
2646 iwm_disable_interrupts(sc);
2647
2648 /* make sure rfkill handshake bits are cleared */
2649 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2650 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR,
2651 IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
2652
2653 /* clear (again), then enable host interrupts */
2654 IWM_WRITE(sc, IWM_CSR_INT, 0xFFFFFFFF);
2655
2656 ret = iwm_nic_init(sc);
2657 if (ret) {
2658 device_printf(sc->sc_dev, "%s: Unable to init nic\n", __func__);
2659 goto out;
2660 }
2661
2662 /*
2663 * Now, we load the firmware and don't want to be interrupted, even
2664 * by the RF-Kill interrupt (hence mask all the interrupts besides the
2665 * FH_TX interrupt, which is needed to load the firmware). If the
2666 * RF-Kill switch is toggled, we will find out after having loaded
2667 * the firmware and return the proper value to the caller.
2668 */
2669 iwm_enable_fw_load_int(sc);
2670
2671 /* really make sure rfkill handshake bits are cleared */
2672 /* maybe we should write a few times more? just to make sure */
2673 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2674 IWM_WRITE(sc, IWM_CSR_UCODE_DRV_GP1_CLR, IWM_CSR_UCODE_SW_BIT_RFKILL);
2675
2676 /* Load the given image to the HW */
2677 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
2678 ret = iwm_pcie_load_given_ucode_8000(sc, fw);
2679 else
2680 ret = iwm_pcie_load_given_ucode(sc, fw);
2681
2682 /* XXX re-check RF-Kill state */
2683
2684out:
2685 return ret;
2686}
2687
2688static int
2689iwm_send_tx_ant_cfg(struct iwm_softc *sc, uint8_t valid_tx_ant)
2690{
2691 struct iwm_tx_ant_cfg_cmd tx_ant_cmd = {
2692 .valid = htole32(valid_tx_ant),
2693 };
2694
2695 return iwm_send_cmd_pdu(sc, IWM_TX_ANT_CONFIGURATION_CMD,
2696 IWM_CMD_SYNC, sizeof(tx_ant_cmd), &tx_ant_cmd);
2697}
2698
2699/* iwlwifi: mvm/fw.c */
2700static int
2701iwm_send_phy_cfg_cmd(struct iwm_softc *sc)
2702{
2703 struct iwm_phy_cfg_cmd phy_cfg_cmd;
2704 enum iwm_ucode_type ucode_type = sc->cur_ucode;
2705
2706 /* Set parameters */
2707 phy_cfg_cmd.phy_cfg = htole32(iwm_get_phy_config(sc));
2708 phy_cfg_cmd.calib_control.event_trigger =
2709 sc->sc_default_calib[ucode_type].event_trigger;
2710 phy_cfg_cmd.calib_control.flow_trigger =
2711 sc->sc_default_calib[ucode_type].flow_trigger;
2712
2713 IWM_DPRINTF(sc, IWM_DEBUG_CMD | IWM_DEBUG_RESET,
2714 "Sending Phy CFG command: 0x%x\n", phy_cfg_cmd.phy_cfg);
2715 return iwm_send_cmd_pdu(sc, IWM_PHY_CONFIGURATION_CMD, IWM_CMD_SYNC,
2716 sizeof(phy_cfg_cmd), &phy_cfg_cmd);
2717}
2718
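/*
 * Notification-wait callback for the ALIVE response. Records the
 * error/log event table pointers and the scheduler base address, and
 * marks the alive data valid when the firmware reports
 * IWM_ALIVE_STATUS_OK.
 */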
2719static int
2720iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
2721{
2722 struct iwm_alive_data *alive_data = data;
2723 struct iwm_alive_resp_v3 *palive3;
2724 struct iwm_alive_resp *palive;
2725 struct iwm_umac_alive *umac;
2726 struct iwm_lmac_alive *lmac1;
2727 struct iwm_lmac_alive *lmac2 = NULL;
2728 uint16_t status;
2729
2730 if (iwm_rx_packet_payload_len(pkt) == sizeof(*palive)) {
2731 palive = (void *)pkt->data;
2732 umac = &palive->umac_data;
2733 lmac1 = &palive->lmac_data[0];
2734 lmac2 = &palive->lmac_data[1];
2735 status = le16toh(palive->status);
2736 } else {
2737 palive3 = (void *)pkt->data;
2738 umac = &palive3->umac_data;
2739 lmac1 = &palive3->lmac_data;
2740 status = le16toh(palive3->status);
2741 }
2742
2743 sc->error_event_table[0] = le32toh(lmac1->error_event_table_ptr);
2744 if (lmac2)
2745 sc->error_event_table[1] =
2746 le32toh(lmac2->error_event_table_ptr);
2747 sc->log_event_table = le32toh(lmac1->log_event_table_ptr);
2748 sc->umac_error_event_table = le32toh(umac->error_info_addr);
2749 alive_data->scd_base_addr = le32toh(lmac1->scd_base_ptr);
2750 alive_data->valid = status == IWM_ALIVE_STATUS_OK;
2751 if (sc->umac_error_event_table)
2752 sc->support_umac_log = TRUE;
2753
2754 IWM_DPRINTF(sc, IWM_DEBUG_FW,
2755 "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
2756 status, lmac1->ver_type, lmac1->ver_subtype);
2757
2758 if (lmac2)
2759 IWM_DPRINTF(sc, IWM_DEBUG_FW, "Alive ucode CDB\n");
2760
2761 IWM_DPRINTF(sc, IWM_DEBUG_FW,
2762 "UMAC version: Major - 0x%x, Minor - 0x%x\n",
2763 le32toh(umac->umac_major),
2764 le32toh(umac->umac_minor));
2765
2766 return TRUE;
2767}
2768
2769static int
2770iwm_wait_phy_db_entry(struct iwm_softc *sc,
2771 struct iwm_rx_packet *pkt, void *data)
2772{
2773 struct iwm_phy_db *phy_db = data;
2774
2775 if (pkt->hdr.code != IWM_CALIB_RES_NOTIF_PHY_DB) {
2776 if(pkt->hdr.code != IWM_INIT_COMPLETE_NOTIF) {
2777 device_printf(sc->sc_dev, "%s: Unexpected cmd: %d\n",
2778 __func__, pkt->hdr.code);
2779 }
2780 return TRUE;
2781 }
2782
2783 if (iwm_phy_db_set_section(phy_db, pkt)) {
2784 device_printf(sc->sc_dev,
2785 "%s: iwm_phy_db_set_section failed\n", __func__);
2786 }
2787
2788 return FALSE;
2789}
2790
2791static int
2792iwm_load_ucode_wait_alive(struct iwm_softc *sc,
2793 enum iwm_ucode_type ucode_type)
2794{
2795 struct iwm_notification_wait alive_wait;
2796 struct iwm_alive_data alive_data;
2797 const struct iwm_fw_img *fw;
2798 enum iwm_ucode_type old_type = sc->cur_ucode;
2799 int error;
2800 static const uint16_t alive_cmd[] = { IWM_ALIVE };
2801
2802 fw = &sc->sc_fw.img[ucode_type];
2803 sc->cur_ucode = ucode_type;
2804 sc->ucode_loaded = FALSE;
2805
2806 memset(&alive_data, 0, sizeof(alive_data));
2807 iwm_init_notification_wait(sc->sc_notif_wait, &alive_wait,
2808 alive_cmd, nitems(alive_cmd),
2809 iwm_alive_fn, &alive_data);
2810
2811 error = iwm_start_fw(sc, fw);
2812 if (error) {
2813 device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
2814 sc->cur_ucode = old_type;
2815 iwm_remove_notification(sc->sc_notif_wait, &alive_wait);
2816 return error;
2817 }
2818
2819 /*
2820 * Some things may run in the background now, but we
2821 * just wait for the ALIVE notification here.
2822 */
2823 IWM_UNLOCK(sc);
2824 error = iwm_wait_notification(sc->sc_notif_wait, &alive_wait,
2825 IWM_UCODE_ALIVE_TIMEOUT);
2826 IWM_LOCK(sc);
2827 if (error) {
2828 if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000) {
2829 uint32_t a = 0x5a5a5a5a, b = 0x5a5a5a5a;
2830 if (iwm_nic_lock(sc)) {
2831 a = iwm_read_prph(sc, IWM_SB_CPU_1_STATUS);
2832 b = iwm_read_prph(sc, IWM_SB_CPU_2_STATUS);
2833 iwm_nic_unlock(sc);
2834 }
2835 device_printf(sc->sc_dev,
2836 "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
2837 a, b);
2838 }
2839 sc->cur_ucode = old_type;
2840 return error;
2841 }
2842
2843 if (!alive_data.valid) {
2844 device_printf(sc->sc_dev, "%s: Loaded ucode is not valid\n",
2845 __func__);
2846 sc->cur_ucode = old_type;
2847 return EIO;
2848 }
2849
2850 iwm_trans_pcie_fw_alive(sc, alive_data.scd_base_addr);
2851
2852 /*
2853 * Configure and operate the firmware paging mechanism.
2854 * The driver configures the paging flow only once; the CPU2 paging
2855 * image is included in the IWM_UCODE_INIT image.
2856 */
2857 if (fw->paging_mem_size) {
2858 error = iwm_save_fw_paging(sc, fw);
2859 if (error) {
2860 device_printf(sc->sc_dev,
2861 "%s: failed to save the FW paging image\n",
2862 __func__);
2863 return error;
2864 }
2865
2866 error = iwm_send_paging_cmd(sc, fw);
2867 if (error) {
2868 device_printf(sc->sc_dev,
2869 "%s: failed to send the paging cmd\n", __func__);
2870 iwm_free_fw_paging(sc);
2871 return error;
2872 }
2873 }
2874
2875 if (!error)
2876 sc->ucode_loaded = TRUE;
2877 return error;
2878}
2879
2880/*
2881 * mvm misc bits
2882 */
2883
2884/*
2885 * follows iwlwifi/fw.c
2886 */
2887static int
2888iwm_run_init_ucode(struct iwm_softc *sc, int justnvm)
2889{
2890 struct iwm_notification_wait calib_wait;
2891 static const uint16_t init_complete[] = {
2892 IWM_INIT_COMPLETE_NOTIF,
2893 IWM_CALIB_RES_NOTIF_PHY_DB
2894 };
2895 int ret;
2896
2897 /* do not operate with rfkill switch turned on */
2898 if ((sc->sc_flags & IWM_FLAG_RFKILL) && !justnvm) {
2899 device_printf(sc->sc_dev,
2900 "radio is disabled by hardware switch\n");
2901 return EPERM;
2902 }
2903
2904 iwm_init_notification_wait(sc->sc_notif_wait,
2905 &calib_wait,
2906 init_complete,
2907 nitems(init_complete),
2908 iwm_wait_phy_db_entry,
2909 sc->sc_phy_db);
2910
2911 /* Will also start the device */
2912 ret = iwm_load_ucode_wait_alive(sc, IWM_UCODE_INIT);
2913 if (ret) {
2914 device_printf(sc->sc_dev, "Failed to start INIT ucode: %d\n",
2915 ret);
2916 goto error;
2917 }
2918
2919 if (sc->cfg->device_family < IWM_DEVICE_FAMILY_8000) {
2920 ret = iwm_send_bt_init_conf(sc);
2921 if (ret) {
2922 device_printf(sc->sc_dev,
2923 "failed to send bt coex configuration: %d\n", ret);
2924 goto error;
2925 }
2926 }
2927
2928 if (justnvm) {
2929 /* Read nvm */
2930 ret = iwm_nvm_init(sc);
2931 if (ret) {
2932 device_printf(sc->sc_dev, "failed to read nvm\n");
2933 goto error;
2934 }
2935 IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->nvm_data->hw_addr);
2936 goto error;
2937 }
2938
2939 /* Send TX valid antennas before triggering calibrations */
2940 ret = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
2941 if (ret) {
2942 device_printf(sc->sc_dev,
2943 "failed to send antennas before calibration: %d\n", ret);
2944 goto error;
2945 }
2946
2947 /*
2948 * Send phy configurations command to init uCode
2949 * to start the 16.0 uCode init image internal calibrations.
2950 */
2951 ret = iwm_send_phy_cfg_cmd(sc);
2952 if (ret) {
2953 device_printf(sc->sc_dev,
2954 "%s: Failed to run INIT calibrations: %d\n",
2955 __func__, ret);
2956 goto error;
2957 }
2958
2959 /*
2960 * Nothing to do but wait for the init complete notification
2961 * from the firmware.
2962 */
2963 IWM_UNLOCK(sc);
2964 ret = iwm_wait_notification(sc->sc_notif_wait, &calib_wait,
2965 IWM_UCODE_CALIB_TIMEOUT);
2966 IWM_LOCK(sc);
2967
2968
2969 goto out;
2970
2971error:
2972 iwm_remove_notification(sc->sc_notif_wait, &calib_wait);
2973out:
2974 return ret;
2975}
2976
2977static int
2978iwm_config_ltr(struct iwm_softc *sc)
2979{
2980 struct iwm_ltr_config_cmd cmd = {
2981 .flags = htole32(IWM_LTR_CFG_FLAG_FEATURE_ENABLE),
2982 };
2983
2984 if (!sc->sc_ltr_enabled)
2985 return 0;
2986
2987 return iwm_send_cmd_pdu(sc, IWM_LTR_CONFIG, 0, sizeof(cmd), &cmd);
2988}
2989
2990/*
2991 * receive side
2992 */
2993
2994/* (re)stock rx ring, called at init-time and at runtime */
2995static int
2996iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
2997{
2998 struct iwm_rx_ring *ring = &sc->rxq;
2999 struct iwm_rx_data *data = &ring->data[idx];
3000 struct mbuf *m;
3001 bus_dmamap_t dmamap;
3002 bus_dma_segment_t seg;
3003 int nsegs, error;
3004
3005 m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
3006 if (m == NULL)
3007 return ENOBUFS;
3008
3009 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
3010 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
3011 &seg, &nsegs, BUS_DMA_NOWAIT);
3012 if (error != 0) {
3013 device_printf(sc->sc_dev,
3014 "%s: can't map mbuf, error %d\n", __func__, error);
3015 m_freem(m);
3016 return error;
3017 }
3018
3019 if (data->m != NULL)
3020 bus_dmamap_unload(ring->data_dmat, data->map);
3021
3022 /* Swap ring->spare_map with data->map */
3023 dmamap = data->map;
3024 data->map = ring->spare_map;
3025 ring->spare_map = dmamap;
3026
3027 bus_dmamap_sync(ring->data_dmat, data->map, BUS_DMASYNC_PREREAD);
3028 data->m = m;
3029
3030 /* Update RX descriptor. */
3031 KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
3032 if (sc->cfg->mqrx_supported)
3033 ((uint64_t *)ring->desc)[idx] = htole64(seg.ds_addr);
3034 else
3035 ((uint32_t *)ring->desc)[idx] = htole32(seg.ds_addr >> 8);
3036 bus_dmamap_sync(ring->free_desc_dma.tag, ring->free_desc_dma.map,
3037 BUS_DMASYNC_PREWRITE);
3038
3039 return 0;
3040}
3041
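/*
 * Cache the PHY info delivered ahead of a (non-MQ) RX MPDU; it is
 * consumed by iwm_rx_rx_mpdu() when the matching frame arrives.
 */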
3042static void
3043iwm_rx_rx_phy_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3044{
3045 struct iwm_rx_phy_info *phy_info = (void *)pkt->data;
3046
3047 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "received PHY stats\n");
3048
3049 memcpy(&sc->sc_last_phy_info, phy_info, sizeof(sc->sc_last_phy_info));
3050}
3051
3052/*
3053 * Retrieve the average noise (in dBm) among receivers.
3054 */
3055static int
3056iwm_get_noise(struct iwm_softc *sc,
3057 const struct iwm_statistics_rx_non_phy *stats)
3058{
3059 int i, total, nbant, noise;
3060
3061 total = nbant = noise = 0;
3062 for (i = 0; i < 3; i++) {
3063 noise = le32toh(stats->beacon_silence_rssi[i]) & 0xff;
3064 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: i=%d, noise=%d\n",
3065 __func__,
3066 i,
3067 noise);
3068
3069 if (noise) {
3070 total += noise;
3071 nbant++;
3072 }
3073 }
3074
3075 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "%s: nbant=%d, total=%d\n",
3076 __func__, nbant, total);
3077#if 0
3078 /* There should be at least one antenna but check anyway. */
3079 return (nbant == 0) ? -127 : (total / nbant) - 107;
3080#else
3081 /* For now, just hard-code it to -96 to be safe */
3082 return (-96);
3083#endif
3084}
3085
3086static void
3087iwm_handle_rx_statistics(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3088{
3089 struct iwm_notif_statistics *stats = (void *)&pkt->data;
3090
3091 memcpy(&sc->sc_stats, stats, sizeof(sc->sc_stats));
3092 sc->sc_noise = iwm_get_noise(sc, &stats->rx.general);
3093}
3094
3095/* iwlwifi: mvm/rx.c */
3096/*
3097 * iwm_get_signal_strength - use new rx PHY INFO API
3098 * values are reported by the fw as positive values - need to negate
3099 * to obtain their dBm. Account for missing antennas by replacing 0
3100 * values by -256dBm: practically 0 power and a non-feasible 8 bit value.
3101 */
3102static int
3103iwm_rx_get_signal_strength(struct iwm_softc *sc,
3104 struct iwm_rx_phy_info *phy_info)
3105{
3106 int energy_a, energy_b, energy_c, max_energy;
3107 uint32_t val;
3108
3109 val = le32toh(phy_info->non_cfg_phy[IWM_RX_INFO_ENERGY_ANT_ABC_IDX]);
3110 energy_a = (val & IWM_RX_INFO_ENERGY_ANT_A_MSK) >>
3111 IWM_RX_INFO_ENERGY_ANT_A_POS;
3112 energy_a = energy_a ? -energy_a : -256;
3113 energy_b = (val & IWM_RX_INFO_ENERGY_ANT_B_MSK) >>
3114 IWM_RX_INFO_ENERGY_ANT_B_POS;
3115 energy_b = energy_b ? -energy_b : -256;
3116 energy_c = (val & IWM_RX_INFO_ENERGY_ANT_C_MSK) >>
3117 IWM_RX_INFO_ENERGY_ANT_C_POS;
3118 energy_c = energy_c ? -energy_c : -256;
3119 max_energy = MAX(energy_a, energy_b);
3120 max_energy = MAX(max_energy, energy_c);
3121
3122 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3123 "energy In A %d B %d C %d , and max %d\n",
3124 energy_a, energy_b, energy_c, max_energy);
3125
3126 return max_energy;
3127}
3128
3129static int
3130iwm_rxmq_get_signal_strength(struct iwm_softc *sc,
3131 struct iwm_rx_mpdu_desc *desc)
3132{
3133 int energy_a, energy_b;
3134
3135 energy_a = desc->v1.energy_a;
3136 energy_b = desc->v1.energy_b;
3137 energy_a = energy_a ? -energy_a : -256;
3138 energy_b = energy_b ? -energy_b : -256;
3139 return MAX(energy_a, energy_b);
3140}
3141
3142/*
3143 * iwm_rx_rx_mpdu - IWM_REPLY_RX_MPDU_CMD handler
3144 *
3145 * Handles the actual data of the Rx packet from the fw
3146 */
3147static bool
3148iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3149 bool stolen)
3150{
3151 struct ieee80211com *ic = &sc->sc_ic;
3152 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3153 struct ieee80211_rx_stats rxs;
3154 struct iwm_rx_phy_info *phy_info;
3155 struct iwm_rx_mpdu_res_start *rx_res;
3156 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *, offset);
3157 uint32_t len;
3158 uint32_t rx_pkt_status;
3159 int rssi;
3160
3161 phy_info = &sc->sc_last_phy_info;
3162 rx_res = (struct iwm_rx_mpdu_res_start *)pkt->data;
3163 len = le16toh(rx_res->byte_count);
3164 rx_pkt_status = le32toh(*(uint32_t *)(pkt->data + sizeof(*rx_res) + len));
3165
3166 if (__predict_false(phy_info->cfg_phy_cnt > 20)) {
3167 device_printf(sc->sc_dev,
3168 "dsp size out of range [0,20]: %d\n",
3169 phy_info->cfg_phy_cnt);
3170 return false;
3171 }
3172
3173 if (!(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_CRC_OK) ||
3174 !(rx_pkt_status & IWM_RX_MPDU_RES_STATUS_OVERRUN_OK)) {
3175 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3176 "Bad CRC or FIFO: 0x%08X.\n", rx_pkt_status);
3177 return false;
3178 }
3179
3180 rssi = iwm_rx_get_signal_strength(sc, phy_info);
3181
3182 /* Map it to relative value */
3183 rssi = rssi - sc->sc_noise;
3184
3185 /* replenish ring for the buffer we're going to feed to the sharks */
3186 if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3187 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3188 __func__);
3189 return false;
3190 }
3191
3192 m->m_data = pkt->data + sizeof(*rx_res);
3193 m->m_pkthdr.len = m->m_len = len;
3194
3195 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3196 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3197
3198 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3199 "%s: phy_info: channel=%d, flags=0x%08x\n",
3200 __func__,
3201 le16toh(phy_info->channel),
3202 le16toh(phy_info->phy_flags));
3203
3204 /*
3205 * Populate an RX state struct with the provided information.
3206 */
3207 bzero(&rxs, sizeof(rxs));
3208 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3209 rxs.r_flags |= IEEE80211_R_BAND;
3210 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3211 rxs.c_ieee = le16toh(phy_info->channel);
3212 if (le16toh(phy_info->phy_flags & IWM_RX_RES_PHY_FLAGS_BAND_24)) {
3213 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_2GHZ);
3214 rxs.c_band = IEEE80211_CHAN_2GHZ;
3215 } else {
3216 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee, IEEE80211_CHAN_5GHZ);
3217 rxs.c_band = IEEE80211_CHAN_5GHZ;
3218 }
3219
3220 /* rssi is in 1/2db units */
3221 rxs.c_rssi = rssi * 2;
3222 rxs.c_nf = sc->sc_noise;
3223 if (ieee80211_add_rx_params(m, &rxs) == 0)
3224 return false;
3225
3226 if (ieee80211_radiotap_active_vap(vap)) {
3227 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3228
3229 tap->wr_flags = 0;
3230 if (phy_info->phy_flags & htole16(IWM_PHY_INFO_FLAG_SHPREAMBLE))
3231 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3232 tap->wr_chan_freq = htole16(rxs.c_freq);
3233 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3234 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3235 tap->wr_dbm_antsignal = (int8_t)rssi;
3236 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3237 tap->wr_tsft = phy_info->system_timestamp;
3238 switch (phy_info->rate) {
3239 /* CCK rates. */
3240 case 10: tap->wr_rate = 2; break;
3241 case 20: tap->wr_rate = 4; break;
3242 case 55: tap->wr_rate = 11; break;
3243 case 110: tap->wr_rate = 22; break;
3244 /* OFDM rates. */
3245 case 0xd: tap->wr_rate = 12; break;
3246 case 0xf: tap->wr_rate = 18; break;
3247 case 0x5: tap->wr_rate = 24; break;
3248 case 0x7: tap->wr_rate = 36; break;
3249 case 0x9: tap->wr_rate = 48; break;
3250 case 0xb: tap->wr_rate = 72; break;
3251 case 0x1: tap->wr_rate = 96; break;
3252 case 0x3: tap->wr_rate = 108; break;
3253 /* Unknown rate: should not happen. */
3254 default: tap->wr_rate = 0;
3255 }
3256 }
3257
3258 return true;
3259}
3260
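/*
 * Multi-queue RX variant: the PHY information (channel, energy,
 * rate_n_flags) is carried in the iwm_rx_mpdu_desc that precedes the
 * frame instead of in a separate PHY info command.
 */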
3261static bool
3262iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3263 bool stolen)
3264{
3265 struct ieee80211com *ic = &sc->sc_ic;
3266 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3267 struct ieee80211_frame *wh;
3268 struct ieee80211_rx_stats rxs;
3269 struct iwm_rx_mpdu_desc *desc;
3270 struct iwm_rx_packet *pkt;
3271 int rssi;
3272 uint32_t hdrlen, len, rate_n_flags;
3273 uint16_t phy_info;
3274 uint8_t channel;
3275
3276 pkt = mtodo(m, offset);
3277 desc = (void *)pkt->data;
3278
3279 if (!(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_CRC_OK)) ||
3280 !(desc->status & htole16(IWM_RX_MPDU_RES_STATUS_OVERRUN_OK))) {
3281 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3282 "Bad CRC or FIFO: 0x%08X.\n", desc->status);
3283 return false;
3284 }
3285
3286 channel = desc->v1.channel;
3287 len = le16toh(desc->mpdu_len);
3288 phy_info = le16toh(desc->phy_info);
3289 rate_n_flags = desc->v1.rate_n_flags;
3290
3291 wh = mtodo(m, sizeof(*desc));
3292 m->m_data = pkt->data + sizeof(*desc);
3293 m->m_pkthdr.len = m->m_len = len;
3294 m->m_len = len;
3295
3296 /* Account for padding following the frame header. */
3297 if ((desc->mac_flags2 & IWM_RX_MPDU_MFLG2_PAD)) {
3298 hdrlen = ieee80211_anyhdrsize(wh);
3299 memmove(mtodo(m, 2), mtodo(m, 0), hdrlen);
3300 m->m_data = mtodo(m, 2);
3301 wh = mtod(m, struct ieee80211_frame *);
3302 }
3303
3304 /* Map it to relative value */
3305 rssi = iwm_rxmq_get_signal_strength(sc, desc);
3306 rssi = rssi - sc->sc_noise;
3307
3308 /* replenish ring for the buffer we're going to feed to the sharks */
3309 if (!stolen && iwm_rx_addbuf(sc, IWM_RBUF_SIZE, sc->rxq.cur) != 0) {
3310 device_printf(sc->sc_dev, "%s: unable to add more buffers\n",
3311 __func__);
3312 return false;
3313 }
3314
3315 IWM_DPRINTF(sc, IWM_DEBUG_RECV,
3316 "%s: rssi=%d, noise=%d\n", __func__, rssi, sc->sc_noise);
3317
3318 /*
3319 * Populate an RX state struct with the provided information.
3320 */
3321 bzero(&rxs, sizeof(rxs));
3322 rxs.r_flags |= IEEE80211_R_IEEE | IEEE80211_R_FREQ;
3323 rxs.r_flags |= IEEE80211_R_BAND;
3324 rxs.r_flags |= IEEE80211_R_NF | IEEE80211_R_RSSI;
3325 rxs.c_ieee = channel;
3326 rxs.c_freq = ieee80211_ieee2mhz(rxs.c_ieee,
3327 channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ);
3328 rxs.c_band = channel <= 14 ? IEEE80211_CHAN_2GHZ : IEEE80211_CHAN_5GHZ;
3329
3330 /* rssi is in 1/2db units */
3331 rxs.c_rssi = rssi * 2;
3332 rxs.c_nf = sc->sc_noise;
3333 if (ieee80211_add_rx_params(m, &rxs) == 0)
3334 return false;
3335
3336 if (ieee80211_radiotap_active_vap(vap)) {
3337 struct iwm_rx_radiotap_header *tap = &sc->sc_rxtap;
3338
3339 tap->wr_flags = 0;
3340 if ((phy_info & IWM_RX_MPDU_PHY_SHORT_PREAMBLE) != 0)
3341 tap->wr_flags |= IEEE80211_RADIOTAP_F_SHORTPRE;
3342 tap->wr_chan_freq = htole16(rxs.c_freq);
3343 /* XXX only if ic->ic_curchan->ic_ieee == rxs.c_ieee */
3344 tap->wr_chan_flags = htole16(ic->ic_curchan->ic_flags);
3345 tap->wr_dbm_antsignal = (int8_t)rssi;
3346 tap->wr_dbm_antnoise = (int8_t)sc->sc_noise;
3347 tap->wr_tsft = desc->v1.gp2_on_air_rise;
3348 switch ((rate_n_flags & 0xff)) {
3349 /* CCK rates. */
3350 case 10: tap->wr_rate = 2; break;
3351 case 20: tap->wr_rate = 4; break;
3352 case 55: tap->wr_rate = 11; break;
3353 case 110: tap->wr_rate = 22; break;
3354 /* OFDM rates. */
3355 case 0xd: tap->wr_rate = 12; break;
3356 case 0xf: tap->wr_rate = 18; break;
3357 case 0x5: tap->wr_rate = 24; break;
3358 case 0x7: tap->wr_rate = 36; break;
3359 case 0x9: tap->wr_rate = 48; break;
3360 case 0xb: tap->wr_rate = 72; break;
3361 case 0x1: tap->wr_rate = 96; break;
3362 case 0x3: tap->wr_rate = 108; break;
3363 /* Unknown rate: should not happen. */
3364 default: tap->wr_rate = 0;
3365 }
3366 }
3367
3368 return true;
3369}
3370
3371static bool
3372iwm_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset,
3373 bool stolen)
3374{
3375 struct epoch_tracker et;
3376 struct ieee80211com *ic;
3377 struct ieee80211_frame *wh;
3378 struct ieee80211_node *ni;
3379 bool ret;
3380
3381 ic = &sc->sc_ic;
3382
3383 ret = sc->cfg->mqrx_supported ?
3384 iwm_rx_mpdu_mq(sc, m, offset, stolen) :
3385 iwm_rx_rx_mpdu(sc, m, offset, stolen);
3386 if (!ret) {
3387 counter_u64_add(ic->ic_ierrors, 1);
3388 return (ret);
3389 }
3390
3391 wh = mtod(m, struct ieee80211_frame *);
3392 ni = ieee80211_find_rxnode(ic, (struct ieee80211_frame_min *)wh);
3393
3394 IWM_UNLOCK(sc);
3395
3396 NET_EPOCH_ENTER(et);
3397 if (ni != NULL) {
3398 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "input m %p\n", m);
3399 ieee80211_input_mimo(ni, m);
3400 ieee80211_free_node(ni);
3401 } else {
3402 IWM_DPRINTF(sc, IWM_DEBUG_RECV, "inputall m %p\n", m);
3403 ieee80211_input_mimo_all(ic, m);
3404 }
3405 NET_EPOCH_EXIT(et);
3406
3407 IWM_LOCK(sc);
3408
3409 return true;
3410}
3411
3412static int
3413iwm_rx_tx_cmd_single(struct iwm_softc *sc, struct iwm_rx_packet *pkt,
3414 struct iwm_node *in)
3415{
3416 struct iwm_tx_resp *tx_resp = (void *)pkt->data;
3417 struct ieee80211_ratectl_tx_status *txs = &sc->sc_txs;
3418 struct ieee80211_node *ni = &in->in_ni;
3419 struct ieee80211vap *vap = ni->ni_vap;
3420 int status = le16toh(tx_resp->status.status) & IWM_TX_STATUS_MSK;
3421 int new_rate, cur_rate = vap->iv_bss->ni_txrate;
3422 boolean_t rate_matched;
3423 uint8_t tx_resp_rate;
3424
3425 KASSERT(tx_resp->frame_count == 1, ("too many frames"));
3426
3427 /* Update rate control statistics. */
3428 IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: status=0x%04x, seq=%d, fc=%d, btc=%d, frts=%d, ff=%d, irate=%08x, wmt=%d\n",
3429 __func__,
3430 (int) le16toh(tx_resp->status.status),
3431 (int) le16toh(tx_resp->status.sequence),
3432 tx_resp->frame_count,
3433 tx_resp->bt_kill_count,
3434 tx_resp->failure_rts,
3435 tx_resp->failure_frame,
3436 le32toh(tx_resp->initial_rate),
3437 (int) le16toh(tx_resp->wireless_media_time));
3438
3439 tx_resp_rate = iwm_rate_from_ucode_rate(le32toh(tx_resp->initial_rate));
3440
3441 /* For rate control, ignore frames sent at different initial rate */
3442 rate_matched = (tx_resp_rate != 0 && tx_resp_rate == cur_rate);
3443
3444 if (tx_resp_rate != 0 && cur_rate != 0 && !rate_matched) {
3445 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3446 "tx_resp_rate doesn't match ni_txrate (tx_resp_rate=%u "
3447 "ni_txrate=%d)\n", tx_resp_rate, cur_rate);
3448 }
3449
3450 txs->flags = IEEE80211_RATECTL_STATUS_SHORT_RETRY |
3451 IEEE80211_RATECTL_STATUS_LONG_RETRY;
3452 txs->short_retries = tx_resp->failure_rts;
3453 txs->long_retries = tx_resp->failure_frame;
3454 if (status != IWM_TX_STATUS_SUCCESS &&
3455 status != IWM_TX_STATUS_DIRECT_DONE) {
3456 switch (status) {
3457 case IWM_TX_STATUS_FAIL_SHORT_LIMIT:
3458 txs->status = IEEE80211_RATECTL_TX_FAIL_SHORT;
3459 break;
3460 case IWM_TX_STATUS_FAIL_LONG_LIMIT:
3461 txs->status = IEEE80211_RATECTL_TX_FAIL_LONG;
3462 break;
3463 case IWM_TX_STATUS_FAIL_LIFE_EXPIRE:
3464 txs->status = IEEE80211_RATECTL_TX_FAIL_EXPIRED;
3465 break;
3466 default:
3467 txs->status = IEEE80211_RATECTL_TX_FAIL_UNSPECIFIED;
3468 break;
3469 }
3470 } else {
3471 txs->status = IEEE80211_RATECTL_TX_SUCCESS;
3472 }
3473
3474 if (rate_matched) {
3475 ieee80211_ratectl_tx_complete(ni, txs);
3476
3477 int rix = ieee80211_ratectl_rate(vap->iv_bss, NULL, 0);
3478 new_rate = vap->iv_bss->ni_txrate;
3479 if (new_rate != 0 && new_rate != cur_rate) {
3480 struct iwm_node *in = IWM_NODE(vap->iv_bss);
3481 iwm_setrates(sc, in, rix);
3482 iwm_send_lq_cmd(sc, &in->in_lq, FALSE);
3483 }
3484 }
3485
3486 return (txs->status != IEEE80211_RATECTL_TX_SUCCESS);
3487}
3488
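/*
 * TX completion handler: updates rate control via
 * iwm_rx_tx_cmd_single(), unmaps and completes the mbuf, and restarts
 * transmission once a previously full TX ring has drained below the
 * low watermark.
 */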
3489static void
3490iwm_rx_tx_cmd(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3491{
3492 struct iwm_cmd_header *cmd_hdr;
3493 struct iwm_tx_ring *ring;
3494 struct iwm_tx_data *txd;
3495 struct iwm_node *in;
3496 struct mbuf *m;
3497 int idx, qid, qmsk, status;
3498
3499 cmd_hdr = &pkt->hdr;
3500 idx = cmd_hdr->idx;
3501 qid = cmd_hdr->qid;
3502
3503 ring = &sc->txq[qid];
3504 txd = &ring->data[idx];
3505 in = txd->in;
3506 m = txd->m;
3507
3508 KASSERT(txd->done == 0, ("txd not done"));
3509 KASSERT(txd->in != NULL, ("txd without node"));
3510 KASSERT(txd->m != NULL, ("txd without mbuf"));
3511
3512 sc->sc_tx_timer = 0;
3513
3514 status = iwm_rx_tx_cmd_single(sc, pkt, in);
3515
3516 /* Unmap and free mbuf. */
3517 bus_dmamap_sync(ring->data_dmat, txd->map, BUS_DMASYNC_POSTWRITE);
3518 bus_dmamap_unload(ring->data_dmat, txd->map);
3519
3520 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3521 "free txd %p, in %p\n", txd, txd->in);
3522 txd->done = 1;
3523 txd->m = NULL;
3524 txd->in = NULL;
3525
3526 ieee80211_tx_complete(&in->in_ni, m, status);
3527
3528 qmsk = 1 << qid;
3529 if (--ring->queued < IWM_TX_RING_LOMARK && (sc->qfullmsk & qmsk) != 0) {
3530 sc->qfullmsk &= ~qmsk;
3531 if (sc->qfullmsk == 0)
3532 iwm_start(sc);
3533 }
3534}
3535
3536/*
3537 * transmit side
3538 */
3539
3540/*
3541 * Process a "command done" firmware notification. This is where we wakeup
3542 * processes waiting for a synchronous command completion.
3543 * from if_iwn
3544 */
3545static void
3546iwm_cmd_done(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
3547{
3548 struct iwm_tx_ring *ring = &sc->txq[IWM_CMD_QUEUE];
3549 struct iwm_tx_data *data;
3550
3551 if (pkt->hdr.qid != IWM_CMD_QUEUE) {
3552 return; /* Not a command ack. */
3553 }
3554
3555 /* XXX wide commands? */
3556 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
3557 "cmd notification type 0x%x qid %d idx %d\n",
3558 pkt->hdr.code, pkt->hdr.qid, pkt->hdr.idx);
3559
3560 data = &ring->data[pkt->hdr.idx];
3561
3562 /* If the command was mapped in an mbuf, free it. */
3563 if (data->m != NULL) {
3564 bus_dmamap_sync(ring->data_dmat, data->map,
3565 BUS_DMASYNC_POSTWRITE);
3566 bus_dmamap_unload(ring->data_dmat, data->map);
3567 m_freem(data->m);
3568 data->m = NULL;
3569 }
3570 wakeup(&ring->desc[pkt->hdr.idx]);
3571
3572 if (((pkt->hdr.idx + ring->queued) % IWM_TX_RING_COUNT) != ring->cur) {
3573 device_printf(sc->sc_dev,
3574 "%s: Some HCMDs skipped?: idx=%d queued=%d cur=%d\n",
3575 __func__, pkt->hdr.idx, ring->queued, ring->cur);
3576 /* XXX call iwm_force_nmi() */
3577 }
3578
3579 KASSERT(ring->queued > 0, ("ring->queued is empty?"));
3580 ring->queued--;
3581 if (ring->queued == 0)
3582 iwm_pcie_clear_cmd_in_flight(sc);
3583}
3584
3585#if 0
3586/*
3587 * necessary only for block ack mode
3588 */
3589void
3590iwm_update_sched(struct iwm_softc *sc, int qid, int idx, uint8_t sta_id,
3591 uint16_t len)
3592{
3593 struct iwm_agn_scd_bc_tbl *scd_bc_tbl;
3594 uint16_t w_val;
3595
3596 scd_bc_tbl = sc->sched_dma.vaddr;
3597
3598 len += 8; /* magic numbers came naturally from paris */
3599 len = roundup(len, 4) / 4;
3600
3601 w_val = htole16(sta_id << 12 | len);
3602
3603 /* Update TX scheduler. */
3604 scd_bc_tbl[qid].tfd_offset[idx] = w_val;
3605 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3606 BUS_DMASYNC_PREWRITE);
3607
3608 /* I really wonder what this is ?!? */
3609 if (idx < IWM_TFD_QUEUE_SIZE_BC_DUP) {
3610 scd_bc_tbl[qid].tfd_offset[IWM_TFD_QUEUE_SIZE_MAX + idx] = w_val;
3611 bus_dmamap_sync(sc->sched_dma.tag, sc->sched_dma.map,
3612 BUS_DMASYNC_PREWRITE);
3613 }
3614}
3615#endif
3616
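/*
 * Map an 802.11 rate (in 0.5 Mb/s units) to an index into iwm_rates[];
 * falls back to index 0 if the rate is not in the table.
 */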
3617static int
3618iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
3619{
3620 int i;
3621
3622 for (i = 0; i < nitems(iwm_rates); i++) {
3623 if (iwm_rates[i].rate == rate)
3624 return (i);
3625 }
3626 /* XXX error? */
3627 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3628 "%s: couldn't find an entry for rate=%d\n",
3629 __func__,
3630 rate);
3631 return (0);
3632}
3633
3634/*
3635 * Fill in the rate related information for a transmit command.
3636 */
3637static const struct iwm_rate *
3638iwm_tx_fill_cmd(struct iwm_softc *sc, struct iwm_node *in,
3639 struct mbuf *m, struct iwm_tx_cmd *tx)
3640{
3641 struct ieee80211_node *ni = &in->in_ni;
3642 struct ieee80211_frame *wh;
3643 const struct ieee80211_txparam *tp = ni->ni_txparms;
3644 const struct iwm_rate *rinfo;
3645 int type;
3646 int ridx, rate_flags;
3647
3648 wh = mtod(m, struct ieee80211_frame *);
3649 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3650
3651 tx->rts_retry_limit = IWM_RTS_DFAULT_RETRY_LIMIT;
3652 tx->data_retry_limit = IWM_DEFAULT_TX_RETRY;
3653
3654 if (type == IEEE80211_FC0_TYPE_MGT ||
3655 type == IEEE80211_FC0_TYPE_CTL ||
3656 (m->m_flags & M_EAPOL) != 0) {
3657 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mgmtrate);
3658 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3659 "%s: MGT (%d)\n", __func__, tp->mgmtrate);
3660 } else if (IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3661 ridx = iwm_tx_rateidx_global_lookup(sc, tp->mcastrate);
3662 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3663 "%s: MCAST (%d)\n", __func__, tp->mcastrate);
3664 } else if (tp->ucastrate != IEEE80211_FIXED_RATE_NONE) {
3665 ridx = iwm_tx_rateidx_global_lookup(sc, tp->ucastrate);
3666 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
3667 "%s: FIXED_RATE (%d)\n", __func__, tp->ucastrate);
3668 } else {
3669 /* for data frames, use RS table */
3670 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: DATA\n", __func__);
3671 ridx = iwm_rate2ridx(sc, ni->ni_txrate);
3672 if (ridx == -1)
3673 ridx = 0;
3674
3675 /* This is the index into the programmed table */
3676 tx->initial_rate_index = 0;
3677 tx->tx_flags |= htole32(IWM_TX_CMD_FLG_STA_RATE);
3678 }
3679
3680 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TXRATE,
3681 "%s: frame type=%d txrate %d\n",
3682 __func__, type, iwm_rates[ridx].rate);
3683
3684 rinfo = &iwm_rates[ridx];
3685
3686 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE, "%s: ridx=%d; rate=%d, CCK=%d\n",
3687 __func__, ridx,
3688 rinfo->rate,
3689 !! (IWM_RIDX_IS_CCK(ridx))
3690 );
3691
3692 /* XXX TODO: hard-coded TX antenna? */
3693 if (sc->cfg->device_family == IWM_DEVICE_FAMILY_9000)
3694 rate_flags = IWM_RATE_MCS_ANT_B_MSK;
3695 else
3696 rate_flags = IWM_RATE_MCS_ANT_A_MSK;
3697 if (IWM_RIDX_IS_CCK(ridx))
3698 rate_flags |= IWM_RATE_MCS_CCK_MSK;
3699 tx->rate_n_flags = htole32(rate_flags | rinfo->plcp);
3700
3701 return rinfo;
3702}
3703
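/*
 * Transmit path. The TX command is split across the first two TFD
 * buffers: TB0 carries the first TB0_SIZE (16) bytes of the command,
 * TB1 the remainder of the command plus the (padded) 802.11 header,
 * and the frame payload follows in the remaining scatter/gather
 * segments (see the descriptor setup in iwm_tx()).
 */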
3704#define TB0_SIZE 16
3705static int
3706iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
3707{
3708 struct ieee80211com *ic = &sc->sc_ic;
3709 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
3710 struct iwm_node *in = IWM_NODE(ni);
3711 struct iwm_tx_ring *ring;
3712 struct iwm_tx_data *data;
3713 struct iwm_tfd *desc;
3714 struct iwm_device_cmd *cmd;
3715 struct iwm_tx_cmd *tx;
3716 struct ieee80211_frame *wh;
3717 struct ieee80211_key *k = NULL;
3718 struct mbuf *m1;
3719 const struct iwm_rate *rinfo;
3720 uint32_t flags;
3721 u_int hdrlen;
3722 bus_dma_segment_t *seg, segs[IWM_MAX_SCATTER];
3723 int nsegs;
3724 uint8_t tid, type;
3725 int i, totlen, error, pad;
3726
3727 wh = mtod(m, struct ieee80211_frame *);
3728 hdrlen = ieee80211_anyhdrsize(wh);
3729 type = wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK;
3730 tid = 0;
3731 ring = &sc->txq[ac];
3732 desc = &ring->desc[ring->cur];
3733 data = &ring->data[ring->cur];
3734
3735 /* Fill out iwm_tx_cmd to send to the firmware */
3736 cmd = &ring->cmd[ring->cur];
3737 cmd->hdr.code = IWM_TX_CMD;
3738 cmd->hdr.flags = 0;
3739 cmd->hdr.qid = ring->qid;
3740 cmd->hdr.idx = ring->cur;
3741
3742 tx = (void *)cmd->data;
3743 memset(tx, 0, sizeof(*tx));
3744
3745 rinfo = iwm_tx_fill_cmd(sc, in, m, tx);
3746
3747 /* Encrypt the frame if need be. */
3748 if (wh->i_fc[1] & IEEE80211_FC1_PROTECTED) {
3749 /* Retrieve key for TX && do software encryption. */
3750 k = ieee80211_crypto_encap(ni, m);
3751 if (k == NULL) {
3752 m_freem(m);
3753 return (ENOBUFS);
3754 }
3755 /* 802.11 header may have moved. */
3756 wh = mtod(m, struct ieee80211_frame *);
3757 }
3758
3759 if (ieee80211_radiotap_active_vap(vap)) {
3760 struct iwm_tx_radiotap_header *tap = &sc->sc_txtap;
3761
3762 tap->wt_flags = 0;
3763 tap->wt_chan_freq = htole16(ni->ni_chan->ic_freq);
3764 tap->wt_chan_flags = htole16(ni->ni_chan->ic_flags);
3765 tap->wt_rate = rinfo->rate;
3766 if (k != NULL)
3767 tap->wt_flags |= IEEE80211_RADIOTAP_F_WEP;
3768 ieee80211_radiotap_tx(vap, m);
3769 }
3770
3771 flags = 0;
3772 totlen = m->m_pkthdr.len;
3773 if (!IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3774 flags |= IWM_TX_CMD_FLG_ACK;
3775 }
3776
3777 if (type == IEEE80211_FC0_TYPE_DATA &&
3778 totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold &&
3779 !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
3780 flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
3781 }
3782
3783 tx->sta_id = IWM_STATION_ID;
3784
3785 if (type == IEEE80211_FC0_TYPE_MGT) {
3786 uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
3787
3788 if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
3789 subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
3790 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
3791 } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
3792 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3793 } else {
3794 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
3795 }
3796 } else {
3797 tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
3798 }
3799
3800 if (hdrlen & 3) {
3801 /* First segment length must be a multiple of 4. */
3802 flags |= IWM_TX_CMD_FLG_MH_PAD;
3803 tx->offload_assist |= htole16(IWM_TX_CMD_OFFLD_PAD);
3804 pad = 4 - (hdrlen & 3);
3805 } else {
3806 tx->offload_assist = 0;
3807 pad = 0;
3808 }
3809
3810 tx->len = htole16(totlen);
3811 tx->tid_tspec = tid;
3812 tx->life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
3813
3814 /* Set physical address of "scratch area". */
3815 tx->dram_lsb_ptr = htole32(data->scratch_paddr);
3816 tx->dram_msb_ptr = iwm_get_dma_hi_addr(data->scratch_paddr);
3817
3818 /* Copy 802.11 header in TX command. */
3819 memcpy((uint8_t *)tx + sizeof(*tx), wh, hdrlen);
3820
3821 flags |= IWM_TX_CMD_FLG_BT_DIS | IWM_TX_CMD_FLG_SEQ_CTL;
3822
3823 tx->sec_ctl = 0;
3824 tx->tx_flags |= htole32(flags);
3825
3826 /* Trim 802.11 header. */
3827 m_adj(m, hdrlen);
3828 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3829 segs, &nsegs, BUS_DMA_NOWAIT);
3830 if (error != 0) {
3831 if (error != EFBIG) {
3832 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3833 error);
3834 m_freem(m);
3835 return error;
3836 }
3837 /* Too many DMA segments, linearize mbuf. */
3838 m1 = m_collapse(m, M_NOWAIT, IWM_MAX_SCATTER - 2);
3839 if (m1 == NULL) {
3840 device_printf(sc->sc_dev,
3841 "%s: could not defrag mbuf\n", __func__);
3842 m_freem(m);
3843 return (ENOBUFS);
3844 }
3845 m = m1;
3846
3847 error = bus_dmamap_load_mbuf_sg(ring->data_dmat, data->map, m,
3848 segs, &nsegs, BUS_DMA_NOWAIT);
3849 if (error != 0) {
3850 device_printf(sc->sc_dev, "can't map mbuf (error %d)\n",
3851 error);
3852 m_freem(m);
3853 return error;
3854 }
3855 }
3856 data->m = m;
3857 data->in = in;
3858 data->done = 0;
3859
3860 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3861 "sending txd %p, in %p\n", data, data->in);
3862 KASSERT(data->in != NULL, ("node is NULL"));
3863
3864 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3865 "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
3866 ring->qid, ring->cur, totlen, nsegs,
3867 le32toh(tx->tx_flags),
3868 le32toh(tx->rate_n_flags),
3870 );
3871
3872 /* Fill TX descriptor. */
3873 memset(desc, 0, sizeof(*desc));
3874 desc->num_tbs = 2 + nsegs;
3875
3876 desc->tbs[0].lo = htole32(data->cmd_paddr);
3877 desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3878 (TB0_SIZE << 4));
3879 desc->tbs[1].lo = htole32(data->cmd_paddr + TB0_SIZE);
3880 desc->tbs[1].hi_n_len = htole16(iwm_get_dma_hi_addr(data->cmd_paddr) |
3881 ((sizeof(struct iwm_cmd_header) + sizeof(*tx) +
3882 hdrlen + pad - TB0_SIZE) << 4));
3883
3884 /* Other DMA segments are for data payload. */
3885 for (i = 0; i < nsegs; i++) {
3886 seg = &segs[i];
3887 desc->tbs[i + 2].lo = htole32(seg->ds_addr);
3888 desc->tbs[i + 2].hi_n_len =
3889 htole16(iwm_get_dma_hi_addr(seg->ds_addr)) |
3890 (seg->ds_len << 4);
3891 }
3892
3893 bus_dmamap_sync(ring->data_dmat, data->map,
3894 BUS_DMASYNC_PREWRITE);
3895 bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
3896 BUS_DMASYNC_PREWRITE);
3897 bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
3898 BUS_DMASYNC_PREWRITE);
3899
3900#if 0
3901 iwm_update_sched(sc, ring->qid, ring->cur, tx->sta_id, le16toh(tx->len));
3902#endif
3903
3904 /* Kick TX ring. */
3905 ring->cur = (ring->cur + 1) % IWM_TX_RING_COUNT;
3906 IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, ring->qid << 8 | ring->cur);
3907
3908 /* Mark TX ring as full if we reach a certain threshold. */
3909 if (++ring->queued > IWM_TX_RING_HIMARK) {
3910 sc->qfullmsk |= 1 << ring->qid;
3911 }
3912
3913 return 0;
3914}
3915
3916static int
3917iwm_raw_xmit(struct ieee80211_node *ni, struct mbuf *m,
3918 const struct ieee80211_bpf_params *params)
3919{
3920 struct ieee80211com *ic = ni->ni_ic;
3921 struct iwm_softc *sc = ic->ic_softc;
3922 int error = 0;
3923
3924 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3925 "->%s begin\n", __func__);
3926
3927 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
3928 m_freem(m);
3929 IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
3930 "<-%s not RUNNING\n", __func__);
3931 return (ENETDOWN);
3932 }
3933
3934 IWM_LOCK(sc);
3935 /* XXX fix this */
3936 if (params == NULL) {
3937 error = iwm_tx(sc, m, ni, 0);
3938 } else {
3939 error = iwm_tx(sc, m, ni, 0);
3940 }
3941 if (sc->sc_tx_timer == 0)
3942 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
3943 sc->sc_tx_timer = 5;
3944 IWM_UNLOCK(sc);
3945
3946 return (error);
3947}
3948
3949/*
3950 * mvm/tx.c
3951 */
3952
3953/*
3954 * Note that there are transports that buffer frames before they reach
3955 * the firmware. This means that after flush_tx_path is called, the
3956 * queue might not be empty. The race-free way to handle this is to:
3957 * 1) set the station as draining
3958 * 2) flush the Tx path
3959 * 3) wait for the transport queues to be empty
3960 */
3961int
3962iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
3963{
3964 int ret;
3965 struct iwm_tx_path_flush_cmd_v1 flush_cmd = {
3966 .queues_ctl = htole32(tfd_msk),
3967 .flush_ctl = htole16(IWM_DUMP_TX_FIFO_FLUSH),
3968 };
3969
3970 ret = iwm_send_cmd_pdu(sc, IWM_TXPATH_FLUSH, flags,
3971 sizeof(flush_cmd), &flush_cmd);
3972 if (ret)
3973 device_printf(sc->sc_dev,
3974 "Flushing tx queue failed: %d\n", ret);
3975 return ret;
3976}
3977
3978/*
3979 * BEGIN mvm/quota.c
3980 */
3981
3982static int
3983iwm_update_quotas(struct iwm_softc *sc, struct iwm_vap *ivp)
3984{
3985 struct iwm_time_quota_cmd_v1 cmd;
3986 int i, idx, ret, num_active_macs, quota, quota_rem;
3987 int colors[IWM_MAX_BINDINGS] = { -1, -1, -1, -1, };
3988 int n_ifs[IWM_MAX_BINDINGS] = {0, };
3989 uint16_t id;
3990
3991 memset(&cmd, 0, sizeof(cmd));
3992
3993 /* currently, PHY ID == binding ID */
3994 if (ivp) {
3995 id = ivp->phy_ctxt->id;
3996 KASSERT(id < IWM_MAX_BINDINGS, ("invalid id"));
3997 colors[id] = ivp->phy_ctxt->color;
3998
3999 if (1)
4000 n_ifs[id] = 1;
4001 }
4002
4003 /*
4004 * The FW's scheduling session consists of
4005 * IWM_MAX_QUOTA fragments. Divide these fragments
4006 * equally between all the bindings that require quota
4007 */
4008 num_active_macs = 0;
4009 for (i = 0; i < IWM_MAX_BINDINGS; i++) {
4010 cmd.quotas[i].id_and_color = htole32(IWM_FW_CTXT_INVALID);
4011 num_active_macs += n_ifs[i];
4012 }
4013
4014 quota = 0;
4015 quota_rem = 0;
4016 if (num_active_macs) {
4017 quota = IWM_MAX_QUOTA / num_active_macs;
4018 quota_rem = IWM_MAX_QUOTA % num_active_macs;
4019 }
4020
4021 for (idx = 0, i = 0; i < IWM_MAX_BINDINGS; i++) {
4022 if (colors[i] < 0)
4023 continue;
4024
4025 cmd.quotas[idx].id_and_color =
4026 htole32(IWM_FW_CMD_ID_AND_COLOR(i, colors[i]));
4027
4028 if (n_ifs[i] <= 0) {
4029 cmd.quotas[idx].quota = htole32(0);
4030 cmd.quotas[idx].max_duration = htole32(0);
4031 } else {
4032 cmd.quotas[idx].quota = htole32(quota * n_ifs[i]);
4033 cmd.quotas[idx].max_duration = htole32(0);
4034 }
4035 idx++;
4036 }
4037
4038 /* Give the remainder of the session to the first binding */
4039 cmd.quotas[0].quota = htole32(le32toh(cmd.quotas[0].quota) + quota_rem);
4040
4041 ret = iwm_send_cmd_pdu(sc, IWM_TIME_QUOTA_CMD, IWM_CMD_SYNC,
4042 sizeof(cmd), &cmd);
4043 if (ret)
4044 device_printf(sc->sc_dev,
4045 "%s: Failed to send quota: %d\n", __func__, ret);
4046 return ret;
4047}
4048
4049/*
4050 * END mvm/quota.c
4051 */
4052
4053/*
4054 * ieee80211 routines
4055 */
4056
4057/*
4058 * Change to AUTH state in 80211 state machine. Roughly matches what
4059 * Linux does in bss_info_changed().
4060 */
4061static int
4062iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
4063{
4064 struct ieee80211_node *ni;
4065 struct iwm_node *in;
4066 struct iwm_vap *iv = IWM_VAP(vap);
4067 uint32_t duration;
4068 int error;
4069
4070 /*
4071 * XXX i have a feeling that the vap node is being
4072 * freed from underneath us. Grr.
4073 */
4074 ni = ieee80211_ref_node(vap->iv_bss);
4075 in = IWM_NODE(ni);
4076 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_STATE,
4077 "%s: called; vap=%p, bss ni=%p\n",
4078 __func__,
4079 vap,
4080 ni);
4081 IWM_DPRINTF(sc, IWM_DEBUG_STATE, "%s: Current node bssid: %s\n",
4082 __func__, ether_sprintf(ni->ni_bssid));
4083
4084 in->in_assoc = 0;
4085 iv->iv_auth = 1;
4086
4087 /*
4088 * Firmware bug - it'll crash if the beacon interval is less
4089 * than 16. We can't avoid connecting at all, so refuse the
4090 * station state change, this will cause net80211 to abandon
4091 * attempts to connect to this AP, and eventually wpa_s will
4092 * blacklist the AP...
4093 */
4094 if (ni->ni_intval < 16) {
4095 device_printf(sc->sc_dev,
4096 "AP %s beacon interval is %d, refusing due to firmware bug!\n",
4097 ether_sprintf(ni->ni_bssid), ni->ni_intval);
4098 error = EINVAL;
4099 goto out;
4100 }
4101
4102 error = iwm_allow_mcast(vap, sc);
4103 if (error) {
4104 device_printf(sc->sc_dev,
4105 "%s: failed to set multicast\n", __func__);
4106 goto out;
4107 }
4108
4109 /*
4110 * This is where it deviates from what Linux does.
4111 *
4112 * Linux iwlwifi doesn't reset the nic each time, nor does it
4113 * call ctxt_add() here. Instead, it adds it during vap creation,
4114 * and always does a mac_ctx_changed().
4115 *
4116 * The openbsd port doesn't attempt to do that - it reset things
4117 * at odd states and does the add here.
4118 *
4119 * So, until the state handling is fixed (ie, we never reset
4120 * the NIC except for a firmware failure, which should drag
4121 * the NIC back to IDLE, re-setup and re-add all the mac/phy
4122 * contexts that are required), let's do a dirty hack here.
4123 */
4124 if (iv->is_uploaded) {
4125 if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
4126 device_printf(sc->sc_dev,
4127 "%s: failed to update MAC\n", __func__);
4128 goto out;
4129 }
4130 } else {
4131 if ((error = iwm_mac_ctxt_add(sc, vap)) != 0) {
4132 device_printf(sc->sc_dev,
4133 "%s: failed to add MAC\n", __func__);
4134 goto out;
4135 }
4136 }
4137 sc->sc_firmware_state = 1;
4138
4139 if ((error = iwm_phy_ctxt_changed(sc, &sc->sc_phyctxt[0],
4140 in->in_ni.ni_chan, 1, 1)) != 0) {
4141 device_printf(sc->sc_dev,
4142 "%s: failed update phy ctxt\n", __func__);
4143 goto out;
4144 }
4145 iv->phy_ctxt = &sc->sc_phyctxt[0];
4146
4147 if ((error = iwm_binding_add_vif(sc, iv)) != 0) {
4148 device_printf(sc->sc_dev,
4149 "%s: binding update cmd\n", __func__);
4150 goto out;
4151 }
4152 sc->sc_firmware_state = 2;
4153 /*
4154 * Authentication becomes unreliable when powersaving is left enabled
4155 * here. Powersaving will be activated again when association has
4156 * finished or is aborted.
4157 */
4158 iv->ps_disabled = TRUE;
4159 error = iwm_power_update_mac(sc);
4160 iv->ps_disabled = FALSE;
4161 if (error != 0) {
4162 device_printf(sc->sc_dev,
4163 "%s: failed to update power management\n",
4164 __func__);
4165 goto out;
4166 }
4167 if ((error = iwm_add_sta(sc, in)) != 0) {
4168 device_printf(sc->sc_dev,
4169 "%s: failed to add sta\n", __func__);
4170 goto out;
4171 }
4172 sc->sc_firmware_state = 3;
4173
4174 /*
4175 * Prevent the FW from wandering off channel during association
4176 * by "protecting" the session with a time event.
4177 */
4178 /* XXX duration is in units of TU, not MS */
4179 duration = IWM_TE_SESSION_PROTECTION_MAX_TIME_MS;
4180 iwm_protect_session(sc, iv, duration, 500 /* XXX magic number */, TRUE);
4181
4182 error = 0;
4183out:
4184 if (error != 0)
4185 iv->iv_auth = 0;
4186 ieee80211_free_node(ni);
4187 return (error);
4188}
4189
4190static struct ieee80211_node *
4191iwm_node_alloc(struct ieee80211vap *vap, const uint8_t mac[IEEE80211_ADDR_LEN])
4192{
4193 return malloc(sizeof (struct iwm_node), M_80211_NODE,
4194 M_NOWAIT | M_ZERO);
4195}
4196
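/*
 * Translate the PLCP value in a firmware rate_n_flags word back to an
 * 802.11 rate (0.5 Mb/s units); returns 0 if the PLCP value is
 * unknown.
 */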
4197static uint8_t
4198iwm_rate_from_ucode_rate(uint32_t rate_n_flags)
4199{
4200 uint8_t plcp = rate_n_flags & 0xff;
4201 int i;
4202
4203 for (i = 0; i <= IWM_RIDX_MAX; i++) {
4204 if (iwm_rates[i].plcp == plcp)
4205 return iwm_rates[i].rate;
4206 }
4207 return 0;
4208}
4209
4210uint8_t
4211iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
4212{
4213 int i;
4214 uint8_t rval;
4215
4216 for (i = 0; i < rs->rs_nrates; i++) {
4217 rval = (rs->rs_rates[i] & IEEE80211_RATE_VAL);
4218 if (rval == iwm_rates[ridx].rate)
4219 return rs->rs_rates[i];
4220 }
4221
4222 return 0;
4223}
4224
4225static int
4226iwm_rate2ridx(struct iwm_softc *sc, uint8_t rate)
4227{
4228 int i;
4229
4230 for (i = 0; i <= IWM_RIDX_MAX; i++) {
4231 if (iwm_rates[i].rate == rate)
4232 return i;
4233 }
4234
4235 device_printf(sc->sc_dev,
4236 "%s: WARNING: device rate for %u not found!\n",
4237 __func__, rate);
4238
4239 return -1;
4240}
4241
4242
4243static void
4244iwm_setrates(struct iwm_softc *sc, struct iwm_node *in, int rix)
4245{
4246 struct ieee80211_node *ni = &in->in_ni;
4247 struct iwm_lq_cmd *lq = &in->in_lq;
4248 struct ieee80211_rateset *rs = &ni->ni_rates;
4249 int nrates = rs->rs_nrates;
4250 int i, ridx, tab = 0;
4251// int txant = 0;
4252
4253 KASSERT(rix >= 0 && rix < nrates, ("invalid rix"));
4254
4255 if (nrates > nitems(lq->rs_table)) {
4256 device_printf(sc->sc_dev,
4257 "%s: node supports %d rates, driver handles "
4258 "only %zu\n", __func__, nrates, nitems(lq->rs_table));
4259 return;
4260 }
4261 if (nrates == 0) {
4262 device_printf(sc->sc_dev,
4263 "%s: node supports 0 rates, odd!\n", __func__);
4264 return;
4265 }
4266 nrates = imin(rix + 1, nrates);
4267
4268 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4269 "%s: nrates=%d\n", __func__, nrates);
4270
4271 /* then construct a lq_cmd based on those */
4272 memset(lq, 0, sizeof(*lq));
4273 lq->sta_id = IWM_STATION_ID;
4274
4275 /* For HT, always enable RTS/CTS to avoid excessive retries. */
4276 if (ni->ni_flags & IEEE80211_NODE_HT)
4277 		lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
4278
4279 /*
4280 * are these used? (we don't do SISO or MIMO)
4281 * need to set them to non-zero, though, or we get an error.
4282 */
4283 lq->single_stream_ant_msk = 1;
4284 lq->dual_stream_ant_msk = 1;
4285
4286 /*
4287 * Build the actual rate selection table.
4288 * The lowest bits are the rates. Additionally,
4289 * CCK needs bit 9 to be set. The rest of the bits
4290 	 * we add to the table select the tx antenna.
4291 	 * Note that we add the rates starting with the highest rate first
4292 * (opposite of ni_rates).
4293 */
4294 for (i = 0; i < nrates; i++) {
4295 int rate = rs->rs_rates[rix - i] & IEEE80211_RATE_VAL;
4296 int nextant;
4297
4298 /* Map 802.11 rate to HW rate index. */
4299 ridx = iwm_rate2ridx(sc, rate);
4300 if (ridx == -1)
4301 continue;
4302
4303#if 0
4304 if (txant == 0)
4305 txant = iwm_get_valid_tx_ant(sc);
4306 nextant = 1<<(ffs(txant)-1);
4307 txant &= ~nextant;
4308#else
4309 nextant = iwm_get_valid_tx_ant(sc);
4310#endif
4311 tab = iwm_rates[ridx].plcp;
4312 tab |= nextant << IWM_RATE_MCS_ANT_POS;
4313 if (IWM_RIDX_IS_CCK(ridx))
4314 tab |= IWM_RATE_MCS_CCK_MSK;
4315 IWM_DPRINTF(sc, IWM_DEBUG_TXRATE,
4316 "station rate i=%d, rate=%d, hw=%x\n",
4317 i, iwm_rates[ridx].rate, tab);
4318 lq->rs_table[i] = htole32(tab);
4319 }
4320 /* then fill the rest with the lowest possible rate */
4321 for (i = nrates; i < nitems(lq->rs_table); i++) {
4322 KASSERT(tab != 0, ("invalid tab"));
4323 lq->rs_table[i] = htole32(tab);
4324 }
4325}
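
Each lq->rs_table[] entry built by iwm_setrates() is a 32-bit word: the PLCP code in the low bits, the TX antenna mask shifted up to IWM_RATE_MCS_ANT_POS, and a CCK flag (bit 9), stored little-endian. The sketch below mirrors that composition; EX_ANT_POS and EX_CCK_MSK are placeholder values for illustration, not the driver's IWM_RATE_MCS_* constants.

#include <stdint.h>
#include <stdio.h>

#define EX_ANT_POS	14		/* placeholder antenna-mask position */
#define EX_CCK_MSK	(1u << 9)	/* CCK flag, per the comment above */

static uint32_t
build_rate_entry(uint8_t plcp, uint8_t ant_msk, int is_cck)
{
	uint32_t tab = plcp;

	tab |= (uint32_t)ant_msk << EX_ANT_POS;
	if (is_cck)
		tab |= EX_CCK_MSK;
	return tab;	/* the driver stores htole32(tab) in lq->rs_table[] */
}

int
main(void)
{
	/* OFDM PLCP 0x3 on antenna A (mask 0x1), not CCK */
	printf("0x%08x\n", build_rate_entry(0x3, 0x1, 0));
	return 0;
}
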
4326
4327static void
4328iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
4329{
4330 struct iwm_vap *ivp = IWM_VAP(vap);
4331 int error;
4332
4333 /* Avoid Tx watchdog triggering, when transfers get dropped here. */
4334 sc->sc_tx_timer = 0;
4335
4336 ivp->iv_auth = 0;
4337 if (sc->sc_firmware_state == 3) {
4339// iwm_flush_tx_path(sc, 0xf, IWM_CMD_SYNC);
4340 error = iwm_rm_sta(sc, vap, TRUE);
4341 if (error) {
4342 device_printf(sc->sc_dev,
4343 "%s: Failed to remove station: %d\n",
4344 __func__, error);
4345 }
4346 }
4347 if (sc->sc_firmware_state == 3) {
4348 error = iwm_mac_ctxt_changed(sc, vap);
4349 if (error) {
4350 device_printf(sc->sc_dev,
4351 "%s: Failed to change mac context: %d\n",
4352 __func__, error);
4353 }
4354 }
4355 if (sc->sc_firmware_state == 3) {
4356 error = iwm_sf_update(sc, vap, FALSE);
4357 if (error) {
4358 device_printf(sc->sc_dev,
4359 "%s: Failed to update smart FIFO: %d\n",
4360 __func__, error);
4361 }
4362 }
4363 if (sc->sc_firmware_state == 3) {
4364 error = iwm_rm_sta_id(sc, vap);
4365 if (error) {
4366 device_printf(sc->sc_dev,
4367 "%s: Failed to remove station id: %d\n",
4368 __func__, error);
4369 }
4370 }
4371 if (sc->sc_firmware_state == 3) {
4372 error = iwm_update_quotas(sc, NULL);
4373 if (error) {
4374 device_printf(sc->sc_dev,
4375 "%s: Failed to update PHY quota: %d\n",
4376 __func__, error);
4377 }
4378 }
4379 if (sc->sc_firmware_state == 3) {
4380 /* XXX Might need to specify bssid correctly. */
4381 error = iwm_mac_ctxt_changed(sc, vap);
4382 if (error) {
4383 device_printf(sc->sc_dev,
4384 "%s: Failed to change mac context: %d\n",
4385 __func__, error);
4386 }
4387 }
4388 if (sc->sc_firmware_state == 3) {
4389 sc->sc_firmware_state = 2;
4390 }
4391 if (sc->sc_firmware_state > 1) {
4392 error = iwm_binding_remove_vif(sc, ivp);
4393 if (error) {
4394 device_printf(sc->sc_dev,
4395 "%s: Failed to remove channel ctx: %d\n",
4396 __func__, error);
4397 }
4398 }
4399 if (sc->sc_firmware_state > 1) {
4400 sc->sc_firmware_state = 1;
4401 }
4402 ivp->phy_ctxt = NULL;
4403 if (sc->sc_firmware_state > 0) {
4404 error = iwm_mac_ctxt_changed(sc, vap);
4405 if (error) {
4406 device_printf(sc->sc_dev,
4407 "%s: Failed to change mac context: %d\n",
4408 __func__, error);
4409 }
4410 }
4411 if (sc->sc_firmware_state > 0) {
4412 error = iwm_power_update_mac(sc);
4413 if (error != 0) {
4414 device_printf(sc->sc_dev,
4415 "%s: failed to update power management\n",
4416 __func__);
4417 }
4418 }
4419 sc->sc_firmware_state = 0;
4420}
4421
4422static int
4423iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
4424{
4425 struct iwm_vap *ivp = IWM_VAP(vap);
4426 struct ieee80211com *ic = vap->iv_ic;
4427 struct iwm_softc *sc = ic->ic_softc;
4428 struct iwm_node *in;
4429 int error;
4430
4431 IWM_DPRINTF(sc, IWM_DEBUG_STATE,
4432 "switching state %s -> %s arg=0x%x\n",
4433 ieee80211_state_name[vap->iv_state],
4434 ieee80211_state_name[nstate],
4435 arg);
4436
4437 IEEE80211_UNLOCK(ic);
4438 IWM_LOCK(sc);
4439
4440 if ((sc->sc_flags & IWM_FLAG_SCAN_RUNNING) &&
4441 (nstate == IEEE80211_S_AUTH ||
4442 nstate == IEEE80211_S_ASSOC ||
4443 nstate == IEEE80211_S_RUN)) {
4444 /* Stop blinking for a scan, when authenticating. */
4445 		iwm_led_blink_stop(sc);
4446 	}
4447
4448 if (vap->iv_state == IEEE80211_S_RUN && nstate != IEEE80211_S_RUN) {
4449 iwm_led_disable(sc);
4450 /* disable beacon filtering if we're hopping out of RUN */
4451 		iwm_disable_beacon_filter(sc);
4452 		if (((in = IWM_NODE(vap->iv_bss)) != NULL))
4453 in->in_assoc = 0;
4454 }
4455
4456 if ((vap->iv_state == IEEE80211_S_AUTH ||
4457 vap->iv_state == IEEE80211_S_ASSOC ||
4458 vap->iv_state == IEEE80211_S_RUN) &&
4459 (nstate == IEEE80211_S_INIT ||
4460 nstate == IEEE80211_S_SCAN ||
4461 nstate == IEEE80211_S_AUTH)) {
4462 		iwm_stop_session_protection(sc, ivp);
4463 	}
4464
4465 if ((vap->iv_state == IEEE80211_S_RUN ||
4466 vap->iv_state == IEEE80211_S_ASSOC) &&
4467 nstate == IEEE80211_S_INIT) {
4468 /*
4469 * In this case, iv_newstate() wants to send an 80211 frame on
4470 * the network that we are leaving. So we need to call it,
4471 * before tearing down all the firmware state.
4472 */
4473 IWM_UNLOCK(sc);
4474 IEEE80211_LOCK(ic);
4475 ivp->iv_newstate(vap, nstate, arg);
4476 IEEE80211_UNLOCK(ic);
4477 IWM_LOCK(sc);
4478 iwm_bring_down_firmware(sc, vap);
4479 IWM_UNLOCK(sc);
4480 IEEE80211_LOCK(ic);
4481 return 0;
4482 }
4483
4484 switch (nstate) {
4485 case IEEE80211_S_INIT:
4486 case IEEE80211_S_SCAN:
4487 break;
4488
4489 case IEEE80211_S_AUTH:
4490 iwm_bring_down_firmware(sc, vap);
4491 if ((error = iwm_auth(vap, sc)) != 0) {
4492 device_printf(sc->sc_dev,
4493 "%s: could not move to auth state: %d\n",
4494 __func__, error);
4495 iwm_bring_down_firmware(sc, vap);
4496 IWM_UNLOCK(sc);
4497 IEEE80211_LOCK(ic);
4498 return 1;
4499 }
4500 break;
4501
4502 case IEEE80211_S_ASSOC:
4503 /*
4504 * EBS may be disabled due to previous failures reported by FW.
4505 * Reset EBS status here assuming environment has been changed.
4506 */
4507 sc->last_ebs_successful = TRUE;
4508 break;
4509
4510 case IEEE80211_S_RUN:
4511 in = IWM_NODE(vap->iv_bss);
4512 /* Update the association state, now we have it all */
4513 		/* (e.g. the associd comes in at this point) */
4514 error = iwm_update_sta(sc, in);
4515 if (error != 0) {
4516 device_printf(sc->sc_dev,
4517 "%s: failed to update STA\n", __func__);
4518 IWM_UNLOCK(sc);
4519 IEEE80211_LOCK(ic);
4520 return error;
4521 }
4522 in->in_assoc = 1;
4523 error = iwm_mac_ctxt_changed(sc, vap);
4524 if (error != 0) {
4525 device_printf(sc->sc_dev,
4526 "%s: failed to update MAC: %d\n", __func__, error);
4527 }
4528
4529 iwm_sf_update(sc, vap, FALSE);
4530 iwm_enable_beacon_filter(sc, ivp);
4532 iwm_update_quotas(sc, ivp);
4533 int rix = ieee80211_ratectl_rate(&in->in_ni, NULL, 0);
4534 iwm_setrates(sc, in, rix);
4535
4536 if ((error = iwm_send_lq_cmd(sc, &in->in_lq, TRUE)) != 0) {
4537 device_printf(sc->sc_dev,
4538 "%s: IWM_LQ_CMD failed: %d\n", __func__, error);
4539 }
4540
4541 iwm_led_enable(sc);
4542 break;
4543
4544 default:
4545 break;
4546 }
4547 IWM_UNLOCK(sc);
4548 IEEE80211_LOCK(ic);
4549
4550 return (ivp->iv_newstate(vap, nstate, arg));
4551}
4552
4553void
4554iwm_endscan_cb(void *arg, int pending)
4555{
4556 struct iwm_softc *sc = arg;
4557 struct ieee80211com *ic = &sc->sc_ic;
4558
4559 IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
4560 "%s: scan ended\n",
4561 __func__);
4562
4563 ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
4564}
4565
4566static int
4568{
4569 struct iwm_bt_coex_cmd bt_cmd;
4570
4571 bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
4573
4574 return iwm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
4575 &bt_cmd);
4576}
4577
4578static boolean_t
4580{
4581 boolean_t nvm_lar = sc->nvm_data->lar_enabled;
4582 boolean_t tlv_lar = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT);
4583
4584 if (iwm_lar_disable)
4585 return FALSE;
4586
4587 /*
4588 * Enable LAR only if it is supported by the FW (TLV) &&
4589 * enabled in the NVM
4590 */
4591 	if (sc->cfg->device_family >= IWM_DEVICE_FAMILY_8000)
4592 		return nvm_lar && tlv_lar;
4593 else
4594 return tlv_lar;
4595}
4596
4597static boolean_t
4599{
4602}
4603
4604static int
4605iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
4606{
4607 struct iwm_mcc_update_cmd mcc_cmd;
4608 struct iwm_host_cmd hcmd = {
4609 		.id = IWM_MCC_UPDATE_CMD,
4610 		.flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
4611 .data = { &mcc_cmd },
4612 };
4613 int ret;
4614#ifdef IWM_DEBUG
4615 struct iwm_rx_packet *pkt;
4616 struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
4617 struct iwm_mcc_update_resp_v2 *mcc_resp;
4618 int n_channels;
4619 uint16_t mcc;
4620#endif
4621 	int resp_v2 = iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
4622
4623 if (!iwm_is_lar_supported(sc)) {
4624 IWM_DPRINTF(sc, IWM_DEBUG_LAR, "%s: no LAR support\n",
4625 __func__);
4626 return 0;
4627 }
4628
4629 memset(&mcc_cmd, 0, sizeof(mcc_cmd));
4630 mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
4633 else
4635
4636 if (resp_v2)
4637 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
4638 else
4639 hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
4640
4641 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4642 "send MCC update to FW with '%c%c' src = %d\n",
4643 alpha2[0], alpha2[1], mcc_cmd.source_id);
4644
4645 ret = iwm_send_cmd(sc, &hcmd);
4646 if (ret)
4647 return ret;
4648
4649#ifdef IWM_DEBUG
4650 pkt = hcmd.resp_pkt;
4651
4652 /* Extract MCC response */
4653 if (resp_v2) {
4654 mcc_resp = (void *)pkt->data;
4655 mcc = mcc_resp->mcc;
4656 n_channels = le32toh(mcc_resp->n_channels);
4657 } else {
4658 mcc_resp_v1 = (void *)pkt->data;
4659 mcc = mcc_resp_v1->mcc;
4660 n_channels = le32toh(mcc_resp_v1->n_channels);
4661 }
4662
4663 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
4664 if (mcc == 0)
4665 mcc = 0x3030; /* "00" - world */
4666
4667 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
4668 "regulatory domain '%c%c' (%d channels available)\n",
4669 mcc >> 8, mcc & 0xff, n_channels);
4670#endif
4671 iwm_free_resp(sc, &hcmd);
4672
4673 return 0;
4674}
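
The MCC sent by iwm_send_update_mcc_cmd() is simply the two ASCII letters of the regulatory alpha2 code packed into a 16-bit value, which is also how the response is unpacked for the debug print (and how 0x3030 reads back as the "00" world domain). A standalone sketch of that packing, with a hypothetical helper name:

#include <stdint.h>
#include <stdio.h>

static uint16_t
alpha2_to_mcc(const char *alpha2)
{
	return (uint16_t)(alpha2[0] << 8 | alpha2[1]);
}

int
main(void)
{
	uint16_t mcc = alpha2_to_mcc("ZZ");

	printf("'ZZ' -> 0x%04x -> '%c%c'\n", mcc, mcc >> 8, mcc & 0xff);
	printf("world domain workaround: 0x3030 -> '%c%c'\n",
	    0x3030 >> 8, 0x3030 & 0xff);
	return 0;
}
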
4675
4676static void
4677iwm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
4678{
4679 struct iwm_host_cmd cmd = {
4681 .len = { sizeof(uint32_t), },
4682 .data = { &backoff, },
4683 };
4684
4685 if (iwm_send_cmd(sc, &cmd) != 0) {
4686 device_printf(sc->sc_dev,
4687 "failed to change thermal tx backoff\n");
4688 }
4689}
4690
4691static int
4693{
4694 struct ieee80211com *ic = &sc->sc_ic;
4695 int error, i, ac;
4696
4697 sc->sf_state = IWM_SF_UNINIT;
4698
4699 if ((error = iwm_start_hw(sc)) != 0) {
4700 printf("iwm_start_hw: failed %d\n", error);
4701 return error;
4702 }
4703
4704 if ((error = iwm_run_init_ucode(sc, 0)) != 0) {
4705 printf("iwm_run_init_ucode: failed %d\n", error);
4706 return error;
4707 }
4708
4709 /*
4710 * should stop and start HW since that INIT
4711 * image just loaded
4712 */
4713 iwm_stop_device(sc);
4714 sc->sc_ps_disabled = FALSE;
4715 if ((error = iwm_start_hw(sc)) != 0) {
4716 device_printf(sc->sc_dev, "could not initialize hardware\n");
4717 return error;
4718 }
4719
4720 	/* restart, this time with the regular firmware */
4721 	error = iwm_load_ucode_wait_alive(sc, IWM_UCODE_REGULAR);
4722 	if (error) {
4723 device_printf(sc->sc_dev, "could not load firmware\n");
4724 goto error;
4725 }
4726
4727 error = iwm_sf_update(sc, NULL, FALSE);
4728 if (error)
4729 device_printf(sc->sc_dev, "Failed to initialize Smart Fifo\n");
4730
4731 if ((error = iwm_send_bt_init_conf(sc)) != 0) {
4732 device_printf(sc->sc_dev, "bt init conf failed\n");
4733 goto error;
4734 }
4735
4736 	error = iwm_send_tx_ant_cfg(sc, iwm_get_valid_tx_ant(sc));
4737 	if (error != 0) {
4738 device_printf(sc->sc_dev, "antenna config failed\n");
4739 goto error;
4740 }
4741
4742 /* Send phy db control command and then phy db calibration */
4743 if ((error = iwm_send_phy_db_data(sc->sc_phy_db)) != 0)
4744 goto error;
4745
4746 if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
4747 device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
4748 goto error;
4749 }
4750
4751 /* Add auxiliary station for scanning */
4752 if ((error = iwm_add_aux_sta(sc)) != 0) {
4753 device_printf(sc->sc_dev, "add_aux_sta failed\n");
4754 goto error;
4755 }
4756
4757 for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
4758 /*
4759 * The channel used here isn't relevant as it's
4760 * going to be overwritten in the other flows.
4761 * For now use the first channel we have.
4762 */
4763 if ((error = iwm_phy_ctxt_add(sc,
4764 &sc->sc_phyctxt[i], &ic->ic_channels[1], 1, 1)) != 0)
4765 goto error;
4766 }
4767
4768 /* Initialize tx backoffs to the minimum. */
4769 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_7000)
4770 		iwm_tt_tx_backoff(sc, 0);
4771
4772 if (iwm_config_ltr(sc) != 0)
4773 device_printf(sc->sc_dev, "PCIe LTR configuration failed\n");
4774
4775 error = iwm_power_update_device(sc);
4776 if (error)
4777 goto error;
4778
4779 if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
4780 goto error;
4781
4782 	if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
4783 		if ((error = iwm_config_umac_scan(sc)) != 0)
4784 goto error;
4785 }
4786
4787 /* Enable Tx queues. */
4788 for (ac = 0; ac < WME_NUM_AC; ac++) {
4789 error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
4790 iwm_ac_to_tx_fifo[ac]);
4791 if (error)
4792 goto error;
4793 }
4794
4795 if ((error = iwm_disable_beacon_filter(sc)) != 0) {
4796 device_printf(sc->sc_dev, "failed to disable beacon filter\n");
4797 goto error;
4798 }
4799
4800 return 0;
4801
4802 error:
4803 iwm_stop_device(sc);
4804 return error;
4805}
4806
4807/* Allow multicast from our BSSID. */
4808static int
4809iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
4810{
4811 struct ieee80211_node *ni = vap->iv_bss;
4812 struct iwm_mcast_filter_cmd *cmd;
4813 size_t size;
4814 int error;
4815
4816 size = roundup(sizeof(*cmd), 4);
4817 cmd = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
4818 if (cmd == NULL)
4819 return ENOMEM;
4820 cmd->filter_own = 1;
4821 cmd->port_id = 0;
4822 cmd->count = 0;
4823 cmd->pass_all = 1;
4824 IEEE80211_ADDR_COPY(cmd->bssid, ni->ni_bssid);
4825
4826 	error = iwm_send_cmd_pdu(sc, IWM_MCAST_FILTER_CMD,
4827 	    IWM_CMD_SYNC, size, cmd);
4828 free(cmd, M_DEVBUF);
4829
4830 return (error);
4831}
4832
4833/*
4834 * ifnet interfaces
4835 */
4836
4837static void
4839{
4840 int error;
4841
4842 if (sc->sc_flags & IWM_FLAG_HW_INITED) {
4843 return;
4844 }
4845 sc->sc_generation++;
4846 sc->sc_flags &= ~IWM_FLAG_STOPPED;
4847
4848 if ((error = iwm_init_hw(sc)) != 0) {
4849 printf("iwm_init_hw failed %d\n", error);
4850 iwm_stop(sc);
4851 return;
4852 }
4853
4854 /*
4855 * Ok, firmware loaded and we are jogging
4856 */
4857 	sc->sc_flags |= IWM_FLAG_HW_INITED;
4858}
4859
4860static int
4861iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
4862{
4863 struct iwm_softc *sc;
4864 int error;
4865
4866 sc = ic->ic_softc;
4867
4868 IWM_LOCK(sc);
4869 if ((sc->sc_flags & IWM_FLAG_HW_INITED) == 0) {
4870 IWM_UNLOCK(sc);
4871 return (ENXIO);
4872 }
4873 error = mbufq_enqueue(&sc->sc_snd, m);
4874 if (error) {
4875 IWM_UNLOCK(sc);
4876 return (error);
4877 }
4878 iwm_start(sc);
4879 IWM_UNLOCK(sc);
4880 return (0);
4881}
4882
4883/*
4884 * Dequeue packets from sendq and call send.
4885 */
4886static void
4888{
4889 struct ieee80211_node *ni;
4890 struct mbuf *m;
4891 int ac = 0;
4892
4893 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "->%s\n", __func__);
4894 while (sc->qfullmsk == 0 &&
4895 (m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
4896 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
4897 if (iwm_tx(sc, m, ni, ac) != 0) {
4898 if_inc_counter(ni->ni_vap->iv_ifp,
4899 IFCOUNTER_OERRORS, 1);
4900 ieee80211_free_node(ni);
4901 continue;
4902 }
4903 if (sc->sc_tx_timer == 0) {
4904 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog,
4905 sc);
4906 }
4907 sc->sc_tx_timer = 15;
4908 }
4909 IWM_DPRINTF(sc, IWM_DEBUG_XMIT | IWM_DEBUG_TRACE, "<-%s\n", __func__);
4910}
4911
4912static void
4914{
4915
4916 sc->sc_flags &= ~IWM_FLAG_HW_INITED;
4917 	sc->sc_flags |= IWM_FLAG_STOPPED;
4918 	sc->sc_generation++;
4919 	iwm_led_blink_stop(sc);
4920 	sc->sc_tx_timer = 0;
4921 iwm_stop_device(sc);
4922 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
4923}
4924
4925static void
4927{
4928 struct iwm_softc *sc = arg;
4929 struct ieee80211com *ic = &sc->sc_ic;
4930
4931 if (sc->sc_attached == 0)
4932 return;
4933
4934 if (sc->sc_tx_timer > 0) {
4935 if (--sc->sc_tx_timer == 0) {
4936 device_printf(sc->sc_dev, "device timeout\n");
4937#ifdef IWM_DEBUG
4938 iwm_nic_error(sc);
4939#endif
4940 ieee80211_restart_all(ic);
4941 counter_u64_add(sc->sc_ic.ic_oerrors, 1);
4942 return;
4943 }
4944 callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
4945 }
4946}
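
iwm_start() arms sc_tx_timer to 15 whenever a frame is handed to the hardware and starts a 1 Hz callout; iwm_watchdog() above decrements it once per second and declares a device timeout when it reaches zero. The plain-C sketch below models only that countdown pattern, without callouts or locking; all names in it are illustrative.

#include <stdio.h>

static int tx_timer;

static void
arm_watchdog(void)
{
	if (tx_timer == 0)
		printf("starting 1 Hz tick\n");	/* callout_reset() in the driver */
	tx_timer = 15;				/* seconds until a timeout */
}

static void
watchdog_tick(void)
{
	if (tx_timer > 0 && --tx_timer == 0)
		printf("device timeout\n");	/* the driver restarts the VAPs here */
}

int
main(void)
{
	arm_watchdog();
	for (int s = 0; s < 16; s++)
		watchdog_tick();
	return 0;
}
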
4947
4948static void
4949iwm_parent(struct ieee80211com *ic)
4950{
4951 struct iwm_softc *sc = ic->ic_softc;
4952 int startall = 0;
4953 int rfkill = 0;
4954
4955 IWM_LOCK(sc);
4956 if (ic->ic_nrunning > 0) {
4957 if (!(sc->sc_flags & IWM_FLAG_HW_INITED)) {
4958 iwm_init(sc);
4959 rfkill = iwm_check_rfkill(sc);
4960 if (!rfkill)
4961 startall = 1;
4962 }
4963 } else if (sc->sc_flags & IWM_FLAG_HW_INITED)
4964 iwm_stop(sc);
4965 IWM_UNLOCK(sc);
4966 if (startall)
4967 ieee80211_start_all(ic);
4968 else if (rfkill)
4969 taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
4970}
4971
4972static void
4973iwm_rftoggle_task(void *arg, int npending __unused)
4974{
4975 struct iwm_softc *sc = arg;
4976 struct ieee80211com *ic = &sc->sc_ic;
4977 int rfkill;
4978
4979 IWM_LOCK(sc);
4980 rfkill = iwm_check_rfkill(sc);
4981 IWM_UNLOCK(sc);
4982 if (rfkill) {
4983 device_printf(sc->sc_dev,
4984 "%s: rfkill switch, disabling interface\n", __func__);
4985 ieee80211_suspend_all(ic);
4986 ieee80211_notify_radio(ic, 0);
4987 } else {
4988 device_printf(sc->sc_dev,
4989 "%s: rfkill cleared, re-enabling interface\n", __func__);
4990 ieee80211_resume_all(ic);
4991 ieee80211_notify_radio(ic, 1);
4992 }
4993}
4994
4995/*
4996 * The interrupt side of things
4997 */
4998
4999/*
5000 * error dumping routines are from iwlwifi/mvm/utils.c
5001 */
5002
5003/*
5004 * Note: This structure is read from the device with IO accesses,
5005 * and the reading already does the endian conversion. As it is
5006 * read with uint32_t-sized accesses, any members with a different size
5007 * need to be ordered correctly though!
5008 */
5009 struct iwm_error_event_table {
5010 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5011 uint32_t error_id; /* type of error */
5012 uint32_t trm_hw_status0; /* TRM HW status */
5013 uint32_t trm_hw_status1; /* TRM HW status */
5014 uint32_t blink2; /* branch link */
5015 uint32_t ilink1; /* interrupt link */
5016 uint32_t ilink2; /* interrupt link */
5017 uint32_t data1; /* error-specific data */
5018 uint32_t data2; /* error-specific data */
5019 uint32_t data3; /* error-specific data */
5020 uint32_t bcon_time; /* beacon timer */
5021 uint32_t tsf_low; /* network timestamp function timer */
5022 uint32_t tsf_hi; /* network timestamp function timer */
5023 uint32_t gp1; /* GP1 timer register */
5024 uint32_t gp2; /* GP2 timer register */
5025 uint32_t fw_rev_type; /* firmware revision type */
5026 uint32_t major; /* uCode version major */
5027 uint32_t minor; /* uCode version minor */
5028 uint32_t hw_ver; /* HW Silicon version */
5029 uint32_t brd_ver; /* HW board version */
5030 uint32_t log_pc; /* log program counter */
5031 uint32_t frame_ptr; /* frame pointer */
5032 uint32_t stack_ptr; /* stack pointer */
5033 uint32_t hcmd; /* last host command header */
5034 uint32_t isr0; /* isr status register LMPM_NIC_ISR0:
5035 * rxtx_flag */
5036 uint32_t isr1; /* isr status register LMPM_NIC_ISR1:
5037 * host_flag */
5038 uint32_t isr2; /* isr status register LMPM_NIC_ISR2:
5039 * enc_flag */
5040 uint32_t isr3; /* isr status register LMPM_NIC_ISR3:
5041 * time_flag */
5042 uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
5043 * wico interrupt */
5044 uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
5045 uint32_t wait_event; /* wait event() caller address */
5046 uint32_t l2p_control; /* L2pControlField */
5047 uint32_t l2p_duration; /* L2pDurationField */
5048 uint32_t l2p_mhvalid; /* L2pMhValidBits */
5049 uint32_t l2p_addr_match; /* L2pAddrMatchStat */
5050 uint32_t lmpm_pmg_sel; /* indicate which clocks are turned on
5051 * (LMPM_PMG_SEL) */
5052 	uint32_t u_timestamp;	/* indicates the date and time of the
5053 				 * compilation */
5054 uint32_t flow_handler; /* FH read/write pointers, RX credit */
5055} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
5056
5057/*
5058 * UMAC error struct - relevant starting from family 8000 chip.
5059 * Note: This structure is read from the device with IO accesses,
5060 * and the reading already does the endian conversion. As it is
5061 * read with u32-sized accesses, any members with a different size
5062 * need to be ordered correctly though!
5063 */
5064 struct iwm_umac_error_event_table {
5065 	uint32_t valid;		/* (nonzero) valid, (0) log is empty */
5066 uint32_t error_id; /* type of error */
5067 uint32_t blink1; /* branch link */
5068 uint32_t blink2; /* branch link */
5069 uint32_t ilink1; /* interrupt link */
5070 uint32_t ilink2; /* interrupt link */
5071 uint32_t data1; /* error-specific data */
5072 uint32_t data2; /* error-specific data */
5073 uint32_t data3; /* error-specific data */
5074 uint32_t umac_major;
5075 uint32_t umac_minor;
5076 uint32_t frame_pointer; /* core register 27*/
5077 uint32_t stack_pointer; /* core register 28 */
5078 uint32_t cmd_header; /* latest host cmd sent to UMAC */
5079 uint32_t nic_isr_pref; /* ISR status register */
5080} __packed;
5081
5082#define ERROR_START_OFFSET (1 * sizeof(uint32_t))
5083#define ERROR_ELEM_SIZE (7 * sizeof(uint32_t))
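
ERROR_START_OFFSET and ERROR_ELEM_SIZE only appear in the "ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE" checks below; since they evaluate to 4 and 28 bytes, the test is effectively "the log claims at least one valid element". A small standalone check of that arithmetic:

#include <stdint.h>
#include <stdio.h>

#define ERROR_START_OFFSET	(1 * sizeof(uint32_t))	/* 4 bytes */
#define ERROR_ELEM_SIZE		(7 * sizeof(uint32_t))	/* 28 bytes */

int
main(void)
{
	for (uint32_t valid = 0; valid < 3; valid++)
		printf("valid=%u -> dump? %s\n", (unsigned)valid,
		    ERROR_START_OFFSET <= valid * ERROR_ELEM_SIZE ? "yes" : "no");
	return 0;
}
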
5084
5085#ifdef IWM_DEBUG
5086struct {
5087 const char *name;
5088 uint8_t num;
5089} advanced_lookup[] = {
5090 { "NMI_INTERRUPT_WDG", 0x34 },
5091 { "SYSASSERT", 0x35 },
5092 { "UCODE_VERSION_MISMATCH", 0x37 },
5093 { "BAD_COMMAND", 0x38 },
5094 { "NMI_INTERRUPT_DATA_ACTION_PT", 0x3C },
5095 { "FATAL_ERROR", 0x3D },
5096 { "NMI_TRM_HW_ERR", 0x46 },
5097 { "NMI_INTERRUPT_TRM", 0x4C },
5098 { "NMI_INTERRUPT_BREAK_POINT", 0x54 },
5099 { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C },
5100 { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 },
5101 { "NMI_INTERRUPT_HOST", 0x66 },
5102 { "NMI_INTERRUPT_ACTION_PT", 0x7C },
5103 { "NMI_INTERRUPT_UNKNOWN", 0x84 },
5104 { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 },
5105 { "ADVANCED_SYSASSERT", 0 },
5106};
5107
5108static const char *
5109iwm_desc_lookup(uint32_t num)
5110{
5111 int i;
5112
5113 for (i = 0; i < nitems(advanced_lookup) - 1; i++)
5114 if (advanced_lookup[i].num == num)
5115 return advanced_lookup[i].name;
5116
5117 /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */
5118 return advanced_lookup[i].name;
5119}
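
iwm_desc_lookup() depends on the last advanced_lookup[] entry ("ADVANCED_SYSASSERT", num 0) acting as a sentinel: the loop visits nitems-1 entries and any miss falls through to the final name. The standalone sketch below reproduces that pattern with a shortened, illustrative table.

#include <stdio.h>

static const struct { const char *name; unsigned num; } lookup[] = {
	{ "SYSASSERT",		0x35 },
	{ "BAD_COMMAND",	0x38 },
	{ "ADVANCED_SYSASSERT",	0 },	/* sentinel / catch-all */
};

static const char *
desc_lookup(unsigned num)
{
	size_t i;

	for (i = 0; i < sizeof(lookup) / sizeof(lookup[0]) - 1; i++)
		if (lookup[i].num == num)
			return lookup[i].name;
	return lookup[i].name;		/* i now indexes the sentinel */
}

int
main(void)
{
	printf("%s / %s\n", desc_lookup(0x38), desc_lookup(0x99));
	return 0;
}
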
5120
5121static void
5122iwm_nic_umac_error(struct iwm_softc *sc)
5123{
5124 struct iwm_umac_error_event_table table;
5125 uint32_t base;
5126
5127 base = sc->umac_error_event_table;
5128
5129 if (base < 0x800000) {
5130 device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
5131 base);
5132 return;
5133 }
5134
5135 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5136 device_printf(sc->sc_dev, "reading errlog failed\n");
5137 return;
5138 }
5139
5140 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5141 device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
5142 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5143 sc->sc_flags, table.valid);
5144 }
5145
5146 device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
5147 iwm_desc_lookup(table.error_id));
5148 device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
5149 device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
5150 device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
5151 table.ilink1);
5152 device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
5153 table.ilink2);
5154 device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
5155 device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
5156 device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
5157 device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
5158 device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
5159 device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
5160 table.frame_pointer);
5161 device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
5162 table.stack_pointer);
5163 device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
5164 device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
5165 table.nic_isr_pref);
5166}
5167
5168/*
5169 * Support for dumping the error log seemed like a good idea ...
5170 * but it's mostly hex junk and the only sensible thing is the
5171 * hw/ucode revision (which we know anyway). Since it's here,
5172 * I'll just leave it in, just in case e.g. the Intel guys want to
5173 * help us decipher some "ADVANCED_SYSASSERT" later.
5174 */
5175static void
5176iwm_nic_error(struct iwm_softc *sc)
5177{
5178 struct iwm_error_event_table table;
5179 uint32_t base;
5180
5181 device_printf(sc->sc_dev, "dumping device error log\n");
5182 base = sc->error_event_table[0];
5183 if (base < 0x800000) {
5184 device_printf(sc->sc_dev,
5185 "Invalid error log pointer 0x%08x\n", base);
5186 return;
5187 }
5188
5189 if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
5190 device_printf(sc->sc_dev, "reading errlog failed\n");
5191 return;
5192 }
5193
5194 if (!table.valid) {
5195 device_printf(sc->sc_dev, "errlog not found, skipping\n");
5196 return;
5197 }
5198
5199 if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
5200 device_printf(sc->sc_dev, "Start Error Log Dump:\n");
5201 device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
5202 sc->sc_flags, table.valid);
5203 }
5204
5205 device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
5206 iwm_desc_lookup(table.error_id));
5207 device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
5208 table.trm_hw_status0);
5209 device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
5210 table.trm_hw_status1);
5211 device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
5212 device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
5213 device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
5214 device_printf(sc->sc_dev, "%08X | data1\n", table.data1);
5215 device_printf(sc->sc_dev, "%08X | data2\n", table.data2);
5216 device_printf(sc->sc_dev, "%08X | data3\n", table.data3);
5217 device_printf(sc->sc_dev, "%08X | beacon time\n", table.bcon_time);
5218 device_printf(sc->sc_dev, "%08X | tsf low\n", table.tsf_low);
5219 device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
5220 device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
5221 device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
5222 device_printf(sc->sc_dev, "%08X | uCode revision type\n",
5223 table.fw_rev_type);
5224 device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
5225 device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
5226 device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
5227 device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
5228 device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
5229 device_printf(sc->sc_dev, "%08X | isr0\n", table.isr0);
5230 device_printf(sc->sc_dev, "%08X | isr1\n", table.isr1);
5231 device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
5232 device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
5233 device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
5234 device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
5235 device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
5236 device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
5237 device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
5238 device_printf(sc->sc_dev, "%08X | l2p_mhvalid\n", table.l2p_mhvalid);
5239 device_printf(sc->sc_dev, "%08X | l2p_addr_match\n", table.l2p_addr_match);
5240 device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
5241 device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
5242 device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
5243
5244 if (sc->umac_error_event_table)
5245 iwm_nic_umac_error(sc);
5246}
5247#endif
5248
5249static void
5250iwm_handle_rxb(struct iwm_softc *sc, struct mbuf *m)
5251{
5252 struct ieee80211com *ic = &sc->sc_ic;
5253 struct iwm_cmd_response *cresp;
5254 struct mbuf *m1;
5255 uint32_t offset = 0;
5256 uint32_t maxoff = IWM_RBUF_SIZE;
5257 uint32_t nextoff;
5258 boolean_t stolen = FALSE;
5259
5260#define HAVEROOM(a) \
5261 ((a) + sizeof(uint32_t) + sizeof(struct iwm_cmd_header) < maxoff)
5262
5263 while (HAVEROOM(offset)) {
5264 struct iwm_rx_packet *pkt = mtodoff(m, struct iwm_rx_packet *,
5265 offset);
5266 int qid, idx, code, len;
5267
5268 qid = pkt->hdr.qid;
5269 idx = pkt->hdr.idx;
5270
5271 code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
5272
5273 /*
5274 * randomly get these from the firmware, no idea why.
5275 * they at least seem harmless, so just ignore them for now
5276 */
5277 if ((pkt->hdr.code == 0 && (qid & ~0x80) == 0 && idx == 0) ||
5278 pkt->len_n_flags == htole32(IWM_FH_RSCSR_FRAME_INVALID)) {
5279 break;
5280 }
5281
5282 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5283 "rx packet qid=%d idx=%d type=%x\n",
5284 qid & ~0x80, pkt->hdr.idx, code);
5285
5286 len = iwm_rx_packet_len(pkt);
5287 len += sizeof(uint32_t); /* account for status word */
5288 nextoff = offset + roundup2(len, IWM_FH_RSCSR_FRAME_ALIGN);
5289
5291
5292 switch (code) {
5293 		case IWM_REPLY_RX_PHY_CMD:
5294 			iwm_rx_rx_phy_cmd(sc, pkt);
5295 break;
5296
5297 case IWM_REPLY_RX_MPDU_CMD: {
5298 /*
5299 * If this is the last frame in the RX buffer, we
5300 * can directly feed the mbuf to the sharks here.
5301 */
5302 struct iwm_rx_packet *nextpkt = mtodoff(m,
5303 struct iwm_rx_packet *, nextoff);
5304 if (!HAVEROOM(nextoff) ||
5305 (nextpkt->hdr.code == 0 &&
5306 (nextpkt->hdr.qid & ~0x80) == 0 &&
5307 nextpkt->hdr.idx == 0) ||
5308 (nextpkt->len_n_flags ==
5309 htole32(IWM_FH_RSCSR_FRAME_INVALID))) {
5310 if (iwm_rx_mpdu(sc, m, offset, stolen)) {
5311 stolen = FALSE;
5312 /* Make sure we abort the loop */
5313 nextoff = maxoff;
5314 }
5315 break;
5316 }
5317
5318 /*
5319 * Use m_copym instead of m_split, because that
5320 * makes it easier to keep a valid rx buffer in
5321 * the ring, when iwm_rx_mpdu() fails.
5322 *
5323 * We need to start m_copym() at offset 0, to get the
5324 * M_PKTHDR flag preserved.
5325 */
5326 m1 = m_copym(m, 0, M_COPYALL, M_NOWAIT);
5327 if (m1) {
5328 if (iwm_rx_mpdu(sc, m1, offset, stolen))
5329 stolen = TRUE;
5330 else
5331 m_freem(m1);
5332 }
5333 break;
5334 }
5335
5336 case IWM_TX_CMD:
5337 iwm_rx_tx_cmd(sc, pkt);
5338 break;
5339
5341 struct iwm_missed_beacons_notif *resp;
5342 int missed;
5343
5344 /* XXX look at mac_id to determine interface ID */
5345 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5346
5347 resp = (void *)pkt->data;
5348 missed = le32toh(resp->consec_missed_beacons);
5349
5350 IWM_DPRINTF(sc, IWM_DEBUG_BEACON | IWM_DEBUG_STATE,
5351 "%s: MISSED_BEACON: mac_id=%d, "
5352 "consec_since_last_rx=%d, consec=%d, num_expect=%d "
5353 "num_rx=%d\n",
5354 __func__,
5355 le32toh(resp->mac_id),
5356 			    le32toh(resp->consec_missed_beacons_since_last_rx),
5357 			    le32toh(resp->consec_missed_beacons),
5358 le32toh(resp->num_expected_beacons),
5359 le32toh(resp->num_recvd_beacons));
5360
5361 /* Be paranoid */
5362 if (vap == NULL)
5363 break;
5364
5365 /* XXX no net80211 locking? */
5366 if (vap->iv_state == IEEE80211_S_RUN &&
5367 (ic->ic_flags & IEEE80211_F_SCAN) == 0) {
5368 if (missed > vap->iv_bmissthreshold) {
5369 /* XXX bad locking; turn into task */
5370 IWM_UNLOCK(sc);
5371 ieee80211_beacon_miss(ic);
5372 IWM_LOCK(sc);
5373 }
5374 }
5375
5376 break;
5377 }
5378
5380 break;
5381
5382 case IWM_ALIVE:
5383 break;
5384
5386 break;
5387
5388 		case IWM_STATISTICS_NOTIFICATION:
5389 			iwm_handle_rx_statistics(sc, pkt);
5390 break;
5391
5392 case IWM_NVM_ACCESS_CMD:
5393 case IWM_MCC_UPDATE_CMD:
5394 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5395 memcpy(sc->sc_cmd_resp,
5396 pkt, sizeof(sc->sc_cmd_resp));
5397 }
5398 break;
5399
5400 		case IWM_MCC_CHUB_UPDATE_CMD: {
5401 			struct iwm_mcc_chub_notif *notif;
5402 notif = (void *)pkt->data;
5403
5404 sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
5405 sc->sc_fw_mcc[1] = notif->mcc & 0xff;
5406 sc->sc_fw_mcc[2] = '\0';
5407 IWM_DPRINTF(sc, IWM_DEBUG_LAR,
5408 "fw source %d sent CC '%s'\n",
5409 notif->source_id, sc->sc_fw_mcc);
5410 break;
5411 }
5412
5416 struct iwm_dts_measurement_notif_v1 *notif;
5417
5418 if (iwm_rx_packet_payload_len(pkt) < sizeof(*notif)) {
5419 device_printf(sc->sc_dev,
5420 "Invalid DTS_MEASUREMENT_NOTIFICATION\n");
5421 break;
5422 }
5423 notif = (void *)pkt->data;
5424 IWM_DPRINTF(sc, IWM_DEBUG_TEMP,
5425 "IWM_DTS_MEASUREMENT_NOTIFICATION - %d\n",
5426 notif->temp);
5427 break;
5428 }
5429
5432 case IWM_ADD_STA:
5436 case IWM_LTR_CONFIG:
5439 case IWM_TIME_EVENT_CMD:
5447 case IWM_TIME_QUOTA_CMD:
5448 case IWM_REMOVE_STA:
5449 case IWM_TXPATH_FLUSH:
5450 case IWM_LQ_CMD:
5453 case IWM_BT_CONFIG:
5455 cresp = (void *)pkt->data;
5456 if (sc->sc_wantresp == (((qid & ~0x80) << 16) | idx)) {
5457 memcpy(sc->sc_cmd_resp,
5458 pkt, sizeof(*pkt)+sizeof(*cresp));
5459 }
5460 break;
5461
5462 /* ignore */
5463 case IWM_PHY_DB_CMD:
5464 break;
5465
5467 break;
5468
5471 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5472 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5473 ieee80211_runtask(ic, &sc->sc_es_task);
5474 }
5475 break;
5476
5478 break;
5479 }
5480
5483 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
5484 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
5485 ieee80211_runtask(ic, &sc->sc_es_task);
5486 }
5487 break;
5488
5491 notif = (void *)pkt->data;
5492
5493 IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
5494 "complete, status=0x%x, %d channels scanned\n",
5495 notif->status, notif->scanned_channels);
5496 break;
5497 }
5498
5499 case IWM_REPLY_ERROR: {
5500 struct iwm_error_resp *resp;
5501 resp = (void *)pkt->data;
5502
5503 device_printf(sc->sc_dev,
5504 "firmware error 0x%x, cmd 0x%x\n",
5505 le32toh(resp->error_type),
5506 resp->cmd_id);
5507 break;
5508 }
5509
5510 		case IWM_TIME_EVENT_NOTIFICATION:
5511 			iwm_rx_time_event_notif(sc, pkt);
5512 break;
5513
5514 /*
5515 * Firmware versions 21 and 22 generate some DEBUG_LOG_MSG
5516 * messages. Just ignore them for now.
5517 */
5518 case IWM_DEBUG_LOG_MSG:
5519 break;
5520
5522 break;
5523
5524 case IWM_SCD_QUEUE_CFG: {
5525 struct iwm_scd_txq_cfg_rsp *rsp;
5526 rsp = (void *)pkt->data;
5527
5528 IWM_DPRINTF(sc, IWM_DEBUG_CMD,
5529 "queue cfg token=0x%x sta_id=%d "
5530 "tid=%d scd_queue=%d\n",
5531 rsp->token, rsp->sta_id, rsp->tid,
5532 rsp->scd_queue);
5533 break;
5534 }
5535
5536 default:
5537 device_printf(sc->sc_dev,
5538 "code %x, frame %d/%d %x unhandled\n",
5539 code, qid & ~0x80, idx, pkt->len_n_flags);
5540 break;
5541 }
5542
5543 /*
5544 * Why test bit 0x80? The Linux driver:
5545 *
5546 * There is one exception: uCode sets bit 15 when it
5547 * originates the response/notification, i.e. when the
5548 * response/notification is not a direct response to a
5549 * command sent by the driver. For example, uCode issues
5550 * IWM_REPLY_RX when it sends a received frame to the driver;
5551 * it is not a direct response to any driver command.
5552 *
5553 * Ok, so since when is 7 == 15? Well, the Linux driver
5554 * uses a slightly different format for pkt->hdr, and "qid"
5555 * is actually the upper byte of a two-byte field.
5556 */
5557 if (!(qid & (1 << 7)))
5558 iwm_cmd_done(sc, pkt);
5559
5560 offset = nextoff;
5561 }
5562 if (stolen)
5563 m_freem(m);
5564#undef HAVEROOM
5565}
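
iwm_handle_rxb() walks several firmware packets packed into one receive buffer: each step advances by the packet length plus the status word, rounded up to IWM_FH_RSCSR_FRAME_ALIGN, and HAVEROOM() stops the walk before the next header would run past IWM_RBUF_SIZE. The standalone sketch below shows only that offset arithmetic; the alignment and buffer sizes are illustrative, not the driver's constants.

#include <stdint.h>
#include <stdio.h>

#define FRAME_ALIGN	0x40u	/* illustrative alignment */
#define RBUF_SIZE	4096u	/* illustrative buffer size */
#define HDR_SIZE	8u	/* status word + a small header, illustrative */

static uint32_t
roundup2_u32(uint32_t x, uint32_t align)	/* align must be a power of two */
{
	return (x + align - 1) & ~(align - 1);
}

int
main(void)
{
	uint32_t lens[] = { 60, 1500, 333 };
	uint32_t offset = 0;

	for (int i = 0; i < 3 && offset + HDR_SIZE < RBUF_SIZE; i++) {
		printf("packet %d at offset %u (len %u)\n",
		    i, (unsigned)offset, (unsigned)lens[i]);
		offset += roundup2_u32(lens[i] + 4, FRAME_ALIGN);
	}
	return 0;
}
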
5566
5567/*
5568 * Process an IWM_CSR_INT_BIT_FH_RX or IWM_CSR_INT_BIT_SW_RX interrupt.
5569 * Basic structure from if_iwn
5570 */
5571static void
5573{
5574 int count;
5575 uint32_t wreg;
5576 uint16_t hw;
5577
5578 bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
5579 BUS_DMASYNC_POSTREAD);
5580
5581 if (sc->cfg->mqrx_supported) {
5582 count = IWM_RX_MQ_RING_COUNT;
5583 		wreg = IWM_RFH_Q0_FRBDCB_WIDX_TRG;
5584 	} else {
5585 		count = IWM_RX_LEGACY_RING_COUNT;
5586 		wreg = IWM_FH_RSCSR_CHNL0_WPTR;
5587 	}
5588
5589 hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
5590
5591 /*
5592 * Process responses
5593 */
5594 while (sc->rxq.cur != hw) {
5595 struct iwm_rx_ring *ring = &sc->rxq;
5596 struct iwm_rx_data *data = &ring->data[ring->cur];
5597
5598 bus_dmamap_sync(ring->data_dmat, data->map,
5599 BUS_DMASYNC_POSTREAD);
5600
5601 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5602 "%s: hw = %d cur = %d\n", __func__, hw, ring->cur);
5603 iwm_handle_rxb(sc, data->m);
5604
5605 ring->cur = (ring->cur + 1) % count;
5606 }
5607
5608 /*
5609 * Tell the firmware that it can reuse the ring entries that
5610 * we have just processed.
5611 * Seems like the hardware gets upset unless we align
5612 * the write by 8??
5613 */
5614 hw = (hw == 0) ? count - 1 : hw - 1;
5615 IWM_WRITE(sc, wreg, rounddown2(hw, 8));
5616}
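
The closing comment above notes that the write pointer handed back to the firmware has to be aligned: the driver steps back one slot from the last processed entry (wrapping at the ring size) and rounds it down to a multiple of 8 with rounddown2(). A sketch of that arithmetic, with an illustrative ring size:

#include <stdio.h>

#define RING_COUNT	256u	/* illustrative ring size */

static unsigned
ring_wptr(unsigned hw_closed)
{
	unsigned hw = (hw_closed == 0) ? RING_COUNT - 1 : hw_closed - 1;

	return hw & ~7u;	/* rounddown2(hw, 8) */
}

int
main(void)
{
	printf("%u %u %u\n", ring_wptr(0), ring_wptr(13), ring_wptr(200));
	return 0;
}
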
5617
5618static void
5619iwm_intr(void *arg)
5620{
5621 struct iwm_softc *sc = arg;
5622 int handled = 0;
5623 int r1, r2;
5624 int isperiodic = 0;
5625
5626 IWM_LOCK(sc);
5628
5629 if (sc->sc_flags & IWM_FLAG_USE_ICT) {
5630 uint32_t *ict = sc->ict_dma.vaddr;
5631 int tmp;
5632
5633 tmp = htole32(ict[sc->ict_cur]);
5634 if (!tmp)
5635 goto out_ena;
5636
5637 /*
5638 * ok, there was something. keep plowing until we have all.
5639 */
5640 r1 = r2 = 0;
5641 while (tmp) {
5642 r1 |= tmp;
5643 ict[sc->ict_cur] = 0;
5644 sc->ict_cur = (sc->ict_cur+1) % IWM_ICT_COUNT;
5645 tmp = htole32(ict[sc->ict_cur]);
5646 }
5647
5648 /* this is where the fun begins. don't ask */
5649 if (r1 == 0xffffffff)
5650 r1 = 0;
5651
5652 /* i am not expected to understand this */
5653 if (r1 & 0xc0000)
5654 r1 |= 0x8000;
5655 r1 = (0xff & r1) | ((0xff00 & r1) << 16);
5656 } else {
5657 r1 = IWM_READ(sc, IWM_CSR_INT);
5658 /* "hardware gone" (where, fishing?) */
5659 if (r1 == 0xffffffff || (r1 & 0xfffffff0) == 0xa5a5a5a0)
5660 goto out;
5661 		r2 = IWM_READ(sc, IWM_CSR_FH_INT_STATUS);
5662 	}
5663 if (r1 == 0 && r2 == 0) {
5664 goto out_ena;
5665 }
5666
5667 IWM_WRITE(sc, IWM_CSR_INT, r1 | ~sc->sc_intmask);
5668
5669 /* Safely ignore these bits for debug checks below */
5671
5672 if (r1 & IWM_CSR_INT_BIT_SW_ERR) {
5673 int i;
5674 struct ieee80211com *ic = &sc->sc_ic;
5675 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
5676
5677#ifdef IWM_DEBUG
5678 iwm_nic_error(sc);
5679#endif
5680 /* Dump driver status (TX and RX rings) while we're here. */
5681 device_printf(sc->sc_dev, "driver status:\n");
5682 for (i = 0; i < IWM_MAX_QUEUES; i++) {
5683 struct iwm_tx_ring *ring = &sc->txq[i];
5684 device_printf(sc->sc_dev,
5685 " tx ring %2d: qid=%-2d cur=%-3d "
5686 "queued=%-3d\n",
5687 i, ring->qid, ring->cur, ring->queued);
5688 }
5689 device_printf(sc->sc_dev,
5690 " rx ring: cur=%d\n", sc->rxq.cur);
5691 device_printf(sc->sc_dev,
5692 " 802.11 state %d\n", (vap == NULL) ? -1 : vap->iv_state);
5693
5694 /* Reset our firmware state tracking. */
5695 sc->sc_firmware_state = 0;
5696 /* Don't stop the device; just do a VAP restart */
5697 IWM_UNLOCK(sc);
5698
5699 if (vap == NULL) {
5700 printf("%s: null vap\n", __func__);
5701 return;
5702 }
5703
5704 device_printf(sc->sc_dev, "%s: controller panicked, iv_state = %d; "
5705 "restarting\n", __func__, vap->iv_state);
5706
5707 ieee80211_restart_all(ic);
5708 return;
5709 }
5710
5711 if (r1 & IWM_CSR_INT_BIT_HW_ERR) {
5712 handled |= IWM_CSR_INT_BIT_HW_ERR;
5713 device_printf(sc->sc_dev, "hardware error, stopping device\n");
5714 iwm_stop(sc);
5715 goto out;
5716 }
5717
5718 /* firmware chunk loaded */
5719 if (r1 & IWM_CSR_INT_BIT_FH_TX) {
5721 handled |= IWM_CSR_INT_BIT_FH_TX;
5722 sc->sc_fw_chunk_done = 1;
5723 wakeup(&sc->sc_fw);
5724 }
5725
5726 if (r1 & IWM_CSR_INT_BIT_RF_KILL) {
5727 handled |= IWM_CSR_INT_BIT_RF_KILL;
5728 taskqueue_enqueue(sc->sc_tq, &sc->sc_rftoggle_task);
5729 }
5730
5731 /*
5732 * The Linux driver uses periodic interrupts to avoid races.
5733 * We cargo-cult like it's going out of fashion.
5734 */
5735 if (r1 & IWM_CSR_INT_BIT_RX_PERIODIC) {
5736 handled |= IWM_CSR_INT_BIT_RX_PERIODIC;
5738 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) == 0)
5739 IWM_WRITE_1(sc,
5741 isperiodic = 1;
5742 }
5743
5744 if ((r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX)) || isperiodic) {
5747
5748 iwm_notif_intr(sc);
5749
5750 /* enable periodic interrupt, see above */
5751 if (r1 & (IWM_CSR_INT_BIT_FH_RX | IWM_CSR_INT_BIT_SW_RX) && !isperiodic)
5754 }
5755
5756 if (__predict_false(r1 & ~handled))
5757 IWM_DPRINTF(sc, IWM_DEBUG_INTR,
5758 "%s: unhandled interrupts: %x\n", __func__, r1);
5759 out_ena:
5761 out:
5762 IWM_UNLOCK(sc);
5763 return;
5764}
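
In ICT mode the value accumulated in r1 is not the raw IWM_CSR_INT register: after the 0xc0000 quirk, the driver rebuilds the expected bit layout with "(0xff & r1) | ((0xff00 & r1) << 16)". The standalone sketch below shows the effect of that expansion on a couple of made-up sample values.

#include <stdint.h>
#include <stdio.h>

static uint32_t
ict_expand(uint32_t r1)
{
	if (r1 & 0xc0000)	/* quirk carried over from the handler above */
		r1 |= 0x8000;
	return (0xff & r1) | ((0xff00 & r1) << 16);
}

int
main(void)
{
	printf("0x%08x\n", (unsigned)ict_expand(0x00000083));
	printf("0x%08x\n", (unsigned)ict_expand(0x000c0003));
	return 0;
}
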
5765
5766/*
5767 * Autoconf glue-sniffing
5768 */
5769#define PCI_VENDOR_INTEL 0x8086
5770#define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3
5771#define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4
5772#define PCI_PRODUCT_INTEL_WL_3165_1 0x3165
5773#define PCI_PRODUCT_INTEL_WL_3165_2 0x3166
5774#define PCI_PRODUCT_INTEL_WL_3168_1 0x24fb
5775#define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1
5776#define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2
5777#define PCI_PRODUCT_INTEL_WL_7265_1 0x095a
5778#define PCI_PRODUCT_INTEL_WL_7265_2 0x095b
5779#define PCI_PRODUCT_INTEL_WL_8260_1 0x24f3
5780#define PCI_PRODUCT_INTEL_WL_8260_2 0x24f4
5781#define PCI_PRODUCT_INTEL_WL_8265_1 0x24fd
5782#define PCI_PRODUCT_INTEL_WL_9560_1 0x9df0
5783#define PCI_PRODUCT_INTEL_WL_9560_2 0xa370
5784#define PCI_PRODUCT_INTEL_WL_9560_3 0x31dc
5785#define PCI_PRODUCT_INTEL_WL_9260_1 0x2526
5786
5787static const struct iwm_devices {
5788 uint16_t device;
5789 const struct iwm_cfg *cfg;
5790} iwm_devices[] = {
5808
5809static int
5810iwm_probe(device_t dev)
5811{
5812 int i;
5813
5814 for (i = 0; i < nitems(iwm_devices); i++) {
5815 if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
5816 pci_get_device(dev) == iwm_devices[i].device) {
5817 device_set_desc(dev, iwm_devices[i].cfg->name);
5818 return (BUS_PROBE_DEFAULT);
5819 }
5820 }
5821
5822 return (ENXIO);
5823}
5824
5825static int
5826iwm_dev_check(device_t dev)
5827{
5828 struct iwm_softc *sc;
5829 uint16_t devid;
5830 int i;
5831
5832 sc = device_get_softc(dev);
5833
5834 devid = pci_get_device(dev);
5835 for (i = 0; i < nitems(iwm_devices); i++) {
5836 if (iwm_devices[i].device == devid) {
5837 sc->cfg = iwm_devices[i].cfg;
5838 return (0);
5839 }
5840 }
5841 device_printf(dev, "unknown adapter type\n");
5842 return ENXIO;
5843}
5844
5845/* PCI registers */
5846#define PCI_CFG_RETRY_TIMEOUT 0x041
5847
5848static int
5849iwm_pci_attach(device_t dev)
5850{
5851 struct iwm_softc *sc;
5852 int count, error, rid;
5853 uint16_t reg;
5854
5855 sc = device_get_softc(dev);
5856
5857 /* We disable the RETRY_TIMEOUT register (0x41) to keep
5858 * PCI Tx retries from interfering with C3 CPU state */
5859 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
5860
5861 /* Enable bus-mastering and hardware bug workaround. */
5862 pci_enable_busmaster(dev);
5863 reg = pci_read_config(dev, PCIR_STATUS, sizeof(reg));
5864 /* if !MSI */
5865 if (reg & PCIM_STATUS_INTxSTATE) {
5866 reg &= ~PCIM_STATUS_INTxSTATE;
5867 }
5868 pci_write_config(dev, PCIR_STATUS, reg, sizeof(reg));
5869
5870 rid = PCIR_BAR(0);
5871 sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
5872 RF_ACTIVE);
5873 if (sc->sc_mem == NULL) {
5874 device_printf(sc->sc_dev, "can't map mem space\n");
5875 return (ENXIO);
5876 }
5877 sc->sc_st = rman_get_bustag(sc->sc_mem);
5878 sc->sc_sh = rman_get_bushandle(sc->sc_mem);
5879
5880 /* Install interrupt handler. */
5881 count = 1;
5882 rid = 0;
5883 if (pci_alloc_msi(dev, &count) == 0)
5884 rid = 1;
5885 sc->sc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_ACTIVE |
5886 (rid != 0 ? 0 : RF_SHAREABLE));
5887 if (sc->sc_irq == NULL) {
5888 device_printf(dev, "can't map interrupt\n");
5889 return (ENXIO);
5890 }
5891 error = bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_NET | INTR_MPSAFE,
5892 NULL, iwm_intr, sc, &sc->sc_ih);
5893 if (error != 0) {
5894 device_printf(dev, "can't establish interrupt");
5895 return (error);
5896 }
5897 sc->sc_dmat = bus_get_dma_tag(sc->sc_dev);
5898
5899 return (0);
5900}
5901
5902static void
5903iwm_pci_detach(device_t dev)
5904{
5905 struct iwm_softc *sc = device_get_softc(dev);
5906
5907 if (sc->sc_irq != NULL) {
5908 bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
5909 bus_release_resource(dev, SYS_RES_IRQ,
5910 rman_get_rid(sc->sc_irq), sc->sc_irq);
5911 pci_release_msi(dev);
5912 }
5913 if (sc->sc_mem != NULL)
5914 bus_release_resource(dev, SYS_RES_MEMORY,
5915 rman_get_rid(sc->sc_mem), sc->sc_mem);
5916}
5917
5918static int
5919iwm_attach(device_t dev)
5920{
5921 struct iwm_softc *sc = device_get_softc(dev);
5922 struct ieee80211com *ic = &sc->sc_ic;
5923 int error;
5924 int txq_i, i;
5925
5926 sc->sc_dev = dev;
5927 sc->sc_attached = 1;
5928 IWM_LOCK_INIT(sc);
5929 mbufq_init(&sc->sc_snd, ifqmaxlen);
5930 callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
5931 callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
5932 TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
5933 TASK_INIT(&sc->sc_rftoggle_task, 0, iwm_rftoggle_task, sc);
5934
5935 sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
5936 taskqueue_thread_enqueue, &sc->sc_tq);
5937 error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
5938 if (error != 0) {
5939 device_printf(dev, "can't start taskq thread, error %d\n",
5940 error);
5941 goto fail;
5942 }
5943
5944 error = iwm_dev_check(dev);
5945 if (error != 0)
5946 goto fail;
5947
5948 	sc->sc_notif_wait = iwm_notification_wait_init(sc);
5949 	if (sc->sc_notif_wait == NULL) {
5950 device_printf(dev, "failed to init notification wait struct\n");
5951 goto fail;
5952 }
5953
5954 sc->sf_state = IWM_SF_UNINIT;
5955
5956 /* Init phy db */
5957 sc->sc_phy_db = iwm_phy_db_init(sc);
5958 if (!sc->sc_phy_db) {
5959 device_printf(dev, "Cannot init phy_db\n");
5960 goto fail;
5961 }
5962
5963 /* Set EBS as successful as long as not stated otherwise by the FW. */
5964 sc->last_ebs_successful = TRUE;
5965
5966 /* PCI attach */
5967 error = iwm_pci_attach(dev);
5968 if (error != 0)
5969 goto fail;
5970
5971 sc->sc_wantresp = -1;
5972
5973 	sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
5974 	/*
5975 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
5976 * changed, and now the revision step also includes bit 0-1 (no more
5977 * "dash" value). To keep hw_rev backwards compatible - we'll store it
5978 * in the old format.
5979 */
5980 	if (sc->cfg->device_family == IWM_DEVICE_FAMILY_8000) {
5981 		int ret;
5982 uint32_t hw_step;
5983
5984 sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
5985 (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
5986
5987 if (iwm_prepare_card_hw(sc) != 0) {
5988 device_printf(dev, "could not initialize hardware\n");
5989 goto fail;
5990 }
5991
5992 /*
5993 * In order to recognize C step the driver should read the
5994 * chip version id located at the AUX bus MISC address.
5995 */
5998 DELAY(2);
5999
6003 25000);
6004 if (!ret) {
6005 device_printf(sc->sc_dev,
6006 "Failed to wake up the nic\n");
6007 goto fail;
6008 }
6009
6010 if (iwm_nic_lock(sc)) {
6011 hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
6012 hw_step |= IWM_ENABLE_WFPM;
6013 iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
6014 hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
6015 hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
6016 if (hw_step == 0x3)
6017 sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
6018 (IWM_SILICON_C_STEP << 2);
6019 iwm_nic_unlock(sc);
6020 } else {
6021 device_printf(sc->sc_dev, "Failed to lock the nic\n");
6022 goto fail;
6023 }
6024 }
6025
6026 /* special-case 7265D, it has the same PCI IDs. */
6027 if (sc->cfg == &iwm7265_cfg &&
6028 	    (sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK) == IWM_CSR_HW_REV_TYPE_7265D) {
6029 		sc->cfg = &iwm7265d_cfg;
6030 }
6031
6032 /* Allocate DMA memory for firmware transfers. */
6033 if ((error = iwm_alloc_fwmem(sc)) != 0) {
6034 device_printf(dev, "could not allocate memory for firmware\n");
6035 goto fail;
6036 }
6037
6038 /* Allocate "Keep Warm" page. */
6039 if ((error = iwm_alloc_kw(sc)) != 0) {
6040 device_printf(dev, "could not allocate keep warm page\n");
6041 goto fail;
6042 }
6043
6044 /* We use ICT interrupts */
6045 if ((error = iwm_alloc_ict(sc)) != 0) {
6046 device_printf(dev, "could not allocate ICT table\n");
6047 goto fail;
6048 }
6049
6050 /* Allocate TX scheduler "rings". */
6051 if ((error = iwm_alloc_sched(sc)) != 0) {
6052 device_printf(dev, "could not allocate TX scheduler rings\n");
6053 goto fail;
6054 }
6055
6056 /* Allocate TX rings */
6057 for (txq_i = 0; txq_i < nitems(sc->txq); txq_i++) {
6058 if ((error = iwm_alloc_tx_ring(sc,
6059 &sc->txq[txq_i], txq_i)) != 0) {
6060 device_printf(dev,
6061 "could not allocate TX ring %d\n",
6062 txq_i);
6063 goto fail;
6064 }
6065 }
6066
6067 /* Allocate RX ring. */
6068 if ((error = iwm_alloc_rx_ring(sc, &sc->rxq)) != 0) {
6069 device_printf(dev, "could not allocate RX ring\n");
6070 goto fail;
6071 }
6072
6073 /* Clear pending interrupts. */
6074 IWM_WRITE(sc, IWM_CSR_INT, 0xffffffff);
6075
6076 ic->ic_softc = sc;
6077 ic->ic_name = device_get_nameunit(sc->sc_dev);
6078 ic->ic_phytype = IEEE80211_T_OFDM; /* not only, but not used */
6079 ic->ic_opmode = IEEE80211_M_STA; /* default to BSS mode */
6080
6081 /* Set device capabilities. */
6082 ic->ic_caps =
6083 IEEE80211_C_STA |
6084 IEEE80211_C_WPA | /* WPA/RSN */
6085 IEEE80211_C_WME |
6086 IEEE80211_C_PMGT |
6087 IEEE80211_C_SHSLOT | /* short slot time supported */
6088 IEEE80211_C_SHPREAMBLE /* short preamble supported */
6089// IEEE80211_C_BGSCAN /* capable of bg scanning */
6090 ;
6091 /* Advertise full-offload scanning */
6092 ic->ic_flags_ext = IEEE80211_FEXT_SCAN_OFFLOAD;
6093 for (i = 0; i < nitems(sc->sc_phyctxt); i++) {
6094 sc->sc_phyctxt[i].id = i;
6095 sc->sc_phyctxt[i].color = 0;
6096 sc->sc_phyctxt[i].ref = 0;
6097 sc->sc_phyctxt[i].channel = NULL;
6098 }
6099
6100 /* Default noise floor */
6101 sc->sc_noise = -96;
6102
6103 /* Max RSSI */
6105
6106#ifdef IWM_DEBUG
6107 SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
6108 SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO, "debug",
6109 CTLFLAG_RW, &sc->sc_debug, 0, "control debugging");
6110#endif
6111
6112 error = iwm_read_firmware(sc);
6113 if (error) {
6114 goto fail;
6115 } else if (sc->sc_fw.fw_fp == NULL) {
6116 /*
6117 * XXX Add a solution for properly deferring firmware load
6118 * during bootup.
6119 */
6120 goto fail;
6121 } else {
6122 sc->sc_preinit_hook.ich_func = iwm_preinit;
6123 sc->sc_preinit_hook.ich_arg = sc;
6124 if (config_intrhook_establish(&sc->sc_preinit_hook) != 0) {
6125 device_printf(dev,
6126 "config_intrhook_establish failed\n");
6127 goto fail;
6128 }
6129 }
6130
6131 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6132 "<-%s\n", __func__);
6133
6134 return 0;
6135
6136 /* Free allocated memory if something failed during attachment. */
6137fail:
6138 iwm_detach_local(sc, 0);
6139
6140 return ENXIO;
6141}
6142
6143static int
6145{
6146 char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
6147
6148 if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
6149 return (FALSE);
6150
6151 return (TRUE);
6152}
6153
6154static int
6155iwm_wme_update(struct ieee80211com *ic)
6156{
6157#define IWM_EXP2(x) ((1 << (x)) - 1) /* CWmin = 2^ECWmin - 1 */
6158 struct iwm_softc *sc = ic->ic_softc;
6159 struct chanAccParams chp;
6160 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6161 struct iwm_vap *ivp = IWM_VAP(vap);
6162 struct iwm_node *in;
6163 struct wmeParams tmp[WME_NUM_AC];
6164 int aci, error;
6165
6166 if (vap == NULL)
6167 return (0);
6168
6169 ieee80211_wme_ic_getparams(ic, &chp);
6170
6171 IEEE80211_LOCK(ic);
6172 for (aci = 0; aci < WME_NUM_AC; aci++)
6173 tmp[aci] = chp.cap_wmeParams[aci];
6174 IEEE80211_UNLOCK(ic);
6175
6176 IWM_LOCK(sc);
6177 for (aci = 0; aci < WME_NUM_AC; aci++) {
6178 const struct wmeParams *ac = &tmp[aci];
6179 ivp->queue_params[aci].aifsn = ac->wmep_aifsn;
6180 ivp->queue_params[aci].cw_min = IWM_EXP2(ac->wmep_logcwmin);
6181 ivp->queue_params[aci].cw_max = IWM_EXP2(ac->wmep_logcwmax);
6182 ivp->queue_params[aci].edca_txop =
6183 IEEE80211_TXOP_TO_US(ac->wmep_txopLimit);
6184 }
6185 ivp->have_wme = TRUE;
6186 if (ivp->is_uploaded && vap->iv_bss != NULL) {
6187 in = IWM_NODE(vap->iv_bss);
6188 if (in->in_assoc) {
6189 if ((error = iwm_mac_ctxt_changed(sc, vap)) != 0) {
6190 device_printf(sc->sc_dev,
6191 "%s: failed to update MAC\n", __func__);
6192 }
6193 }
6194 }
6195 IWM_UNLOCK(sc);
6196
6197 return (0);
6198#undef IWM_EXP2
6199}
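
IWM_EXP2() above implements the "CWmin = 2^ECWmin - 1" relation: net80211's WME parameters carry the exponents, while the firmware command wants the actual contention window sizes. A one-line standalone check with typical best-effort values:

#include <stdio.h>

#define EXP2(x)	((1 << (x)) - 1)

int
main(void)
{
	/* typical best-effort WME values: ECWmin = 4, ECWmax = 10 */
	printf("cw_min = %d, cw_max = %d\n", EXP2(4), EXP2(10));	/* 15, 1023 */
	return 0;
}
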
6200
6201static void
6202iwm_preinit(void *arg)
6203{
6204 struct iwm_softc *sc = arg;
6205 device_t dev = sc->sc_dev;
6206 struct ieee80211com *ic = &sc->sc_ic;
6207 int error;
6208
6209 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6210 "->%s\n", __func__);
6211
6212 IWM_LOCK(sc);
6213 if ((error = iwm_start_hw(sc)) != 0) {
6214 device_printf(dev, "could not initialize hardware\n");
6215 IWM_UNLOCK(sc);
6216 goto fail;
6217 }
6218
6219 error = iwm_run_init_ucode(sc, 1);
6220 iwm_stop_device(sc);
6221 if (error) {
6222 IWM_UNLOCK(sc);
6223 goto fail;
6224 }
6225 device_printf(dev,
6226 "hw rev 0x%x, fw ver %s, address %s\n",
6228 sc->sc_fwver, ether_sprintf(sc->nvm_data->hw_addr));
6229
6230 /* not all hardware can do 5GHz band */
6231 	if (!sc->nvm_data->sku_cap_band_52GHz_enable)
6232 		memset(&ic->ic_sup_rates[IEEE80211_MODE_11A], 0,
6233 sizeof(ic->ic_sup_rates[IEEE80211_MODE_11A]));
6234 IWM_UNLOCK(sc);
6235
6236 iwm_init_channel_map(ic, IEEE80211_CHAN_MAX, &ic->ic_nchans,
6237 ic->ic_channels);
6238
6239 /*
6240 * At this point we've committed - if we fail to do setup,
6241 * we now also have to tear down the net80211 state.
6242 */
6243 ieee80211_ifattach(ic);
6244 ic->ic_vap_create = iwm_vap_create;
6245 ic->ic_vap_delete = iwm_vap_delete;
6246 ic->ic_raw_xmit = iwm_raw_xmit;
6247 ic->ic_node_alloc = iwm_node_alloc;
6248 ic->ic_scan_start = iwm_scan_start;
6249 ic->ic_scan_end = iwm_scan_end;
6250 ic->ic_update_mcast = iwm_update_mcast;
6251 ic->ic_getradiocaps = iwm_init_channel_map;
6252 ic->ic_set_channel = iwm_set_channel;
6253 ic->ic_scan_curchan = iwm_scan_curchan;
6254 ic->ic_scan_mindwell = iwm_scan_mindwell;
6255 ic->ic_wme.wme_update = iwm_wme_update;
6256 ic->ic_parent = iwm_parent;
6257 ic->ic_transmit = iwm_transmit;
6258 	iwm_radiotap_attach(sc);
6259 	if (bootverbose)
6260 ieee80211_announce(ic);
6261
6262 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6263 "<-%s\n", __func__);
6264 config_intrhook_disestablish(&sc->sc_preinit_hook);
6265
6266 return;
6267fail:
6268 config_intrhook_disestablish(&sc->sc_preinit_hook);
6269 iwm_detach_local(sc, 0);
6270}
6271
6272/*
6273 * Attach the interface to 802.11 radiotap.
6274 */
6275static void
6277{
6278 struct ieee80211com *ic = &sc->sc_ic;
6279
6280 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6281 "->%s begin\n", __func__);
6282 ieee80211_radiotap_attach(ic,
6283 	    &sc->sc_txtap.wt_ihdr, sizeof(sc->sc_txtap),
6284 	    IWM_TX_RADIOTAP_PRESENT,
6285 	    &sc->sc_rxtap.wr_ihdr, sizeof(sc->sc_rxtap),
6286 	    IWM_RX_RADIOTAP_PRESENT);
6287 IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_TRACE,
6288 "->%s end\n", __func__);
6289}
6290
6291static struct ieee80211vap *
6292iwm_vap_create(struct ieee80211com *ic, const char name[IFNAMSIZ], int unit,
6293 enum ieee80211_opmode opmode, int flags,
6294 const uint8_t bssid[IEEE80211_ADDR_LEN],
6295 const uint8_t mac[IEEE80211_ADDR_LEN])
6296{
6297 struct iwm_vap *ivp;
6298 struct ieee80211vap *vap;
6299
6300 if (!TAILQ_EMPTY(&ic->ic_vaps)) /* only one at a time */
6301 return NULL;
6302 ivp = malloc(sizeof(struct iwm_vap), M_80211_VAP, M_WAITOK | M_ZERO);
6303 vap = &ivp->iv_vap;
6304 ieee80211_vap_setup(ic, vap, name, unit, opmode, flags, bssid);
6305 vap->iv_bmissthreshold = 10; /* override default */
6306 /* Override with driver methods. */
6307 ivp->iv_newstate = vap->iv_newstate;
6308 vap->iv_newstate = iwm_newstate;
6309
6310 ivp->id = IWM_DEFAULT_MACID;
6311 ivp->color = IWM_DEFAULT_COLOR;
6312
6313 ivp->have_wme = FALSE;
6314 ivp->ps_disabled = FALSE;
6315
6316 ieee80211_ratectl_init(vap);
6317 /* Complete setup. */
6318 ieee80211_vap_attach(vap, ieee80211_media_change,
6319 ieee80211_media_status, mac);
6320 ic->ic_opmode = opmode;
6321
6322 return vap;
6323}
6324
6325static void
6326iwm_vap_delete(struct ieee80211vap *vap)
6327{
6328 struct iwm_vap *ivp = IWM_VAP(vap);
6329
6330 ieee80211_ratectl_deinit(vap);
6331 ieee80211_vap_detach(vap);
6332 free(ivp, M_80211_VAP);
6333}
6334
6335 static void
6336 iwm_xmit_queue_drain(struct iwm_softc *sc)
6337{
6338 struct mbuf *m;
6339 struct ieee80211_node *ni;
6340
6341 while ((m = mbufq_dequeue(&sc->sc_snd)) != NULL) {
6342 ni = (struct ieee80211_node *)m->m_pkthdr.rcvif;
6343 ieee80211_free_node(ni);
6344 m_freem(m);
6345 }
6346}
6347
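The drain above frees both the mbuf and the node reference that net80211 stashed in m_pkthdr.rcvif. For context, here is a sketch of the enqueue side, modelled on the usual FreeBSD pattern; the driver's real counterpart is iwm_transmit() (if_iwm.c:4861), which is not part of this excerpt.

/*
 * Sketch (assumption: modelled on iwm_transmit()).  net80211 stores
 * the node reference in m_pkthdr.rcvif before handing the mbuf to
 * ic_transmit, which is why the drain routine above must release
 * both the mbuf and that node reference.
 */
static int
example_transmit(struct ieee80211com *ic, struct mbuf *m)
{
	struct iwm_softc *sc = ic->ic_softc;
	int error;

	IWM_LOCK(sc);
	error = mbufq_enqueue(&sc->sc_snd, m);
	if (error) {
		IWM_UNLOCK(sc);
		return (error);	/* caller frees m and its node ref */
	}
	iwm_start(sc);
	IWM_UNLOCK(sc);
	return (0);
}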
6348static void
6349iwm_scan_start(struct ieee80211com *ic)
6350{
6351 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6352 struct iwm_softc *sc = ic->ic_softc;
6353 int error;
6354
6355 IWM_LOCK(sc);
6356 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6357 /* This should not be possible */
6358 device_printf(sc->sc_dev,
6359 "%s: Previous scan not completed yet\n", __func__);
6360 }
6361 if (iwm_fw_has_capa(sc, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
6362 error = iwm_umac_scan(sc);
6363 else
6364 error = iwm_lmac_scan(sc);
6365 if (error != 0) {
6366 device_printf(sc->sc_dev, "could not initiate scan\n");
6367 IWM_UNLOCK(sc);
6368 ieee80211_cancel_scan(vap);
6369 } else {
6370 sc->sc_flags |= IWM_FLAG_SCAN_RUNNING;
6371 iwm_led_blink_start(sc);
6372 IWM_UNLOCK(sc);
6373 }
6374}
6375
6376static void
6377iwm_scan_end(struct ieee80211com *ic)
6378{
6379 struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
6380 struct iwm_softc *sc = ic->ic_softc;
6381
6382 IWM_LOCK(sc);
6383 iwm_led_blink_stop(sc);
6384 if (vap->iv_state == IEEE80211_S_RUN)
6385 iwm_led_enable(sc);
6386 if (sc->sc_flags & IWM_FLAG_SCAN_RUNNING) {
6387 /*
6388 * Removing IWM_FLAG_SCAN_RUNNING now is fine because
6389 * both iwm_scan_end and iwm_scan_start run in the ic->ic_tq
6390 * taskqueue.
6391 */
6392 sc->sc_flags &= ~IWM_FLAG_SCAN_RUNNING;
6393 iwm_scan_stop_wait(sc);
6394 }
6395 IWM_UNLOCK(sc);
6396
6397 /*
6398 * Make sure we don't race if sc_es_task is still enqueued here,
6399 * so that it cannot call ieee80211_scan_done after we have
6400 * already started the next scan.
6401 */
6402 taskqueue_cancel(ic->ic_tq, &sc->sc_es_task, NULL);
6403}
6404
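The taskqueue_cancel() above targets sc_es_task, the end-of-scan task queued from the firmware's scan-complete notification. A hypothetical sketch of what that task typically does, assuming it mirrors iwm_endscan_cb() (if_iwm.c:4554, outside this excerpt):

/*
 * Sketch of the cancelled task (assumption: modelled on
 * iwm_endscan_cb()).  The task reports scan completion to net80211;
 * a stale instance must not fire after a new scan has started.
 */
static void
example_endscan_cb(void *arg, int pending __unused)
{
	struct iwm_softc *sc = arg;
	struct ieee80211com *ic = &sc->sc_ic;

	ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
}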
6405static void
6406iwm_update_mcast(struct ieee80211com *ic)
6407{
6408}
6409
6410static void
6411iwm_set_channel(struct ieee80211com *ic)
6412{
6413}
6414
6415static void
6416iwm_scan_curchan(struct ieee80211_scan_state *ss, unsigned long maxdwell)
6417{
6418}
6419
6420static void
6421iwm_scan_mindwell(struct ieee80211_scan_state *ss)
6422{
6423}
6424
6425void
6426iwm_init_task(void *arg1)
6427{
6428 struct iwm_softc *sc = arg1;
6429
6430 IWM_LOCK(sc);
6431 while (sc->sc_flags & IWM_FLAG_BUSY)
6432 msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmpwr", 0);
6433 sc->sc_flags |= IWM_FLAG_BUSY;
6434 iwm_stop(sc);
6435 if (sc->sc_ic.ic_nrunning > 0)
6436 iwm_init(sc);
6437 sc->sc_flags &= ~IWM_FLAG_BUSY;
6438 wakeup(&sc->sc_flags);
6439 IWM_UNLOCK(sc);
6440}
6441
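iwm_init_task() serializes against other users of the hardware with the IWM_FLAG_BUSY flag. The following is a minimal sketch of the same handshake as any other context would use it; the wmesg "iwmbusy" and the function name are illustrative only.

/*
 * Minimal sketch of the IWM_FLAG_BUSY handshake used above: sleep
 * until the flag clears, set it, do the exclusive work, then clear
 * it and wake any waiters, all under IWM_LOCK.
 */
static void
example_busy_section(struct iwm_softc *sc)
{
	IWM_LOCK(sc);
	while (sc->sc_flags & IWM_FLAG_BUSY)
		msleep(&sc->sc_flags, &sc->sc_mtx, 0, "iwmbusy", 0);
	sc->sc_flags |= IWM_FLAG_BUSY;

	/* ... exclusive work against the device would go here ... */

	sc->sc_flags &= ~IWM_FLAG_BUSY;
	wakeup(&sc->sc_flags);
	IWM_UNLOCK(sc);
}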
6442static int
6443iwm_resume(device_t dev)
6444{
6445 struct iwm_softc *sc = device_get_softc(dev);
6446 int do_reinit = 0;
6447
6448 /*
6449 * We disable the RETRY_TIMEOUT register (0x41) to keep
6450 * PCI Tx retries from interfering with C3 CPU state.
6451 */
6452 pci_write_config(dev, PCI_CFG_RETRY_TIMEOUT, 0x00, 1);
6453
6454 if (!sc->sc_attached)
6455 return 0;
6456
6457 iwm_init_task(device_get_softc(dev));
6458
6459 IWM_LOCK(sc);
6460 if (sc->sc_flags & IWM_FLAG_SCANNING) {
6461 sc->sc_flags &= ~IWM_FLAG_SCANNING;
6462 do_reinit = 1;
6463 }
6464 IWM_UNLOCK(sc);
6465
6466 if (do_reinit)
6467 ieee80211_resume_all(&sc->sc_ic);
6468
6469 return 0;
6470}
6471
6472static int
6473iwm_suspend(device_t dev)
6474{
6475 int do_stop = 0;
6476 struct iwm_softc *sc = device_get_softc(dev);
6477
6478 do_stop = !! (sc->sc_ic.ic_nrunning > 0);
6479
6480 if (!sc->sc_attached)
6481 return (0);
6482
6483 ieee80211_suspend_all(&sc->sc_ic);
6484
6485 if (do_stop) {
6486 IWM_LOCK(sc);
6487 iwm_stop(sc);
6488 sc->sc_flags |= IWM_FLAG_STOPPED;
6489 IWM_UNLOCK(sc);
6490 }
6491
6492 return (0);
6493}
6494
6495static int
6496iwm_detach_local(struct iwm_softc *sc, int do_net80211)
6497{
6498 struct iwm_fw_info *fw = &sc->sc_fw;
6499 device_t dev = sc->sc_dev;
6500 int i;
6501
6502 if (!sc->sc_attached)
6503 return 0;
6504 sc->sc_attached = 0;
6505 if (do_net80211) {
6506 ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
6507 }
6508 iwm_stop_device(sc);
6509 taskqueue_drain_all(sc->sc_tq);
6510 taskqueue_free(sc->sc_tq);
6511 if (do_net80211) {
6512 IWM_LOCK(sc);
6513 iwm_xmit_queue_drain(sc);
6514 IWM_UNLOCK(sc);
6515 ieee80211_ifdetach(&sc->sc_ic);
6516 }
6517 callout_drain(&sc->sc_led_blink_to);
6518 callout_drain(&sc->sc_watchdog_to);
6519
6520 iwm_phy_db_free(sc->sc_phy_db);
6521 sc->sc_phy_db = NULL;
6522
6523 iwm_free_nvm_data(sc->nvm_data);
6524
6525 /* Free descriptor rings */
6526 iwm_free_rx_ring(sc, &sc->rxq);
6527 for (i = 0; i < nitems(sc->txq); i++)
6528 iwm_free_tx_ring(sc, &sc->txq[i]);
6529
6530 /* Free firmware */
6531 if (fw->fw_fp != NULL)
6532 iwm_fw_info_free(fw);
6533
6534 /* Free scheduler */
6535 iwm_dma_contig_free(&sc->sched_dma);
6536 iwm_dma_contig_free(&sc->ict_dma);
6537 iwm_dma_contig_free(&sc->kw_dma);
6538 iwm_dma_contig_free(&sc->fw_dma);
6539
6540 iwm_free_fw_paging(sc);
6541
6542 /* Finished with the hardware - detach things */
6543 iwm_pci_detach(dev);
6544
6545 if (sc->sc_notif_wait != NULL) {
6546 iwm_notification_wait_free(sc->sc_notif_wait);
6547 sc->sc_notif_wait = NULL;
6548 }
6549
6550 IWM_LOCK_DESTROY(sc);
6551
6552 return (0);
6553}
6554
6555static int
6556iwm_detach(device_t dev)
6557{
6558 struct iwm_softc *sc = device_get_softc(dev);
6559
6560 return (iwm_detach_local(sc, 1));
6561}
6562
6563static device_method_t iwm_pci_methods[] = {
6564 /* Device interface */
6565 DEVMETHOD(device_probe, iwm_probe),
6566 DEVMETHOD(device_attach, iwm_attach),
6567 DEVMETHOD(device_detach, iwm_detach),
6568 DEVMETHOD(device_suspend, iwm_suspend),
6569 DEVMETHOD(device_resume, iwm_resume),
6570
6571 DEVMETHOD_END
6572};
6573
6574static driver_t iwm_pci_driver = {
6575 "iwm",
6577 sizeof (struct iwm_softc)
6578};
6579
6580static devclass_t iwm_devclass;
6581
6582 DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL);
6583MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver,
6584 iwm_devices, nitems(iwm_devices));
6585MODULE_DEPEND(iwm, firmware, 1, 1, 1);
6586MODULE_DEPEND(iwm, pci, 1, 1, 1);
6587MODULE_DEPEND(iwm, wlan, 1, 1, 1);
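MODULE_PNP_INFO() above exports the iwm_devices[] table so devmatch can auto-load the module for matching PCI ids. The table itself is defined earlier in the file (its members are visible around if_iwm.c:5788-5789 in the index below); the sketch here only illustrates its shape, with a hypothetical type name and a non-exhaustive set of entries.

/*
 * Illustrative shape of the PNP table referenced above: each entry
 * pairs a PCI device id with its iwm_cfg.  The entries shown are
 * examples, not the complete list used by the driver.
 */
static const struct iwm_devices_example {
	uint16_t		device;
	const struct iwm_cfg	*cfg;
} iwm_devices_example[] = {
	{ PCI_PRODUCT_INTEL_WL_7260_1, &iwm7260_cfg },
	{ PCI_PRODUCT_INTEL_WL_8265_1, &iwm8265_cfg },
	{ PCI_PRODUCT_INTEL_WL_9560_1, &iwm9560_cfg },
};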
static void iwm_cmd_done(struct iwm_softc *, struct iwm_rx_packet *)
Definition: if_iwm.c:3546
static int iwm_set_ucode_capabilities(struct iwm_softc *sc, const uint8_t *data, struct iwm_ucode_capabilities *capa)
Definition: if_iwm.c:507
static int iwm_alloc_rx_ring(struct iwm_softc *, struct iwm_rx_ring *)
Definition: if_iwm.c:926
static int iwm_update_quotas(struct iwm_softc *, struct iwm_vap *)
Definition: if_iwm.c:3983
static int iwm_attach(device_t)
Definition: if_iwm.c:5919
#define IWM_UCODE_ALIVE_TIMEOUT
Definition: if_iwm.c:234
static int iwm_set_hw_address(struct iwm_softc *sc, struct iwm_nvm_data *data, const uint16_t *nvm_hw, const uint16_t *mac_override)
Definition: if_iwm.c:2137
@ IWM_READ_NVM_CHUNK_SUCCEED
Definition: if_iwm.c:1772
@ IWM_READ_NVM_CHUNK_NOT_VALID_ADDRESS
Definition: if_iwm.c:1773
static void iwm_scan_start(struct ieee80211com *)
Definition: if_iwm.c:6349
static void iwm_stop(struct iwm_softc *)
Definition: if_iwm.c:4913
const struct iwm_rate iwm_rates[]
static void iwm_stop_device(struct iwm_softc *)
Definition: if_iwm.c:1267
static int iwm_transmit(struct ieee80211com *ic, struct mbuf *m)
Definition: if_iwm.c:4861
static int iwm_pcie_load_given_ucode(struct iwm_softc *, const struct iwm_fw_img *)
Definition: if_iwm.c:2555
#define IWM_RIDX_IS_CCK(_i_)
Definition: if_iwm.c:226
static int iwm_config_ltr(struct iwm_softc *sc)
Definition: if_iwm.c:2978
static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *, const struct ieee80211_bpf_params *)
Definition: if_iwm.c:3917
int iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
Definition: if_iwm.c:1593
#define PCI_PRODUCT_INTEL_WL_7265_2
Definition: if_iwm.c:5778
#define PCI_PRODUCT_INTEL_WL_7265_1
Definition: if_iwm.c:5777
static int iwm_run_init_ucode(struct iwm_softc *, int)
Definition: if_iwm.c:2888
static int iwm_firmware_store_section(struct iwm_softc *, enum iwm_ucode_type, const uint8_t *, size_t)
Definition: if_iwm.c:423
static void iwm_watchdog(void *)
Definition: if_iwm.c:4926
static const struct iwm_rate * iwm_tx_fill_cmd(struct iwm_softc *, struct iwm_node *, struct mbuf *, struct iwm_tx_cmd *)
Definition: if_iwm.c:3638
static void iwm_free_nvm_data(struct iwm_nvm_data *)
Definition: if_iwm.c:2231
#define PCI_VENDOR_INTEL
Definition: if_iwm.c:5769
static device_method_t iwm_pci_methods[]
Definition: if_iwm.c:6563
static int iwm_pcie_load_firmware_chunk(struct iwm_softc *, uint32_t, bus_addr_t, uint32_t)
Definition: if_iwm.c:2404
static int iwm_set_ucode_api_flags(struct iwm_softc *sc, const uint8_t *data, struct iwm_ucode_capabilities *capa)
Definition: if_iwm.c:482
#define IWM_RIDX_MAX
Definition: if_iwm.c:225
static struct iwm_nvm_data * iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *, const uint16_t *, const uint16_t *, const uint16_t *, const uint16_t *, const uint16_t *)
Definition: if_iwm.c:2168
static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t)
Definition: if_iwm.c:409
static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int)
Definition: if_iwm.c:4423
static int iwm_set_default_calib(struct iwm_softc *, const void *)
Definition: if_iwm.c:461
TUNABLE_INT("hw.iwm.lar.disable", &iwm_lar_disable)
#define IWM_DEFAULT_SCAN_CHANNELS
Definition: if_iwm.c:452
static int iwm_rate2ridx(struct iwm_softc *, uint8_t)
Definition: if_iwm.c:4226
static int iwm_start_fw(struct iwm_softc *, const struct iwm_fw_img *)
Definition: if_iwm.c:2632
static struct ieee80211_node * iwm_node_alloc(struct ieee80211vap *, const uint8_t[IEEE80211_ADDR_LEN])
Definition: if_iwm.c:4191
static int iwm_get_noise(struct iwm_softc *, const struct iwm_statistics_rx_non_phy *)
Definition: if_iwm.c:3056
static int iwm_trans_pcie_fw_alive(struct iwm_softc *, uint32_t)
Definition: if_iwm.c:1688
static void iwm_init(struct iwm_softc *)
Definition: if_iwm.c:4838
static void iwm_pci_detach(device_t dev)
Definition: if_iwm.c:5903
#define IWM_NUM_2GHZ_CHANNELS
Definition: if_iwm.c:199
static bool iwm_rx_mpdu_mq(struct iwm_softc *sc, struct mbuf *m, uint32_t offset, bool stolen)
Definition: if_iwm.c:3262
MODULE_DEPEND(iwm, firmware, 1, 1, 1)
#define IWM_NVM_DEFAULT_CHUNK_SIZE
Definition: if_iwm.c:1765
static int iwm_pci_attach(device_t dev)
Definition: if_iwm.c:5849
static int iwm_tx(struct iwm_softc *, struct mbuf *, struct ieee80211_node *, int)
Definition: if_iwm.c:3706
static int iwm_tx_rateidx_global_lookup(struct iwm_softc *sc, uint8_t rate)
Definition: if_iwm.c:3618
static int iwm_nic_rx_legacy_init(struct iwm_softc *sc)
Definition: if_iwm.c:1465
static bool iwm_rx_mpdu(struct iwm_softc *, struct mbuf *, uint32_t, bool)
Definition: if_iwm.c:3372
#define PCI_PRODUCT_INTEL_WL_9560_2
Definition: if_iwm.c:5783
static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *, uint16_t *, uint32_t)
Definition: if_iwm.c:1874
static void iwm_disable_interrupts(struct iwm_softc *)
Definition: if_iwm.c:1223
MODULE_PNP_INFO("U16:device;P:#;T:vendor=0x8086", pci, iwm_pci_driver, iwm_devices, nitems(iwm_devices))
static void iwm_rx_tx_cmd(struct iwm_softc *, struct iwm_rx_packet *)
Definition: if_iwm.c:3490
_Static_assert(nitems(iwm_nvm_channels)<=IWM_NUM_CHANNELS, "IWM_NUM_CHANNELS is too small")
#define PCI_PRODUCT_INTEL_WL_3160_1
Definition: if_iwm.c:5770
static int iwm_wme_update(struct ieee80211com *ic)
Definition: if_iwm.c:6155
static int iwm_pcie_load_section(struct iwm_softc *, uint8_t, const struct iwm_fw_desc *)
Definition: if_iwm.c:2348
static void iwm_start(struct iwm_softc *)
Definition: if_iwm.c:4887
#define IWM_EXP2(x)
static driver_t iwm_pci_driver
Definition: if_iwm.c:6574
static int iwm_suspend(device_t dev)
Definition: if_iwm.c:6473
static int iwm_pcie_load_cpu_sections_8000(struct iwm_softc *sc, const struct iwm_fw_img *, int, int *)
Definition: if_iwm.c:2451
static void iwm_intr(void *)
Definition: if_iwm.c:5619
static void iwm_vap_delete(struct ieee80211vap *)
Definition: if_iwm.c:6326
static int iwm_nic_rx_mq_init(struct iwm_softc *sc)
Definition: if_iwm.c:1412
static int iwm_get_n_hw_addrs(const struct iwm_softc *, const uint16_t *)
Definition: if_iwm.c:2103
static void iwm_rftoggle_task(void *arg, int npending __unused)
Definition: if_iwm.c:4973
#define mtodoff(m, t, off)
Definition: if_iwm.c:175
int iwm_flush_tx_path(struct iwm_softc *sc, uint32_t tfd_msk, uint32_t flags)
Definition: if_iwm.c:3962
static int iwm_init_hw(struct iwm_softc *)
Definition: if_iwm.c:4692
#define PCI_PRODUCT_INTEL_WL_7260_2
Definition: if_iwm.c:5776
static void iwm_handle_rx_statistics(struct iwm_softc *, struct iwm_rx_packet *)
Definition: if_iwm.c:3087
static int iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *, const uint16_t *)
Definition: if_iwm.c:2093
static void iwm_add_channel_band(struct iwm_softc *, struct ieee80211_channel[], int, int *, int, size_t, const uint8_t[])
Definition: if_iwm.c:1940
#define PCI_PRODUCT_INTEL_WL_9560_1
Definition: if_iwm.c:5782
static int iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *)
Definition: if_iwm.c:2083
static int iwm_alloc_kw(struct iwm_softc *)
Definition: if_iwm.c:912
static int iwm_lar_disable
Definition: if_iwm.c:401
const uint8_t iwm_nvm_channels_8000[]
Definition: if_iwm.c:188
static void iwm_notif_intr(struct iwm_softc *)
Definition: if_iwm.c:5572
#define ERROR_ELEM_SIZE
Definition: if_iwm.c:5083
static void iwm_parent(struct ieee80211com *)
Definition: if_iwm.c:4949
static int iwm_detach_local(struct iwm_softc *sc, int)
Definition: if_iwm.c:6496
static void iwm_nic_config(struct iwm_softc *)
Definition: if_iwm.c:1361
static void iwm_xmit_queue_drain(struct iwm_softc *)
Definition: if_iwm.c:6336
static void iwm_free_rx_ring(struct iwm_softc *, struct iwm_rx_ring *)
Definition: if_iwm.c:1032
static void iwm_radiotap_attach(struct iwm_softc *)
Definition: if_iwm.c:6276
static int iwm_send_phy_cfg_cmd(struct iwm_softc *)
Definition: if_iwm.c:2701
static int iwm_nvm_init(struct iwm_softc *)
Definition: if_iwm.c:2292
static int iwm_probe(device_t dev)
Definition: if_iwm.c:5810
static void iwm_setrates(struct iwm_softc *, struct iwm_node *, int)
Definition: if_iwm.c:4244
#define PCI_PRODUCT_INTEL_WL_3165_1
Definition: if_iwm.c:5772
static void iwm_scan_mindwell(struct ieee80211_scan_state *)
Definition: if_iwm.c:6421
static void iwm_scan_curchan(struct ieee80211_scan_state *, unsigned long)
Definition: if_iwm.c:6416
static void iwm_free_tx_ring(struct iwm_softc *, struct iwm_tx_ring *)
Definition: if_iwm.c:1177
#define PCI_PRODUCT_INTEL_WL_8265_1
Definition: if_iwm.c:5781
static int iwm_pcie_load_given_ucode_8000(struct iwm_softc *, const struct iwm_fw_img *)
Definition: if_iwm.c:2593
static int iwm_alive_fn(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
Definition: if_iwm.c:2720
__FBSDID("$FreeBSD$")
#define IWM_UCODE_CALIB_TIMEOUT
Definition: if_iwm.c:235
static void iwm_init_task(void *)
Definition: if_iwm.c:6426
static int iwm_is_valid_ether_addr(uint8_t *)
Definition: if_iwm.c:6144
static void iwm_reset_rx_ring(struct iwm_softc *, struct iwm_rx_ring *)
Definition: if_iwm.c:1018
#define TB0_SIZE
Definition: if_iwm.c:3704
static struct iwm_nvm_data * iwm_parse_nvm_sections(struct iwm_softc *, struct iwm_nvm_section *)
Definition: if_iwm.c:2238
static int iwm_get_sku(const struct iwm_softc *, const uint16_t *, const uint16_t *)
Definition: if_iwm.c:2073
static uint8_t iwm_rate_from_ucode_rate(uint32_t)
Definition: if_iwm.c:4198
static int iwm_rx_tx_cmd_single(struct iwm_softc *, struct iwm_rx_packet *, struct iwm_node *)
Definition: if_iwm.c:3413
static int iwm_detach(device_t)
Definition: if_iwm.c:6556
static int iwm_rx_addbuf(struct iwm_softc *, int, int)
Definition: if_iwm.c:2996
static void iwm_rx_rx_phy_cmd(struct iwm_softc *, struct iwm_rx_packet *)
Definition: if_iwm.c:3043
#define PCI_PRODUCT_INTEL_WL_8260_2
Definition: if_iwm.c:5780
static void iwm_set_radio_cfg(const struct iwm_softc *, struct iwm_nvm_data *, uint32_t)
Definition: if_iwm.c:2116
static void iwm_endscan_cb(void *, int)
Definition: if_iwm.c:4554
static void iwm_set_hw_address_family_8000(struct iwm_softc *, struct iwm_nvm_data *, const uint16_t *, const uint16_t *)
Definition: if_iwm.c:2014
static int iwm_alloc_ict(struct iwm_softc *)
Definition: if_iwm.c:919
#define IWM_NVM_READ_OPCODE
Definition: if_iwm.c:1768
static int iwm_read_firmware(struct iwm_softc *)
Definition: if_iwm.c:540
#define PCI_PRODUCT_INTEL_WL_9260_1
Definition: if_iwm.c:5785
static int iwm_send_bt_init_conf(struct iwm_softc *)
Definition: if_iwm.c:4567
static void iwm_set_channel(struct ieee80211com *)
Definition: if_iwm.c:6411
static int iwm_rx_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_phy_info *phy_info)
Definition: if_iwm.c:3103
const uint8_t iwm_nvm_channels[]
Definition: if_iwm.c:177
#define PCI_PRODUCT_INTEL_WL_3165_2
Definition: if_iwm.c:5773
static devclass_t iwm_devclass
Definition: if_iwm.c:6580
uint8_t iwm_ridx2rate(struct ieee80211_rateset *rs, int ridx)
Definition: if_iwm.c:4211
static int iwm_dev_check(device_t dev)
Definition: if_iwm.c:5826
static int iwm_alloc_tx_ring(struct iwm_softc *, struct iwm_tx_ring *, int)
Definition: if_iwm.c:1069
static int iwm_auth(struct ieee80211vap *, struct iwm_softc *)
Definition: if_iwm.c:4062
static void iwm_bring_down_firmware(struct iwm_softc *sc, struct ieee80211vap *vap)
Definition: if_iwm.c:4328
static int iwm_alloc_fwmem(struct iwm_softc *)
Definition: if_iwm.c:894
static int iwm_nic_init(struct iwm_softc *)
Definition: if_iwm.c:1566
static struct ieee80211vap * iwm_vap_create(struct ieee80211com *, const char[IFNAMSIZ], int, enum ieee80211_opmode, int, const uint8_t[IEEE80211_ADDR_LEN], const uint8_t[IEEE80211_ADDR_LEN])
Definition: if_iwm.c:6292
static boolean_t iwm_is_wifi_mcc_supported(struct iwm_softc *)
Definition: if_iwm.c:4598
static bool iwm_rx_rx_mpdu(struct iwm_softc *sc, struct mbuf *m, uint32_t offset, bool stolen)
Definition: if_iwm.c:3148
static void iwm_preinit(void *)
Definition: if_iwm.c:6202
#define PCI_PRODUCT_INTEL_WL_8260_1
Definition: if_iwm.c:5779
static int iwm_nic_tx_init(struct iwm_softc *)
Definition: if_iwm.c:1529
struct iwm_tlv_calib_data __packed
#define PCI_CFG_RETRY_TIMEOUT
Definition: if_iwm.c:5846
DRIVER_MODULE(iwm, pci, iwm_pci_driver, iwm_devclass, NULL, NULL)
static uint32_t iwm_eeprom_channel_flags(uint16_t)
Definition: if_iwm.c:1921
static int iwm_resume(device_t dev)
Definition: if_iwm.c:6443
#define PCI_PRODUCT_INTEL_WL_7260_1
Definition: if_iwm.c:5775
static int iwm_nic_rx_init(struct iwm_softc *)
Definition: if_iwm.c:1520
#define IWM_N_HW_ADDR_MASK
Definition: if_iwm.c:200
static void iwm_tt_tx_backoff(struct iwm_softc *, uint32_t)
Definition: if_iwm.c:4677
#define PCI_PRODUCT_INTEL_WL_3160_2
Definition: if_iwm.c:5771
static void iwm_init_channel_map(struct ieee80211com *, int, int *, struct ieee80211_channel[])
Definition: if_iwm.c:1981
static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t)
Definition: if_iwm.c:2689
static void iwm_enable_fw_load_int(struct iwm_softc *sc)
Definition: if_iwm.c:2623
static int iwm_alloc_sched(struct iwm_softc *)
Definition: if_iwm.c:903
static int iwm_pcie_load_cpu_sections(struct iwm_softc *, const struct iwm_fw_img *, int, int *)
Definition: if_iwm.c:2513
static int iwm_rxmq_get_signal_strength(struct iwm_softc *sc, struct iwm_rx_mpdu_desc *desc)
Definition: if_iwm.c:3130
static void iwm_update_mcast(struct ieee80211com *)
Definition: if_iwm.c:6406
static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t, uint16_t, uint8_t *, uint16_t *)
Definition: if_iwm.c:1777
static boolean_t iwm_is_lar_supported(struct iwm_softc *)
Definition: if_iwm.c:4579
static int iwm_allow_mcast(struct ieee80211vap *, struct iwm_softc *)
Definition: if_iwm.c:4809
static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *)
Definition: if_iwm.c:4605
static void iwm_restore_interrupts(struct iwm_softc *)
Definition: if_iwm.c:1217
static int iwm_wait_phy_db_entry(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data)
Definition: if_iwm.c:2770
static void iwm_fw_info_free(struct iwm_fw_info *)
Definition: if_iwm.c:532
static void iwm_handle_rxb(struct iwm_softc *, struct mbuf *)
Definition: if_iwm.c:5250
#define HAVEROOM(a)
static void iwm_reset_tx_ring(struct iwm_softc *, struct iwm_tx_ring *)
Definition: if_iwm.c:1149
#define ERROR_START_OFFSET
Definition: if_iwm.c:5082
static void iwm_ict_reset(struct iwm_softc *)
Definition: if_iwm.c:1234
#define PCI_PRODUCT_INTEL_WL_9560_3
Definition: if_iwm.c:5784
static int iwm_load_ucode_wait_alive(struct iwm_softc *, enum iwm_ucode_type)
Definition: if_iwm.c:2792
#define PCI_PRODUCT_INTEL_WL_3168_1
Definition: if_iwm.c:5774
static void iwm_scan_end(struct ieee80211com *)
Definition: if_iwm.c:6377
static void iwm_enable_interrupts(struct iwm_softc *)
Definition: if_iwm.c:1210
const struct iwm_cfg iwm3160_cfg
Definition: if_iwm_7000.c:103
const struct iwm_cfg iwm3168_cfg
Definition: if_iwm_7000.c:117
const struct iwm_cfg iwm7265_cfg
Definition: if_iwm_7000.c:125
const struct iwm_cfg iwm7265d_cfg
Definition: if_iwm_7000.c:132
const struct iwm_cfg iwm3165_cfg
Definition: if_iwm_7000.c:110
const struct iwm_cfg iwm7260_cfg
Definition: if_iwm_7000.c:96
const struct iwm_cfg iwm8260_cfg
Definition: if_iwm_8000.c:91
const struct iwm_cfg iwm8265_cfg
Definition: if_iwm_8000.c:98
const struct iwm_cfg iwm9560_cfg
Definition: if_iwm_9000.c:90
const struct iwm_cfg iwm9260_cfg
Definition: if_iwm_9260.c:90
int iwm_binding_remove_vif(struct iwm_softc *sc, struct iwm_vap *ivp)
int iwm_binding_add_vif(struct iwm_softc *sc, struct iwm_vap *ivp)
@ IWM_DEVICE_FAMILY_9000
Definition: if_iwm_config.h:81
@ IWM_DEVICE_FAMILY_7000
Definition: if_iwm_config.h:79
@ IWM_DEVICE_FAMILY_8000
Definition: if_iwm_config.h:80
@ IWM_NVM_SDP
#define IWM_DPRINTF(sc, m, fmt,...)
Definition: if_iwm_debug.h:59
int iwm_send_paging_cmd(struct iwm_softc *sc, const struct iwm_fw_img *fw)
Definition: if_iwm_fw.c:314
int iwm_save_fw_paging(struct iwm_softc *sc, const struct iwm_fw_img *fw)
Definition: if_iwm_fw.c:301
void iwm_free_fw_paging(struct iwm_softc *sc)
Definition: if_iwm_fw.c:129
#define IWM_FW_PAGING_SIZE
Definition: if_iwm_fw.h:83
#define IWM_MAX_PAGING_IMAGE_SIZE
Definition: if_iwm_fw.h:99
void iwm_led_enable(struct iwm_softc *sc)
Definition: if_iwm_led.c:142
void iwm_led_disable(struct iwm_softc *sc)
Definition: if_iwm_led.c:149
void iwm_led_blink_stop(struct iwm_softc *sc)
Definition: if_iwm_led.c:184
void iwm_led_blink_start(struct iwm_softc *sc)
Definition: if_iwm_led.c:178
const uint8_t iwm_ac_to_tx_fifo[]
int iwm_mac_ctxt_add(struct iwm_softc *sc, struct ieee80211vap *vap)
int iwm_mac_ctxt_changed(struct iwm_softc *sc, struct ieee80211vap *vap)
void iwm_notification_wait_free(struct iwm_notif_wait_data *notif_data)
void iwm_remove_notification(struct iwm_notif_wait_data *notif_data, struct iwm_notification_wait *wait_entry)
void iwm_init_notification_wait(struct iwm_notif_wait_data *notif_data, struct iwm_notification_wait *wait_entry, const uint16_t *cmds, int n_cmds, int(*fn)(struct iwm_softc *sc, struct iwm_rx_packet *pkt, void *data), void *fn_data)
struct iwm_notif_wait_data * iwm_notification_wait_init(struct iwm_softc *sc)
int iwm_wait_notification(struct iwm_notif_wait_data *notif_data, struct iwm_notification_wait *wait_entry, int timeout)
void iwm_notification_wait_notify(struct iwm_notif_wait_data *notif_data, uint16_t cmd, struct iwm_rx_packet *pkt)
void iwm_write_prph64(struct iwm_softc *sc, uint64_t addr, uint64_t val)
void iwm_set_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
void iwm_enable_rfkill_int(struct iwm_softc *sc)
void iwm_set_bits_mask_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits, uint32_t mask)
int iwm_pcie_rx_stop(struct iwm_softc *sc)
uint32_t iwm_read_prph(struct iwm_softc *sc, uint32_t addr)
void iwm_pcie_clear_cmd_in_flight(struct iwm_softc *sc)
void iwm_set_pwr(struct iwm_softc *sc)
int iwm_apm_init(struct iwm_softc *sc)
int iwm_start_hw(struct iwm_softc *sc)
int iwm_check_rfkill(struct iwm_softc *sc)
int iwm_nic_lock(struct iwm_softc *sc)
int iwm_prepare_card_hw(struct iwm_softc *sc)
void iwm_nic_unlock(struct iwm_softc *sc)
int iwm_write_mem32(struct iwm_softc *sc, uint32_t addr, uint32_t val)
int iwm_write_mem(struct iwm_softc *sc, uint32_t addr, const void *buf, int dwords)
void iwm_clear_bits_prph(struct iwm_softc *sc, uint32_t reg, uint32_t bits)
int iwm_poll_bit(struct iwm_softc *sc, int reg, uint32_t bits, uint32_t mask, int timo)
void iwm_write_prph(struct iwm_softc *sc, uint32_t addr, uint32_t val)
void iwm_apm_stop(struct iwm_softc *sc)
int iwm_read_mem(struct iwm_softc *sc, uint32_t addr, void *buf, int dwords)
int iwm_phy_ctxt_add(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt, struct ieee80211_channel *chan, uint8_t chains_static, uint8_t chains_dynamic)
int iwm_phy_ctxt_changed(struct iwm_softc *sc, struct iwm_phy_ctxt *ctxt, struct ieee80211_channel *chan, uint8_t chains_static, uint8_t chains_dynamic)
int iwm_send_phy_db_data(struct iwm_phy_db *phy_db)
void iwm_phy_db_free(struct iwm_phy_db *phy_db)
struct iwm_phy_db * iwm_phy_db_init(struct iwm_softc *sc)
int iwm_phy_db_set_section(struct iwm_phy_db *phy_db, struct iwm_rx_packet *pkt)
int iwm_disable_beacon_filter(struct iwm_softc *sc)
Definition: if_iwm_power.c:367
int iwm_power_update_mac(struct iwm_softc *sc)
Definition: if_iwm_power.c:450
int iwm_enable_beacon_filter(struct iwm_softc *sc, struct iwm_vap *ivp)
Definition: if_iwm_power.c:356
int iwm_power_update_device(struct iwm_softc *sc)
Definition: if_iwm_power.c:472
void iwm_rx_lmac_scan_complete_notif(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
Definition: if_iwm_scan.c:245
int iwm_umac_scan(struct iwm_softc *sc)
Definition: if_iwm_scan.c:607
void iwm_rx_umac_scan_complete_notif(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
Definition: if_iwm_scan.c:271
int iwm_config_umac_scan(struct iwm_softc *sc)
Definition: if_iwm_scan.c:478
int iwm_lmac_scan(struct iwm_softc *sc)
Definition: if_iwm_scan.c:717
int iwm_scan_stop_wait(struct iwm_softc *sc)
Definition: if_iwm_scan.c:890
int iwm_sf_update(struct iwm_softc *sc, struct ieee80211vap *changed_vif, boolean_t remove_vif)
Definition: if_iwm_sf.c:298
int iwm_rm_sta(struct iwm_softc *sc, struct ieee80211vap *vap, boolean_t is_assoc)
Definition: if_iwm_sta.c:271
int iwm_rm_sta_id(struct iwm_softc *sc, struct ieee80211vap *vap)
Definition: if_iwm_sta.c:307
int iwm_update_sta(struct iwm_softc *sc, struct iwm_node *in)
Definition: if_iwm_sta.c:205
int iwm_add_aux_sta(struct iwm_softc *sc)
Definition: if_iwm_sta.c:354
int iwm_add_sta(struct iwm_softc *sc, struct iwm_node *in)
Definition: if_iwm_sta.c:199
void iwm_protect_session(struct iwm_softc *sc, struct iwm_vap *ivp, uint32_t duration, uint32_t max_delay, boolean_t wait_for_notif)
void iwm_rx_time_event_notif(struct iwm_softc *sc, struct iwm_rx_packet *pkt)
void iwm_stop_session_protection(struct iwm_softc *sc, struct iwm_vap *ivp)
void iwm_dma_contig_free(struct iwm_dma_info *dma)
Definition: if_iwm_util.c:477
void iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
Definition: if_iwm_util.c:418
int iwm_send_lq_cmd(struct iwm_softc *sc, struct iwm_lq_cmd *lq, boolean_t init)
Definition: if_iwm_util.c:503
int iwm_send_cmd_pdu(struct iwm_softc *sc, uint32_t id, uint32_t flags, uint16_t len, const void *data)
Definition: if_iwm_util.c:349
int iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma, bus_size_t size, bus_size_t alignment)
Definition: if_iwm_util.c:437
int iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
Definition: if_iwm_util.c:170
static uint8_t iwm_get_valid_tx_ant(struct iwm_softc *sc)
Definition: if_iwm_util.h:134
static uint32_t iwm_get_phy_config(struct iwm_softc *sc)
Definition: if_iwm_util.h:150
#define IWM_SCD_DRAM_BASE_ADDR
Definition: if_iwmreg.h:1391
#define IWM_FH_TX_CHICKEN_BITS_REG
Definition: if_iwmreg.h:1796
#define IWM_CSR_FH_INT_STATUS
Definition: if_iwmreg.h:98
#define IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS
Definition: if_iwmreg.h:1641
#define IWM_PHY_CONFIGURATION_CMD
Definition: if_iwmreg.h:2044
#define IWM_RFH_GEN_CFG_SERVICE_DMA_SNOOP
Definition: if_iwmreg.h:535
#define IWM_RX_INFO_ENERGY_ANT_B_POS
Definition: if_iwmreg.h:3193
#define IWM_CSR_INT_BIT_HW_ERR
Definition: if_iwmreg.h:197
#define IWM_SCD_QUEUE_CFG
Definition: if_iwmreg.h:2008
#define IWM_TX_STATUS_SUCCESS
Definition: if_iwmreg.h:5022
static unsigned int IWM_FH_MEM_CBBC_QUEUE(unsigned int chnl)
Definition: if_iwmreg.h:1490
#define IWM_FH_KW_MEM_ADDR_REG
Definition: if_iwmreg.h:1465
#define IWM_REPLY_RX_PHY_CMD
Definition: if_iwmreg.h:2079
#define IWM_DEFAULT_MAX_PROBE_LENGTH
Definition: if_iwmreg.h:974
#define IWM_ALWAYS_LONG_GROUP
Definition: if_iwmreg.h:6857
#define IWM_FH_TSSR_TX_STATUS_REG
Definition: if_iwmreg.h:1765
#define IWM_FH_RSCSR_CHNL0_WPTR
Definition: if_iwmreg.h:1594
#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE
Definition: if_iwmreg.h:1741
#define IWM_FH_TFDIB_CTRL1_REG(_chnl)
Definition: if_iwmreg.h:1687
#define IWM_CSR_INT
Definition: if_iwmreg.h:96
#define IWM_NVM_SECTION_TYPE_MAC_OVERRIDE
Definition: if_iwmreg.h:2410
#define IWM_NVM_RF_CFG_DASH_MSK_8000(x)
Definition: if_iwmreg.h:2365
#define IWM_READ(sc, reg)
Definition: if_iwmreg.h:6954
#define IWM_TX_CMD_FLG_PROT_REQUIRE
Definition: if_iwmreg.h:4821
#define IWM_CSR_HW_REV_DASH(_val)
Definition: if_iwmreg.h:297
#define IWM_FH_RSCSR_FRAME_ALIGN
Definition: if_iwmreg.h:6930
#define IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_DASH
Definition: if_iwmreg.h:176
#define IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG
Definition: if_iwmreg.h:1629
#define IWM_CSR_UCODE_SW_BIT_RFKILL
Definition: if_iwmreg.h:388
#define IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM
Definition: if_iwmreg.h:1747
#define IWM_RFH_GEN_CFG
Definition: if_iwmreg.h:534
#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE
Definition: if_iwmreg.h:1729
#define IWM_BT_COEX_WIFI
Definition: if_iwmreg.h:6599
#define IWM_MAC_PM_POWER_TABLE
Definition: if_iwmreg.h:2077
#define IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS
Definition: if_iwmreg.h:1640
#define IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE
Definition: if_iwmreg.h:558
#define IWM_NVM_SECTION_TYPE_PHY_SKU
Definition: if_iwmreg.h:2411
#define IWM_RFH_Q0_FRBDCB_RIDX
Definition: if_iwmreg.h:450
#define IWM_MAX_QUEUES
Definition: if_iwmreg.h:1923
#define IWM_DEFAULT_TX_RETRY
Definition: if_iwmreg.h:4914
#define IWM_TX_STATUS_DIRECT_DONE
Definition: if_iwmreg.h:5023
#define IWM_LTR_CONFIG
Definition: if_iwmreg.h:2051
#define IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
Definition: if_iwmreg.h:420
#define IWM_CSR_HW_REV_TYPE_MSK
Definition: if_iwmreg.h:309
#define IWM_MAC_CONTEXT_CMD
Definition: if_iwmreg.h:2014
#define IWM_RX_INFO_ENERGY_ANT_C_MSK
Definition: if_iwmreg.h:3191
#define IWM_SETBITS(sc, reg, mask)
Definition: if_iwmreg.h:6963
#define IWM_FH_RSCSR_CHNL0_RBDCB_WPTR_REG
Definition: if_iwmreg.h:1593
#define IWM_CSR_INT_PERIODIC_REG
Definition: if_iwmreg.h:104
#define IWM_CSR_FH_INT_RX_MASK
Definition: if_iwmreg.h:226
#define IWM_SCD_EN_CTRL
Definition: if_iwmreg.h:1400
#define IWM_REMOVE_STA
Definition: if_iwmreg.h:2000
#define IWM_CSR_RESET_REG_FLAG_SW_RESET
Definition: if_iwmreg.h:241
#define IWM_CSR_INT_BIT_RF_KILL
Definition: if_iwmreg.h:202
#define IWM_CSR_HW_REV_STEP(_val)
Definition: if_iwmreg.h:298
#define IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR
Definition: if_iwmreg.h:545
#define IWM_APMG_PS_CTRL_REG
Definition: if_iwmreg.h:1210
#define IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
Definition: if_iwmreg.h:284
#define IWM_SCD_QUEUE_STTS_REG_POS_TXF
Definition: if_iwmreg.h:1350
#define IWM_LQ_CMD
Definition: if_iwmreg.h:2021
#define IWM_NVM_VERSION_8000
Definition: if_iwmreg.h:2334
#define IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL
Definition: if_iwmreg.h:1655
#define IWM_NVM_ACCESS_CMD
Definition: if_iwmreg.h:2057
#define IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE
Definition: if_iwmreg.h:548
#define IWM_CSR_DRAM_INT_TBL_ENABLE
Definition: if_iwmreg.h:419
#define IWM_APMG_PCIDEV_STT_REG
Definition: if_iwmreg.h:1211
#define IWM_CSR_UCODE_DRV_GP1_CLR
Definition: if_iwmreg.h:136
#define IWM_NVM_SECTION_TYPE_SW
Definition: if_iwmreg.h:2400
#define IWM_NVM_RF_CFG_TYPE_MSK(x)
Definition: if_iwmreg.h:2359
#define IWM_FW_PHY_CFG_TX_CHAIN
Definition: if_iwmreg.h:1009
#define IWM_RADIO_CFG_8000
Definition: if_iwmreg.h:2335
#define IWM_UCODE_TLV_FLAGS_PAN
Definition: if_iwmreg.h:796
#define IWM_FW_CMD_ID_AND_COLOR(_id, _color)
Definition: if_iwmreg.h:2657
#define IWM_FH_MEM_TFDIB_REG1_ADDR_BITSHIFT
Definition: if_iwmreg.h:1680
#define IWM_SCD_QUEUE_STTS_REG_MSK
Definition: if_iwmreg.h:1354
#define IWM_SCD_TXFACT
Definition: if_iwmreg.h:1393
#define IWM_FW_PHY_CFG_RADIO_STEP
Definition: if_iwmreg.h:1005
#define IWM_CSR_FH_INT_TX_MASK
Definition: if_iwmreg.h:230
#define IWM_REPLY_BEACON_FILTERING_CMD
Definition: if_iwmreg.h:2094
#define IWM_NVM_SKU_CAP_BAND_52GHZ
Definition: if_iwmreg.h:2351
#define IWM_SCAN_ITERATION_COMPLETE_UMAC
Definition: if_iwmreg.h:1991
#define IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS
Definition: if_iwmreg.h:1230
#define IWM_RATE_54M_PLCP
Definition: if_iwmreg.h:4535
#define IWM_NVM_LAR_OFFSET_8000
Definition: if_iwmreg.h:2342
#define IWM_CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED
Definition: if_iwmreg.h:389
#define IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
Definition: if_iwmreg.h:421
#define IWM_CSR_DRAM_INT_TBL_REG
Definition: if_iwmreg.h:143
#define IWM_TX_CMD_LIFE_TIME_INFINITE
Definition: if_iwmreg.h:4899
#define IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_DASH
Definition: if_iwmreg.h:180
#define IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_STEP
Definition: if_iwmreg.h:168
#define IWM_MAX_BINDINGS
Definition: if_iwmreg.h:2646
#define IWM_SCD_GP_CTRL
Definition: if_iwmreg.h:1399
#define IWM_CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI
Definition: if_iwmreg.h:171
#define IWM_RX_RES_PHY_FLAGS_BAND_24
Definition: if_iwmreg.h:3277
#define IWM_RFH_RXF_DMA_RBDCB_SIZE_512
Definition: if_iwmreg.h:521
#define IWM_TIME_QUOTA_CMD
Definition: if_iwmreg.h:2018
static unsigned int IWM_SCD_QUEUE_RDPTR(unsigned int chnl)
Definition: if_iwmreg.h:1409
#define IWM_NVM_CHANNELS
Definition: if_iwmreg.h:2320
#define IWM_FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK
Definition: if_iwmreg.h:1683
#define IWM_TIME_EVENT_CMD
Definition: if_iwmreg.h:2015
#define IWM_CLRBITS(sc, reg, mask)
Definition: if_iwmreg.h:6966
#define IWM_UCODE_MINOR(ver)
Definition: if_iwmreg.h:985
#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD
Definition: if_iwmreg.h:1732
#define IWM_PHY_CONTEXT_CMD
Definition: if_iwmreg.h:1987
#define IWM_HW_STEP_LOCATION_BITS
Definition: if_iwmreg.h:622
#define IWM_FW_MEM_EXTENDED_END
Definition: if_iwmreg.h:554
#define IWM_WRITE(sc, reg, val)
Definition: if_iwmreg.h:6957
#define IWM_RATE_36M_PLCP
Definition: if_iwmreg.h:4533
#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x)
Definition: if_iwmreg.h:2369
#define IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_STEP
Definition: if_iwmreg.h:174
#define IWM_NVM_RF_CFG_PNUM_MSK_8000(x)
Definition: if_iwmreg.h:2364
#define IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_STEP
Definition: if_iwmreg.h:181
#define IWM_CSR_INT_MASK
Definition: if_iwmreg.h:97
#define IWM_FW_PHY_CFG_RADIO_TYPE
Definition: if_iwmreg.h:1003
#define IWM_WFMP_MAC_ADDR_1
Definition: if_iwmreg.h:613
#define IWM_SCAN_ABORT_UMAC
Definition: if_iwmreg.h:1994
#define IWM_DTS_MEASUREMENT_NOTIF_WIDE
Definition: if_iwmreg.h:2137
#define IWM_NVM_RF_CFG_STEP_MSK_8000(x)
Definition: if_iwmreg.h:2366
#define IWM_MISSED_BEACONS_NOTIFICATION
Definition: if_iwmreg.h:2072
#define IWM_RFH_Q0_URBD_STTS_WPTR_LSB
Definition: if_iwmreg.h:461
#define IWM_NVM_CHANNELS_8000
Definition: if_iwmreg.h:2340
#define IWM_NVM_SECTION_TYPE_REGULATORY_SDP
Definition: if_iwmreg.h:2407
#define IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE
Definition: if_iwmreg.h:1351
#define IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_128
Definition: if_iwmreg.h:537
#define IWM_FW_PAGING_BLOCK_CMD
Definition: if_iwmreg.h:2031
#define IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY
Definition: if_iwmreg.h:1653
#define IWM_FW_PHY_CFG_RADIO_STEP_POS
Definition: if_iwmreg.h:1004
#define IWM_DTS_MEASUREMENT_NOTIFICATION
Definition: if_iwmreg.h:2098
#define IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK
Definition: if_iwmreg.h:1363
#define IWM_NUM_UCODE_TLV_API
Definition: if_iwmreg.h:858
#define IWM_TFD_QUEUE_SIZE_BC_DUP
Definition: if_iwmreg.h:1835
#define IWM_RATE_9M_PLCP
Definition: if_iwmreg.h:4529
#define IWM_FH_SRVC_CHNL
Definition: if_iwmreg.h:1790
#define IWM_WFMP_MAC_ADDR_0
Definition: if_iwmreg.h:612
#define IWM_TX_CMD_FLG_MH_PAD
Definition: if_iwmreg.h:4838
#define IWM_MAX_DBM
Definition: if_iwmreg.h:6952
#define IWM_CPU1_CPU2_SEPARATOR_SECTION
Definition: if_iwmreg.h:980
#define IWM_CSR_HW_IF_CONFIG_REG_POS_PHY_TYPE
Definition: if_iwmreg.h:179
#define IWM_SCAN_CFG_CMD
Definition: if_iwmreg.h:1992
#define IWM_AUX_MISC_REG
Definition: if_iwmreg.h:621
#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE
Definition: if_iwmreg.h:1728
#define IWM_NVM_CHANNEL_IBSS
Definition: if_iwmreg.h:2384
#define IWM_HOST_INT_OPER_MODE
Definition: if_iwmreg.h:671
#define IWM_APMG_CLK_DIS_REG
Definition: if_iwmreg.h:1209
#define IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(_chnl)
Definition: if_iwmreg.h:1793
#define IWM_FW_PHY_CFG_TX_CHAIN_POS
Definition: if_iwmreg.h:1008
#define IWM_FW_MEM_EXTENDED_START
Definition: if_iwmreg.h:553
#define IWM_RELEASE_CPU_RESET
Definition: if_iwmreg.h:1248
#define IWM_MCC_UPDATE_CMD
Definition: if_iwmreg.h:2084
#define IWM_FH_TCSR_CHNL_TX_BUF_STS_REG(_chnl)
Definition: if_iwmreg.h:1722
#define IWM_DUMP_TX_FIFO_FLUSH
Definition: if_iwmreg.h:5266
#define IWM_NVM_VERSION
Definition: if_iwmreg.h:2316
#define IWM_RFH_RXF_RXQ_ACTIVE
Definition: if_iwmreg.h:532
#define IWM_RX_INFO_ENERGY_ANT_C_POS
Definition: if_iwmreg.h:3194
#define IWM_RATE_6M_PLCP
Definition: if_iwmreg.h:4528
#define IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC
Definition: if_iwmreg.h:941
#define IWM_RATE_2M_PLCP
Definition: if_iwmreg.h:4537
#define IWM_NVM_SECTION_TYPE_CALIBRATION
Definition: if_iwmreg.h:2403
#define IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS
Definition: if_iwmreg.h:1360
#define IWM_CSR_HW_IF_CONFIG_REG_MSK_MAC_DASH
Definition: if_iwmreg.h:167
#define IWM_RFH_DMA_EN_ENABLE_VAL
Definition: if_iwmreg.h:530
#define IWM_RATE_11M_PLCP
Definition: if_iwmreg.h:4539
#define IWM_CSR_HW_REV
Definition: if_iwmreg.h:114
#define IWM_RATE_MCS_ANT_B_MSK
Definition: if_iwmreg.h:4666
#define IWM_MAC_ADDRESS_OVERRIDE_8000
Definition: if_iwmreg.h:2330
#define IWM_SKU_8000
Definition: if_iwmreg.h:2336
#define IWM_SCD_QUEUE_STTS_REG_POS_WSL
Definition: if_iwmreg.h:1352
#define IWM_FW_CTXT_INVALID
Definition: if_iwmreg.h:2655
#define IWM_RFH_RXF_DMA_RB_SIZE_4K
Definition: if_iwmreg.h:504
#define IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK
Definition: if_iwmreg.h:1361
#define IWM_SCD_CONTEXT_QUEUE_OFFSET(x)
Definition: if_iwmreg.h:1379
#define IWM_CALIB_RES_NOTIF_PHY_DB
Definition: if_iwmreg.h:2045
#define IWM_TX_STATUS_FAIL_LONG_LIMIT
Definition: if_iwmreg.h:5033
#define IWM_MFUART_LOAD_NOTIFICATION
Definition: if_iwmreg.h:2074
#define IWM_CSR_RESET
Definition: if_iwmreg.h:100
#define IWM_RFH_Q0_FRBDCB_BA_LSB
Definition: if_iwmreg.h:441
#define IWM_RX_INFO_ENERGY_ANT_A_POS
Definition: if_iwmreg.h:3192
#define IWM_FH_MEM_TB_MAX_LENGTH
Definition: if_iwmreg.h:437
#define IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2
Definition: if_iwmreg.h:957
#define IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K
Definition: if_iwmreg.h:1648
#define IWM_SCAN_OFFLOAD_COMPLETE
Definition: if_iwmreg.h:2037
#define IWM_TX_STATUS_FAIL_SHORT_LIMIT
Definition: if_iwmreg.h:5032
#define IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL
Definition: if_iwmreg.h:1646
#define IWM_FW_PHY_CFG_RADIO_TYPE_POS
Definition: if_iwmreg.h:1002
#define IWM_NUM_PHY_CTX
Definition: if_iwmreg.h:3134
#define IWM_CSR_INT_PERIODIC_DIS
Definition: if_iwmreg.h:191
#define IWM_SB_CPU_1_STATUS
Definition: if_iwmreg.h:635
#define IWM_NVM_LAR_ENABLED_8000
Definition: if_iwmreg.h:2343
#define IWM_RX_RB_TIMEOUT
Definition: if_iwmreg.h:1642
#define IWM_CSR_MAC_SHADOW_REG_CTRL
Definition: if_iwmreg.h:144
#define IWM_SCD_GP_CTRL_ENABLE_31_QUEUES
Definition: if_iwmreg.h:1364
#define IWM_TX_CMD_FLG_STA_RATE
Definition: if_iwmreg.h:4823
#define IWM_SCAN_COMPLETE_UMAC
Definition: if_iwmreg.h:1995
#define IWM_APMG_CLK_VAL_DMA_CLK_RQT
Definition: if_iwmreg.h:1219
static uint8_t iwm_get_dma_hi_addr(bus_addr_t addr)
Definition: if_iwmreg.h:1841
#define IWM_MCC_SOURCE_GET_CURRENT
Definition: if_iwmreg.h:6783
#define IWM_DEBUG_LOG_MSG
Definition: if_iwmreg.h:2101
#define IWM_RFH_RXF_DMA_CFG
Definition: if_iwmreg.h:497
iwm_ucode_tlv_type
Definition: if_iwmreg.h:1090
@ IWM_UCODE_TLV_SEC_WOWLAN
Definition: if_iwmreg.h:1112
@ IWM_UCODE_TLV_FW_VERSION
Definition: if_iwmreg.h:1130
@ IWM_UCODE_TLV_SEC_INIT
Definition: if_iwmreg.h:1111
@ IWM_UCODE_TLV_DEF_CALIB
Definition: if_iwmreg.h:1113
@ IWM_UCODE_TLV_PAGING
Definition: if_iwmreg.h:1127
@ IWM_UCODE_TLV_PROBE_MAX_LEN
Definition: if_iwmreg.h:1097
@ IWM_UCODE_TLV_PHY_SKU
Definition: if_iwmreg.h:1114
@ IWM_UCODE_TLV_SDIO_ADMA_ADDR
Definition: if_iwmreg.h:1129
@ IWM_UCODE_TLV_SEC_RT
Definition: if_iwmreg.h:1110
@ IWM_UCODE_TLV_PAN
Definition: if_iwmreg.h:1098
@ IWM_UCODE_TLV_CMD_VERSIONS
Definition: if_iwmreg.h:1134
@ IWM_UCODE_TLV_N_SCAN_CHANNELS
Definition: if_iwmreg.h:1126
@ IWM_UCODE_TLV_FW_MEM_SEG
Definition: if_iwmreg.h:1136
@ IWM_UCODE_TLV_NUM_OF_CPU
Definition: if_iwmreg.h:1118
@ IWM_UCODE_TLV_FW_GSCAN_CAPA
Definition: if_iwmreg.h:1135
@ IWM_UCODE_TLV_ENABLED_CAPABILITIES
Definition: if_iwmreg.h:1125
@ IWM_UCODE_TLV_API_CHANGES_SET
Definition: if_iwmreg.h:1124
@ IWM_UCODE_TLV_CSCHEME
Definition: if_iwmreg.h:1119
@ IWM_UCODE_TLV_FLAGS
Definition: if_iwmreg.h:1109
@ IWM_UCODE_TLV_SEC_RT_USNIFFER
Definition: if_iwmreg.h:1128
#define IWM_MIN_DBM
Definition: if_iwmreg.h:6951
#define IWM_SCD_CHAINEXT_EN
Definition: if_iwmreg.h:1396
#define IWM_NVM_CHANNEL_VALID
Definition: if_iwmreg.h:2383
#define IWM_CSR_INT_BIT_ALIVE
Definition: if_iwmreg.h:206
#define IWM_FW_PHY_CFG_RADIO_DASH
Definition: if_iwmreg.h:1007
#define IWM_SCAN_OFFLOAD_REQUEST_CMD
Definition: if_iwmreg.h:2034
#define IWM_CSR_INT_BIT_FH_TX
Definition: if_iwmreg.h:199
#define IWM_FH_TCSR_CHNL_NUM
Definition: if_iwmreg.h:1715
#define IWM_PHY_INFO_FLAG_SHPREAMBLE
Definition: if_iwmreg.h:3244
#define IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(_chnl)
Definition: if_iwmreg.h:1787
#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x)
Definition: if_iwmreg.h:2368
#define IWM_RX_INFO_ENERGY_ANT_B_MSK
Definition: if_iwmreg.h:3190
#define IWM_RX_MPDU_MFLG2_PAD
Definition: if_iwmreg.h:3353
#define IWM_NVM_CHANNEL_RADAR
Definition: if_iwmreg.h:2386
#define IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE
Definition: if_iwmreg.h:5386
#define IWM_TX_CMD_FLG_ACK
Definition: if_iwmreg.h:4822
#define IWM_STATISTICS_NOTIFICATION
Definition: if_iwmreg.h:2065
#define IWM_RATE_5M_PLCP
Definition: if_iwmreg.h:4538
#define IWM_UCODE_TLV_CAPA_UMAC_SCAN
Definition: if_iwmreg.h:923
#define IWM_SKU
Definition: if_iwmreg.h:2318
#define IWM_SCD_CONTEXT_MEM_LOWER_BOUND
Definition: if_iwmreg.h:1368
#define IWM_CSR_GP_CNTRL
Definition: if_iwmreg.h:101
#define IWM_CSR_INT_BIT_SCD
Definition: if_iwmreg.h:200
#define IWM_TXPATH_FLUSH
Definition: if_iwmreg.h:2004
#define IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN
Definition: if_iwmreg.h:1353
#define IWM_PHY_DB_CMD
Definition: if_iwmreg.h:2046
#define IWM_SB_CPU_2_STATUS
Definition: if_iwmreg.h:636
#define IWM_LQ_FLAG_USE_RTS_MSK
Definition: if_iwmreg.h:4705
#define IWM_RX_MPDU_RES_STATUS_OVERRUN_OK
Definition: if_iwmreg.h:3321
#define IWM_POWER_TABLE_CMD
Definition: if_iwmreg.h:2049
#define IWM_RELEASE_CPU_RESET_BIT
Definition: if_iwmreg.h:1249
#define IWM_MCC_SOURCE_OLD_FW
Definition: if_iwmreg.h:6773
#define IWM_RATE_MCS_CCK_MSK
Definition: if_iwmreg.h:4565
#define IWM_CSR_INT_BIT_FH_RX
Definition: if_iwmreg.h:196
#define IWM_FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ
Definition: if_iwmreg.h:1631
#define IWM_CSR_INT_BIT_SW_RX
Definition: if_iwmreg.h:204
#define IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE
Definition: if_iwmreg.h:285
#define IWM_REPLY_ERROR
Definition: if_iwmreg.h:1983
#define IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN
Definition: if_iwmreg.h:1803
#define IWM_N_HW_ADDRS
Definition: if_iwmreg.h:2319
#define IWM_TFD_QUEUE_SIZE_MAX
Definition: if_iwmreg.h:1834
#define IWM_SCAN_OFFLOAD_ABORT_CMD
Definition: if_iwmreg.h:2035
#define IWM_WRITE_1(sc, reg, val)
Definition: if_iwmreg.h:6960
#define IWM_REPLY_THERMAL_MNG_BACKOFF
Definition: if_iwmreg.h:2054
#define IWM_RFH_RXF_DMA_DROP_TOO_LARGE_MASK
Definition: if_iwmreg.h:527
#define IWM_NVM_SKU_CAP_BAND_24GHZ
Definition: if_iwmreg.h:2350
#define IWM_CSR_INI_SET_MASK
Definition: if_iwmreg.h:208
#define IWM_TX_CMD_FLG_BT_DIS
Definition: if_iwmreg.h:4830
#define IWM_FRAME_LIMIT
Definition: if_iwmreg.h:6815
#define IWM_RX_MPDU_PHY_SHORT_PREAMBLE
Definition: if_iwmreg.h:3361
#define IWM_INIT_COMPLETE_NOTIF
Definition: if_iwmreg.h:1984
#define IWM_FH_TCSR_CHNL_TX_CONFIG_REG(_chnl)
Definition: if_iwmreg.h:1718
#define IWM_CSR_INT_BIT_RX_PERIODIC
Definition: if_iwmreg.h:198
#define IWM_SCAN_REQ_UMAC
Definition: if_iwmreg.h:1993
#define IWM_MAX_QUOTA
Definition: if_iwmreg.h:2992
static uint32_t iwm_rx_packet_payload_len(const struct iwm_rx_packet *pkt)
Definition: if_iwmreg.h:6944
#define IWM_RFH_Q0_FRBDCB_WIDX
Definition: if_iwmreg.h:444
#define IWM_RFH_Q0_URBDCB_BA_LSB
Definition: if_iwmreg.h:453
#define IWM_CSR_HW_IF_CONFIG_REG_POS_MAC_STEP
Definition: if_iwmreg.h:177
#define IWM_RFH_GEN_CFG_RFH_DMA_SNOOP
Definition: if_iwmreg.h:536
#define IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID
Definition: if_iwmreg.h:1745
#define IWM_ALIVE
Definition: if_iwmreg.h:1982
#define IWM_N_HW_ADDRS_8000
Definition: if_iwmreg.h:2337
#define IWM_FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX
Definition: if_iwmreg.h:1748
#define IWM_REPLY_RX_MPDU_CMD
Definition: if_iwmreg.h:2080
static unsigned int IWM_SCD_QUEUE_STATUS_BITS(unsigned int chnl)
Definition: if_iwmreg.h:1416
#define IWM_UCODE_TLV_API_WIFI_MCC_UPDATE
Definition: if_iwmreg.h:843
#define IWM_ADD_STA
Definition: if_iwmreg.h:1999
#define IWM_CSR_HW_IF_CONFIG_REG
Definition: if_iwmreg.h:94
#define IWM_HOST_INT_TIMEOUT_DEF
Definition: if_iwmreg.h:669
#define IWM_NVM_RF_CFG_STEP_MSK(x)
Definition: if_iwmreg.h:2358
#define IWM_CSR_INT_COALESCING
Definition: if_iwmreg.h:95
#define IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE
Definition: if_iwmreg.h:1365
#define IWM_NUM_UCODE_TLV_CAPA
Definition: if_iwmreg.h:966
#define IWM_FH_MEM_RCSR_CHNL0_RBDCB_WPTR
Definition: if_iwmreg.h:1630
#define IWM_TX_ANT_CONFIGURATION_CMD
Definition: if_iwmreg.h:2063
#define IWM_RATE_24M_PLCP
Definition: if_iwmreg.h:4532
#define IWM_RATE_MCS_ANT_POS
Definition: if_iwmreg.h:4664
#define IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_DASH
Definition: if_iwmreg.h:173
#define le16_to_cpup(_a_)
Definition: if_iwmreg.h:69
#define IWM_FW_PHY_CFG_RX_CHAIN
Definition: if_iwmreg.h:1011
#define IWM_NVM_NUM_OF_SECTIONS
Definition: if_iwmreg.h:2412
#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x)
Definition: if_iwmreg.h:2367
#define IWM_CSR_HW_REV_TYPE_7265D
Definition: if_iwmreg.h:325
#define IWM_WFPM_CTRL_REG
Definition: if_iwmreg.h:617
#define IWM_TX_CMD_OFFLD_PAD
Definition: if_iwmreg.h:4941
#define IWM_RFH_RXF_DMA_MIN_RB_4_8
Definition: if_iwmreg.h:526
#define IWM_SCD_SRAM_BASE_ADDR
Definition: if_iwmreg.h:1390
#define IWM_FH_RSCSR_CHNL0_RBDCB_BASE_REG
Definition: if_iwmreg.h:1585
#define IWM_CSR_INT_BIT_SW_ERR
Definition: if_iwmreg.h:201
#define IWM_TX_CMD_FLG_SEQ_CTL
Definition: if_iwmreg.h:4831
#define IWM_RX_QUEUE_SIZE_LOG
Definition: if_iwmreg.h:1807
#define IWM_LMPM_CHICK
Definition: if_iwmreg.h:557
#define IWM_LTR_CFG_FLAG_FEATURE_ENABLE
Definition: if_iwmreg.h:4143
#define IWM_TLV_UCODE_MAGIC
Definition: if_iwmreg.h:1166
#define IWM_MCAST_FILTER_CMD
Definition: if_iwmreg.h:2103
#define IWM_TIME_EVENT_NOTIFICATION
Definition: if_iwmreg.h:2016
#define IWM_RATE_1M_PLCP
Definition: if_iwmreg.h:4536
static uint32_t iwm_rx_packet_len(const struct iwm_rx_packet *pkt)
Definition: if_iwmreg.h:6937
#define IWM_SCD_AGGR_SEL
Definition: if_iwmreg.h:1397
#define IWM_NVM_RF_CFG_PNUM_MSK(x)
Definition: if_iwmreg.h:2360
#define IWM_RATE_18M_PLCP
Definition: if_iwmreg.h:4531
#define IWM_UCODE_API(ver)
Definition: if_iwmreg.h:986
#define IWM_PHY_OPS_GROUP
Definition: if_iwmreg.h:2144
#define IWM_RATE_MCS_ANT_A_MSK
Definition: if_iwmreg.h:4665
#define IWM_RFH_GEN_CFG_RB_CHUNK_SIZE_64
Definition: if_iwmreg.h:538
@ IWM_SILICON_C_STEP
Definition: if_iwmreg.h:306
#define IWM_CSR_HW_IF_CONFIG_REG_BIT_MAC_SI
Definition: if_iwmreg.h:170
#define IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS
Definition: if_iwmreg.h:1362
#define IWM_SCD_TRANS_TBL_MEM_UPPER_BOUND
Definition: if_iwmreg.h:1377
#define IWM_TX_STATUS_FAIL_LIFE_EXPIRE
Definition: if_iwmreg.h:5037
#define IWM_UCODE_MAJOR(ver)
Definition: if_iwmreg.h:984
#define IWM_RFH_Q0_FRBDCB_WIDX_TRG
Definition: if_iwmreg.h:447
@ IWM_SF_UNINIT
Definition: if_iwmreg.h:3733
#define IWM_MCC_CHUB_UPDATE_CMD
Definition: if_iwmreg.h:2085
#define IWM_NVM_CHANNEL_ACTIVE
Definition: if_iwmreg.h:2385
#define IWM_PAGING_SEPARATOR_SECTION
Definition: if_iwmreg.h:981
#define IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE
Definition: if_iwmreg.h:1739
#define IWM_RTS_DFAULT_RETRY_LIMIT
Definition: if_iwmreg.h:4916
#define IWM_SCAN_ITERATION_COMPLETE
Definition: if_iwmreg.h:2041
#define IWM_RX_MPDU_RES_STATUS_CRC_OK
Definition: if_iwmreg.h:3320
#define IWM_NVM_SECTION_TYPE_REGULATORY
Definition: if_iwmreg.h:2402
#define IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS
Definition: if_iwmreg.h:1222
#define IWM_ALIVE_STATUS_OK
Definition: if_iwmreg.h:2524
#define IWM_BINDING_CONTEXT_CMD
Definition: if_iwmreg.h:2017
#define IWM_RX_INFO_ENERGY_ANT_ABC_IDX
Definition: if_iwmreg.h:3188
#define IWM_FH_TFDIB_CTRL0_REG(_chnl)
Definition: if_iwmreg.h:1686
#define IWM_REPLY_SF_CFG_CMD
Definition: if_iwmreg.h:2093
#define IWM_UCODE_TLV_CAPA_LAR_SUPPORT
Definition: if_iwmreg.h:922
#define IWM_RX_INFO_ENERGY_ANT_A_MSK
Definition: if_iwmreg.h:3189
#define IWM_BT_CONFIG
Definition: if_iwmreg.h:2064
#define IWM_FW_PHY_CFG_RADIO_DASH_POS
Definition: if_iwmreg.h:1006
#define IWM_HBUS_TARG_WRPTR
Definition: if_iwmreg.h:656
#define IWM_CSR_HW_IF_CONFIG_REG_MSK_PHY_TYPE
Definition: if_iwmreg.h:172
#define IWM_RFH_Q0_URBDCB_WIDX
Definition: if_iwmreg.h:456
#define IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ
Definition: if_iwmreg.h:286
#define IWM_RADIO_CFG
Definition: if_iwmreg.h:2317
#define IWM_TX_STATUS_MSK
Definition: if_iwmreg.h:5021
#define IWM_NVM_RF_CFG_DASH_MSK(x)
Definition: if_iwmreg.h:2357
#define IWM_FH_RSCSR_FRAME_INVALID
Definition: if_iwmreg.h:6929
#define IWM_RATE_12M_PLCP
Definition: if_iwmreg.h:4530
#define IWM_CSR_INT_PERIODIC_ENA
Definition: if_iwmreg.h:192
#define IWM_ENABLE_WFPM
Definition: if_iwmreg.h:619
#define IWM_NVM_LAR_OFFSET_8000_OLD
Definition: if_iwmreg.h:2341
#define IWM_WIDE_ID(grp, opcode)
Definition: if_iwmreg.h:6854
#define IWM_BT_COEX_HIGH_BAND_RET
Definition: if_iwmreg.h:6606
#define IWM_TX_CMD
Definition: if_iwmreg.h:2003
#define IWM_FW_PHY_CFG_RX_CHAIN_POS
Definition: if_iwmreg.h:1010
#define le32_to_cpup(_a_)
Definition: if_iwmreg.h:70
#define IWM_RATE_48M_PLCP
Definition: if_iwmreg.h:4534
@ IWM_PM_FRAME_MGMT
Definition: if_iwmreg.h:4857
@ IWM_PM_FRAME_NONE
Definition: if_iwmreg.h:4856
@ IWM_PM_FRAME_ASSOC
Definition: if_iwmreg.h:4858
#define IWM_CMD_QUEUE
Definition: if_iwmreg.h:1967
#define IWM_FH_UCODE_LOAD_STATUS
Definition: if_iwmreg.h:435
#define IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG
Definition: if_iwmreg.h:1578
#define IWM_HW_ADDR
Definition: if_iwmreg.h:2313
#define IWM_FH_RSCSR_CHNL0_RDPTR
Definition: if_iwmreg.h:1597
#define IWM_FLAG_STOPPED
Definition: if_iwmvar.h:420
#define IWM_RX_RADIOTAP_PRESENT
Definition: if_iwmvar.h:118
#define IWM_RX_LEGACY_RING_COUNT
Definition: if_iwmvar.h:288
#define IWM_TX_RADIOTAP_PRESENT
Definition: if_iwmvar.h:134
#define IWM_FLAG_SCANNING
Definition: if_iwmvar.h:423
#define IWM_LOCK_DESTROY(_sc)
Definition: if_iwmvar.h:568
#define IWM_TX_RING_COUNT
Definition: if_iwmvar.h:263
#define IWM_UNLOCK(_sc)
Definition: if_iwmvar.h:567
#define IWM_DEFAULT_COLOR
Definition: if_iwmvar.h:398
#define IWM_DEFAULT_MACID
Definition: if_iwmvar.h:397
#define IWM_NODE(_ni)
Definition: if_iwmvar.h:392
#define IWM_VAP(_vap)
Definition: if_iwmvar.h:382
#define IWM_RX_MQ_RING_COUNT
Definition: if_iwmvar.h:289
#define IWM_RBUF_SIZE
Definition: if_iwmvar.h:291
#define IWM_TX_RING_HIMARK
Definition: if_iwmvar.h:265
#define IWM_TE_SESSION_PROTECTION_MAX_TIME_MS
Definition: if_iwmvar.h:315
#define IWM_TX_RING_LOMARK
Definition: if_iwmvar.h:264
iwm_ucode_type
Definition: if_iwmvar.h:152
@ IWM_UCODE_REGULAR
Definition: if_iwmvar.h:153
@ IWM_UCODE_WOWLAN
Definition: if_iwmvar.h:155
@ IWM_UCODE_TYPE_MAX
Definition: if_iwmvar.h:157
@ IWM_UCODE_REGULAR_USNIFFER
Definition: if_iwmvar.h:156
@ IWM_UCODE_INIT
Definition: if_iwmvar.h:154
@ IWM_CMD_SYNC
Definition: if_iwmvar.h:324
@ IWM_CMD_WANT_SKB
Definition: if_iwmvar.h:326
@ IWM_CMD_SEND_IN_RFKILL
Definition: if_iwmvar.h:327
#define IWM_LOCK_INIT(_sc)
Definition: if_iwmvar.h:563
#define IWM_ICT_SIZE
Definition: if_iwmvar.h:401
#define IWM_MAX_SCATTER
Definition: if_iwmvar.h:293
#define IWM_STATION_ID
Definition: if_iwmvar.h:394
#define IWM_NUM_CHANNELS
Definition: if_iwmvar.h:210
#define IWM_FLAG_RFKILL
Definition: if_iwmvar.h:421
#define IWM_ICT_COUNT
Definition: if_iwmvar.h:402
static bool iwm_fw_has_capa(struct iwm_softc *sc, unsigned int capa)
Definition: if_iwmvar.h:577
#define IWM_NUM_CHANNELS_8000
Definition: if_iwmvar.h:211
#define IWM_LOCK(_sc)
Definition: if_iwmvar.h:566
#define IWM_FLAG_SCAN_RUNNING
Definition: if_iwmvar.h:424
#define IWM_FLAG_HW_INITED
Definition: if_iwmvar.h:419
static bool iwm_fw_has_api(struct iwm_softc *sc, unsigned int api)
Definition: if_iwmvar.h:571
#define IWM_FLAG_USE_ICT
Definition: if_iwmvar.h:418
#define IWM_FLAG_BUSY
Definition: if_iwmvar.h:422
#define IWM_ICT_PADDR_SHIFT
Definition: if_iwmvar.h:403
#define IWM_UCODE_SECTION_MAX
Definition: if_iwmvar.h:140
uint16_t sequence
Definition: if_iwmreg.h:5132
uint16_t tfd_offset[IWM_TFD_QUEUE_BC_SIZE]
Definition: if_iwmreg.h:1907
uint32_t scd_base_addr
Definition: if_iwm.c:239
struct iwm_lmac_alive lmac_data
Definition: if_iwmreg.h:2556
struct iwm_umac_alive umac_data
Definition: if_iwmreg.h:2557
struct iwm_lmac_alive lmac_data[2]
Definition: if_iwmreg.h:2563
struct iwm_umac_alive umac_data
Definition: if_iwmreg.h:2564
uint16_t status
Definition: if_iwmreg.h:2561
uint32_t enabled_modules
Definition: if_iwmreg.h:6618
uint32_t mode
Definition: if_iwmreg.h:6617
uint32_t flow_trigger
Definition: if_iwmreg.h:2204
uint32_t event_trigger
Definition: if_iwmreg.h:2205
uint16_t eeprom_size
int mqrx_supported
enum iwm_nvm_type nvm_type
enum iwm_device_family device_family
int host_interrupt_operation_mode
uint8_t nvm_hw_section_num
int integrated
const char * fw_name
uint8_t flags
Definition: if_iwmreg.h:6861
uint8_t code
Definition: if_iwmreg.h:6860
struct iwm_cmd_header hdr
Definition: if_iwmreg.h:6894
uint8_t data[IWM_DEF_CMD_PAYLOAD_SIZE]
Definition: if_iwmreg.h:6895
const struct iwm_cfg * cfg
Definition: if_iwm.c:5789
uint16_t device
Definition: if_iwm.c:5788
bus_dma_tag_t tag
Definition: if_iwmvar.h:245
void * vaddr
Definition: if_iwmvar.h:249
bus_dmamap_t map
Definition: if_iwmvar.h:246
bus_size_t size
Definition: if_iwmvar.h:250
bus_addr_t paddr
Definition: if_iwmvar.h:248
uint32_t l2p_addr_match
Definition: if_iwm.c:5049
uint32_t lmpm_pmg_sel
Definition: if_iwm.c:5050
uint32_t l2p_duration
Definition: if_iwm.c:5047
uint32_t l2p_control
Definition: if_iwm.c:5046
uint32_t bcon_time
Definition: if_iwm.c:5020
uint32_t last_cmd_id
Definition: if_iwm.c:5044
uint32_t frame_ptr
Definition: if_iwm.c:5031
uint32_t trm_hw_status0
Definition: if_iwm.c:5012
uint32_t stack_ptr
Definition: if_iwm.c:5032
uint32_t u_timestamp
Definition: if_iwm.c:5052
uint32_t flow_handler
Definition: if_iwm.c:5054
uint32_t fw_rev_type
Definition: if_iwm.c:5025
uint32_t l2p_mhvalid
Definition: if_iwm.c:5048
uint32_t wait_event
Definition: if_iwm.c:5045
uint32_t trm_hw_status1
Definition: if_iwm.c:5013
uint8_t cmd_id
Definition: if_iwmreg.h:2619
uint32_t error_type
Definition: if_iwmreg.h:2618
struct iwm_fw_cipher_scheme cs[]
Definition: if_iwmreg.h:1048
uint32_t offset
Definition: if_iwmvar.h:172
const void * data
Definition: if_iwmvar.h:170
uint32_t len
Definition: if_iwmvar.h:171
struct iwm_fw_desc sec[IWM_UCODE_SECTION_MAX]
Definition: if_iwmvar.h:176
int fw_count
Definition: if_iwmvar.h:177
uint32_t paging_mem_size
Definition: if_iwmvar.h:179
int is_dual_cpus
Definition: if_iwmvar.h:178
struct iwm_ucode_capabilities ucode_capa
Definition: if_iwmvar.h:188
uint32_t phy_config
Definition: if_iwmvar.h:190
const struct firmware * fw_fp
Definition: if_iwmvar.h:183
uint8_t valid_rx_ant
Definition: if_iwmvar.h:192
struct iwm_fw_img img[IWM_UCODE_TYPE_MAX]
Definition: if_iwmvar.h:186
uint8_t valid_tx_ant
Definition: if_iwmvar.h:191
uint32_t id
Definition: if_iwmvar.h:232
struct iwm_rx_packet * resp_pkt
Definition: if_iwmvar.h:226
uint16_t len[IWM_MAX_CMD_TBS_PER_TFD]
Definition: if_iwmvar.h:233
const void * data[IWM_MAX_CMD_TBS_PER_TFD]
Definition: if_iwmvar.h:225
uint32_t error_event_table_ptr
Definition: if_iwmreg.h:2536
uint8_t ver_subtype
Definition: if_iwmreg.h:2531
uint32_t log_event_table_ptr
Definition: if_iwmreg.h:2537
uint32_t scd_base_ptr
Definition: if_iwmreg.h:2541
uint8_t ver_type
Definition: if_iwmreg.h:2532
uint8_t dual_stream_ant_msk
Definition: if_iwmreg.h:4768
uint8_t flags
Definition: if_iwmreg.h:4765
uint8_t sta_id
Definition: if_iwmreg.h:4761
uint32_t rs_table[IWM_LQ_MAX_RETRY_NUM]
Definition: if_iwmreg.h:4775
uint8_t single_stream_ant_msk
Definition: if_iwmreg.h:4767
uint32_t consec_missed_beacons_since_last_rx
Definition: if_iwmreg.h:3459
uint32_t consec_missed_beacons
Definition: if_iwmreg.h:3460
int in_assoc
Definition: if_iwmvar.h:388
struct ieee80211_node in_ni
Definition: if_iwmvar.h:385
struct iwm_lq_cmd in_lq
Definition: if_iwmvar.h:390
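The three if_iwmvar.h members above (385-390) belong to the driver's per-station node. A partial sketch; further members of the real struct are omitted and the comments are best-effort readings.

struct iwm_node {
	struct ieee80211_node in_ni;	/* embedded net80211 node, conventionally first */
	int in_assoc;			/* station has been marked associated to the firmware */
	struct iwm_lq_cmd in_lq;	/* cached link-quality (rate selection) command */
};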
struct iwm_statistics_rx rx
Definition: if_iwmreg.h:3720
uint8_t radio_cfg_pnum
Definition: if_iwmvar.h:208
int n_hw_addrs
Definition: if_iwmvar.h:196
uint8_t radio_cfg_type
Definition: if_iwmvar.h:205
int sku_cap_band_52GHz_enable
Definition: if_iwmvar.h:200
uint16_t nvm_ch_flags[]
Definition: if_iwmvar.h:217
uint8_t valid_rx_ant
Definition: if_iwmvar.h:209
uint8_t radio_cfg_step
Definition: if_iwmvar.h:206
int sku_cap_11n_enable
Definition: if_iwmvar.h:201
uint16_t nvm_version
Definition: if_iwmvar.h:213
boolean_t lar_enabled
Definition: if_iwmvar.h:216
uint8_t radio_cfg_dash
Definition: if_iwmvar.h:207
uint8_t valid_tx_ant
Definition: if_iwmvar.h:209
int sku_cap_band_24GHz_enable
Definition: if_iwmvar.h:199
uint8_t hw_addr[IEEE80211_ADDR_LEN]
Definition: if_iwmvar.h:197
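The if_iwmvar.h:196-217 members above are the parsed NVM contents. A sketch ordered by those line numbers; members of the in-tree struct not referenced on this page are left out and the comments are interpretive.

struct iwm_nvm_data {
	int n_hw_addrs;				/* number of MAC addresses provisioned */
	uint8_t hw_addr[IEEE80211_ADDR_LEN];	/* primary MAC address */
	int sku_cap_band_24GHz_enable;		/* SKU allows the 2.4 GHz band */
	int sku_cap_band_52GHz_enable;		/* SKU allows the 5 GHz band */
	int sku_cap_11n_enable;			/* SKU allows HT (11n) operation */
	uint8_t radio_cfg_type;			/* radio configuration: type/step/dash/pnum */
	uint8_t radio_cfg_step;
	uint8_t radio_cfg_dash;
	uint8_t radio_cfg_pnum;
	uint8_t valid_tx_ant;			/* antenna masks from the NVM */
	uint8_t valid_rx_ant;
	uint16_t nvm_version;			/* NVM image version */
	boolean_t lar_enabled;			/* location-aware regulatory enabled */
	uint16_t nvm_ch_flags[];		/* per-channel flags; flexible array member */
};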
uint8_t * data
Definition: if_iwm.c:231
uint16_t length
Definition: if_iwm.c:230
uint32_t phy_cfg
Definition: if_iwmreg.h:2235
struct iwm_calib_ctrl calib_control
Definition: if_iwmreg.h:2236
struct ieee80211_channel * channel
Definition: if_iwmvar.h:343
uint32_t ref
Definition: if_iwmvar.h:342
uint16_t color
Definition: if_iwmvar.h:341
uint16_t id
Definition: if_iwmvar.h:340
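The if_iwmvar.h:340-343 members above describe one firmware PHY context (sc_phyctxt[] in the softc further down). A sketch of those members only:

struct iwm_phy_ctxt {
	uint16_t id;				/* PHY context id used toward the firmware */
	uint16_t color;				/* color value qualifying the id */
	uint32_t ref;				/* number of MAC contexts referencing it */
	struct ieee80211_channel *channel;	/* channel currently programmed */
};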
struct iwm_softc * sc
uint8_t rate
Definition: if_iwm.c:207
uint8_t plcp
Definition: if_iwm.c:208
uint16_t closed_rb_num
Definition: if_iwmreg.h:1826
bus_dmamap_t map
Definition: if_iwmvar.h:297
struct mbuf * m
Definition: if_iwmvar.h:296
uint32_t rate_n_flags
Definition: if_iwmreg.h:3374
uint32_t gp2_on_air_rise
Definition: if_iwmreg.h:3379
uint8_t mac_flags2
Definition: if_iwmreg.h:3401
uint16_t status
Definition: if_iwmreg.h:3410
uint16_t mpdu_len
Definition: if_iwmreg.h:3399
struct iwm_rx_mpdu_desc_v1 v1
Definition: if_iwmreg.h:3414
uint16_t phy_info
Definition: if_iwmreg.h:3403
uint32_t len_n_flags
Definition: if_iwmreg.h:6923
uint8_t data[]
Definition: if_iwmreg.h:6925
struct iwm_cmd_header hdr
Definition: if_iwmreg.h:6924
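The if_iwmreg.h:6923-6925 members above form the generic receive/notification packet that every firmware response is wrapped in. A sketch of those members; the real declaration is a packed wire structure.

struct iwm_rx_packet {
	uint32_t len_n_flags;		/* payload length combined with flag bits */
	struct iwm_cmd_header hdr;	/* header identifying the originating command */
	uint8_t data[];			/* notification or response payload */
} __packed;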
uint16_t phy_flags
Definition: if_iwmreg.h:3243
uint32_t system_timestamp
Definition: if_iwmreg.h:3240
uint32_t non_cfg_phy[IWM_RX_INFO_PHY_CNT]
Definition: if_iwmreg.h:3246
uint16_t channel
Definition: if_iwmreg.h:3245
uint8_t cfg_phy_cnt
Definition: if_iwmreg.h:3237
struct ieee80211_radiotap_header wr_ihdr
Definition: if_iwmvar.h:108
struct iwm_dma_info free_desc_dma
Definition: if_iwmvar.h:301
struct iwm_dma_info used_desc_dma
Definition: if_iwmvar.h:302
bus_dma_tag_t data_dmat
Definition: if_iwmvar.h:309
bus_dmamap_t spare_map
Definition: if_iwmvar.h:308
struct iwm_dma_info stat_dma
Definition: if_iwmvar.h:303
void * desc
Definition: if_iwmvar.h:305
struct iwm_rx_data data[512]
Definition: if_iwmvar.h:307
struct iwm_rb_status * stat
Definition: if_iwmvar.h:306
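The if_iwmvar.h:301-309 members above, together with the m/map pair at 296-297 listed earlier, are the RX ring bookkeeping. A sketch limited to those members; ring indices and similar counters of the in-tree struct are omitted.

struct iwm_rx_data {
	struct mbuf	*m;	/* mbuf currently backing this RX slot */
	bus_dmamap_t	map;	/* DMA map loaded with that mbuf */
};

struct iwm_rx_ring {
	struct iwm_dma_info	free_desc_dma;	/* descriptor table handed to the device */
	struct iwm_dma_info	used_desc_dma;	/* "used" descriptor table (multi-queue RX) */
	struct iwm_dma_info	stat_dma;	/* write-back status area */
	void			*desc;		/* CPU view of the descriptor table */
	struct iwm_rb_status	*stat;		/* CPU view of the status area */
	struct iwm_rx_data	data[512];	/* per-slot mbuf/map bookkeeping */
	bus_dmamap_t		spare_map;	/* spare map so a failed refill loses nothing */
	bus_dma_tag_t		data_dmat;	/* tag the RX buffer maps come from */
};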
uint32_t umac_error_event_table
Definition: if_iwmvar.h:529
int sc_firmware_state
Definition: if_iwmvar.h:551
struct iwm_tlv_calib_ctrl sc_default_calib[IWM_UCODE_TYPE_MAX]
Definition: if_iwmvar.h:482
void * sc_ih
Definition: if_iwmvar.h:439
int sc_ltr_enabled
Definition: if_iwmvar.h:548
struct iwm_tx_radiotap_header sc_txtap
Definition: if_iwmvar.h:518
struct iwm_fw_info sc_fw
Definition: if_iwmvar.h:481
struct mbufq sc_snd
Definition: if_iwmvar.h:413
int sc_max_rssi
Definition: if_iwmvar.h:520
int sc_fw_chunk_done
Definition: if_iwmvar.h:460
bus_space_tag_t sc_st
Definition: if_iwmvar.h:435
uint32_t error_event_table[2]
Definition: if_iwmvar.h:527
int sc_generation
Definition: if_iwmvar.h:479
int sc_hw_rev
Definition: if_iwmvar.h:454
int support_umac_log
Definition: if_iwmvar.h:530
struct taskqueue * sc_tq
Definition: if_iwmvar.h:502
struct resource * sc_mem
Definition: if_iwmvar.h:434
uint32_t log_event_table
Definition: if_iwmvar.h:528
int ict_cur
Definition: if_iwmvar.h:452
struct task sc_rftoggle_task
Definition: if_iwmvar.h:504
int cmd_hold_nic_awake
Definition: if_iwmvar.h:524
int sc_wantresp
Definition: if_iwmvar.h:500
int ucode_loaded
Definition: if_iwmvar.h:463
int sc_noise
Definition: if_iwmvar.h:515
struct iwm_rx_ring rxq
Definition: if_iwmvar.h:447
enum iwm_sf_state sf_state
Definition: if_iwmvar.h:543
int sc_flags
Definition: if_iwmvar.h:417
struct mtx sc_mtx
Definition: if_iwmvar.h:412
struct iwm_dma_info kw_dma
Definition: if_iwmvar.h:457
uint32_t scd_base_addr
Definition: if_iwmvar.h:443
enum iwm_ucode_type cur_ucode
Definition: if_iwmvar.h:462
int sc_attached
Definition: if_iwmvar.h:410
device_t sc_dev
Definition: if_iwmvar.h:408
boolean_t last_ebs_successful
Definition: if_iwmvar.h:540
struct iwm_tx_ring txq[IWM_MAX_QUEUES]
Definition: if_iwmvar.h:446
struct iwm_rx_radiotap_header sc_rxtap
Definition: if_iwmvar.h:517
char sc_fw_mcc[3]
Definition: if_iwmvar.h:466
int qfullmsk
Definition: if_iwmvar.h:448
struct iwm_dma_info ict_dma
Definition: if_iwmvar.h:451
struct callout sc_watchdog_to
Definition: if_iwmvar.h:428
struct resource * sc_irq
Definition: if_iwmvar.h:433
bus_dma_tag_t sc_dmat
Definition: if_iwmvar.h:438
struct task sc_es_task
Definition: if_iwmvar.h:503
struct ieee80211_ratectl_tx_status sc_txs
Definition: if_iwmvar.h:415
struct iwm_dma_info sched_dma
Definition: if_iwmvar.h:442
struct iwm_phy_ctxt sc_phyctxt[IWM_NUM_PHY_CTX]
Definition: if_iwmvar.h:512
struct iwm_dma_info fw_dma
Definition: if_iwmvar.h:458
const struct iwm_cfg * cfg
Definition: if_iwmvar.h:484
uint8_t sc_cmd_resp[IWM_CMD_RESP_MAX]
Definition: if_iwmvar.h:499
struct iwm_nvm_data * nvm_data
Definition: if_iwmvar.h:485
struct iwm_phy_db * sc_phy_db
Definition: if_iwmvar.h:486
char sc_fwver[32]
Definition: if_iwmvar.h:464
struct iwm_notif_wait_data * sc_notif_wait
Definition: if_iwmvar.h:522
struct iwm_notif_statistics sc_stats
Definition: if_iwmvar.h:514
boolean_t sc_ps_disabled
Definition: if_iwmvar.h:546
struct intr_config_hook sc_preinit_hook
Definition: if_iwmvar.h:427
struct ieee80211com sc_ic
Definition: if_iwmvar.h:414
bus_space_handle_t sc_sh
Definition: if_iwmvar.h:436
struct iwm_rx_phy_info sc_last_phy_info
Definition: if_iwmvar.h:506
int sc_intmask
Definition: if_iwmvar.h:468
struct callout sc_led_blink_to
Definition: if_iwmvar.h:429
uint32_t sc_debug
Definition: if_iwmvar.h:409
int sc_tx_timer
Definition: if_iwmvar.h:490
uint32_t beacon_silence_rssi[3]
Definition: if_iwmreg.h:3569
struct iwm_statistics_rx_non_phy general
Definition: if_iwmreg.h:3698
uint32_t lo
Definition: if_iwmreg.h:1856
uint16_t hi_n_len
Definition: if_iwmreg.h:1857
uint8_t num_tbs
Definition: if_iwmreg.h:1890
struct iwm_tfd_tb tbs[IWM_NUM_OF_TBS]
Definition: if_iwmreg.h:1891
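The if_iwmreg.h:1856-1891 members above are the transmit frame descriptor and its scatter/gather entries. A partial sketch of the packed wire format; leading reserved bytes and trailing padding are not referenced on this page and are omitted.

struct iwm_tfd_tb {
	uint32_t lo;		/* low 32 bits of the buffer's bus address */
	uint16_t hi_n_len;	/* upper address bits packed together with the length */
} __packed;

struct iwm_tfd {
	uint8_t num_tbs;			/* number of valid entries in tbs[] */
	struct iwm_tfd_tb tbs[IWM_NUM_OF_TBS];	/* per-buffer entries of this descriptor */
} __packed;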
struct iwm_time_quota_data_v1 quotas[IWM_MAX_BINDINGS]
Definition: if_iwmreg.h:3013
uint32_t event_trigger
Definition: if_iwmreg.h:999
uint32_t flow_trigger
Definition: if_iwmreg.h:998
struct iwm_tlv_calib_ctrl calib
Definition: if_iwm.c:457
uint32_t ucode_type
Definition: if_iwm.c:456
uint8_t sec_ctl
Definition: if_iwmreg.h:4994
uint8_t data_retry_limit
Definition: if_iwmreg.h:5003
uint8_t dram_msb_ptr
Definition: if_iwmreg.h:5001
uint16_t pm_frame_timeout
Definition: if_iwmreg.h:5005
uint32_t life_time
Definition: if_iwmreg.h:4999
uint16_t offload_assist
Definition: if_iwmreg.h:4985
uint32_t dram_lsb_ptr
Definition: if_iwmreg.h:5000
uint8_t initial_rate_index
Definition: if_iwmreg.h:4995
uint8_t sta_id
Definition: if_iwmreg.h:4993
uint8_t tid_tspec
Definition: if_iwmreg.h:5004
uint32_t tx_flags
Definition: if_iwmreg.h:4986
uint8_t rts_retry_limit
Definition: if_iwmreg.h:5002
uint32_t rate_n_flags
Definition: if_iwmreg.h:4992
uint16_t len
Definition: if_iwmreg.h:4984
bus_addr_t cmd_paddr
Definition: if_iwmvar.h:269
bus_addr_t scratch_paddr
Definition: if_iwmvar.h:270
bus_dmamap_t map
Definition: if_iwmvar.h:268
struct mbuf * m
Definition: if_iwmvar.h:271
struct iwm_node * in
Definition: if_iwmvar.h:272
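The if_iwmvar.h:268-272 members above are the per-slot TX bookkeeping referenced by the TX ring below. A sketch of those members:

struct iwm_tx_data {
	bus_dmamap_t	map;		/* DMA map for the frame's mbuf chain */
	bus_addr_t	cmd_paddr;	/* bus address of this slot's TX command */
	bus_addr_t	scratch_paddr;	/* bus address of the command's scratch area */
	struct mbuf	*m;		/* frame awaiting its TX completion */
	struct iwm_node	*in;		/* destination node, used when reporting status */
};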
struct ieee80211_radiotap_header wt_ihdr
Definition: if_iwmvar.h:127
uint16_t wireless_media_time
Definition: if_iwmreg.h:5185
struct iwm_agg_tx_status status
Definition: if_iwmreg.h:5201
uint8_t frame_count
Definition: if_iwmreg.h:5180
uint8_t failure_rts
Definition: if_iwmreg.h:5182
uint32_t initial_rate
Definition: if_iwmreg.h:5184
uint8_t failure_frame
Definition: if_iwmreg.h:5183
uint8_t bt_kill_count
Definition: if_iwmreg.h:5181
struct iwm_dma_info desc_dma
Definition: if_iwmvar.h:277
struct iwm_tx_data data[IWM_TX_RING_COUNT]
Definition: if_iwmvar.h:282
struct iwm_device_cmd * cmd
Definition: if_iwmvar.h:280
struct iwm_tfd * desc
Definition: if_iwmvar.h:279
bus_dma_tag_t data_dmat
Definition: if_iwmvar.h:281
struct iwm_dma_info cmd_dma
Definition: if_iwmvar.h:278
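The if_iwmvar.h:277-282 members above make up one TX ring. A sketch restricted to those members; the queue id and producer/consumer indices of the in-tree struct are not listed on this page.

struct iwm_tx_ring {
	struct iwm_dma_info	desc_dma;	/* backing storage for the TFD array */
	struct iwm_dma_info	cmd_dma;	/* backing storage for the command array */
	struct iwm_tfd		*desc;		/* CPU view of the TFDs */
	struct iwm_device_cmd	*cmd;		/* CPU view of the per-slot commands */
	bus_dma_tag_t		data_dmat;	/* tag for the per-frame mbuf maps */
	struct iwm_tx_data	data[IWM_TX_RING_COUNT]; /* per-slot bookkeeping */
};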
uint32_t api_flags
Definition: if_iwmreg.h:1158
uint32_t api_index
Definition: if_iwmreg.h:1157
uint32_t api_capa
Definition: if_iwmreg.h:1163
uint32_t api_index
Definition: if_iwmreg.h:1162
uint32_t max_probe_length
Definition: if_iwmvar.h:161
uint8_t enabled_capa[howmany(IWM_NUM_UCODE_TLV_CAPA, NBBY)]
Definition: if_iwmvar.h:165
uint8_t enabled_api[howmany(IWM_NUM_UCODE_TLV_API, NBBY)]
Definition: if_iwmvar.h:164
uint32_t n_scan_channels
Definition: if_iwmvar.h:162
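The if_iwmvar.h:161-165 members above collect what the firmware TLVs advertise. A sketch of those members; howmany() and NBBY come from <sys/param.h>.

struct iwm_ucode_capabilities {
	uint32_t max_probe_length;	/* longest probe request the firmware accepts */
	uint32_t n_scan_channels;	/* channels supported per scan command */
	uint8_t enabled_api[howmany(IWM_NUM_UCODE_TLV_API, NBBY)];   /* API flag bitmap */
	uint8_t enabled_capa[howmany(IWM_NUM_UCODE_TLV_CAPA, NBBY)]; /* capability bitmap */
};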
uint8_t data[0]
Definition: if_iwmreg.h:1153
uint32_t length
Definition: if_iwmreg.h:1152
uint32_t type
Definition: if_iwmreg.h:1151
uint32_t error_info_addr
Definition: if_iwmreg.h:2549
uint32_t umac_minor
Definition: if_iwmreg.h:2548
uint32_t umac_major
Definition: if_iwmreg.h:2547
uint16_t id
Definition: if_iwmvar.h:363
boolean_t have_wme
Definition: if_iwmvar.h:366
struct iwm_phy_ctxt * phy_ctxt
Definition: if_iwmvar.h:361
uint8_t aifsn
Definition: if_iwmvar.h:376
struct ieee80211vap iv_vap
Definition: if_iwmvar.h:354
uint16_t edca_txop
Definition: if_iwmvar.h:375
uint16_t cw_max
Definition: if_iwmvar.h:374
uint16_t cw_min
Definition: if_iwmvar.h:373
uint16_t color
Definition: if_iwmvar.h:364
boolean_t ps_disabled
Definition: if_iwmvar.h:380
struct iwm_vap::<anonymous> queue_params[WME_NUM_AC]
int is_uploaded
Definition: if_iwmvar.h:355
int(* iv_newstate)(struct ieee80211vap *, enum ieee80211_state, int)
Definition: if_iwmvar.h:358
int iv_auth
Definition: if_iwmvar.h:356
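The if_iwmvar.h:354-380 members above belong to the driver's vap wrapper, including the anonymous per-access-category structure behind queue_params. A sketch of just the members referenced on this page; the comments are interpretive.

struct iwm_vap {
	struct ieee80211vap iv_vap;	/* embedded net80211 vap, conventionally first */
	int is_uploaded;		/* MAC context has been pushed to the firmware */
	int iv_auth;			/* authentication-related state flag */
	int (*iv_newstate)(struct ieee80211vap *, enum ieee80211_state, int);
					/* saved net80211 state-change handler the driver chains to */
	struct iwm_phy_ctxt *phy_ctxt;	/* PHY context bound to this vap */
	uint16_t id;			/* firmware MAC context id */
	uint16_t color;			/* color value qualifying the id */
	boolean_t have_wme;		/* queue_params[] below holds valid data */
	struct {
		uint16_t cw_min;	/* contention window minimum */
		uint16_t cw_max;	/* contention window maximum */
		uint16_t edca_txop;	/* TXOP limit */
		uint8_t aifsn;		/* AIFS number */
	} queue_params[WME_NUM_AC];	/* per-AC EDCA parameters */
	boolean_t ps_disabled;		/* power save forced off for this vap */
};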