FreeBSD kernel amd64 PCI device code
pci.c
1/*-
2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3 *
4 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
5 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
6 * Copyright (c) 2000, BSDi
7 * All rights reserved.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice unmodified, this list of conditions, and the following
14 * disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30
31#include <sys/cdefs.h>
32__FBSDID("$FreeBSD$");
33
34#include "opt_acpi.h"
35#include "opt_iommu.h"
36#include "opt_bus.h"
37
38#include <sys/param.h>
39#include <sys/conf.h>
40#include <sys/endian.h>
41#include <sys/eventhandler.h>
42#include <sys/fcntl.h>
43#include <sys/kernel.h>
44#include <sys/limits.h>
45#include <sys/linker.h>
46#include <sys/malloc.h>
47#include <sys/module.h>
48#include <sys/queue.h>
49#include <sys/sbuf.h>
50#include <sys/sysctl.h>
51#include <sys/systm.h>
52#include <sys/taskqueue.h>
53#include <sys/tree.h>
54
55#include <vm/vm.h>
56#include <vm/pmap.h>
57#include <vm/vm_extern.h>
58
59#include <sys/bus.h>
60#include <machine/bus.h>
61#include <sys/rman.h>
62#include <machine/resource.h>
63#include <machine/stdarg.h>
64
65#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
66#include <machine/intr_machdep.h>
67#endif
68
69#include <sys/pciio.h>
70#include <dev/pci/pcireg.h>
71#include <dev/pci/pcivar.h>
72#include <dev/pci/pci_private.h>
73
74#ifdef PCI_IOV
75#include <sys/nv.h>
76#include <dev/pci/pci_iov_private.h>
77#endif
78
79#include <dev/usb/controller/xhcireg.h>
80#include <dev/usb/controller/ehcireg.h>
81#include <dev/usb/controller/ohcireg.h>
82#include <dev/usb/controller/uhcireg.h>
83
84#include <dev/iommu/iommu.h>
85
86#include "pcib_if.h"
87#include "pci_if.h"
88
89#define PCIR_IS_BIOS(cfg, reg) \
90 (((cfg)->hdrtype == PCIM_HDRTYPE_NORMAL && reg == PCIR_BIOS) || \
91 ((cfg)->hdrtype == PCIM_HDRTYPE_BRIDGE && reg == PCIR_BIOS_1))
92
93static int pci_has_quirk(uint32_t devid, int quirk);
94static pci_addr_t pci_mapbase(uint64_t mapreg);
95static const char *pci_maptype(uint64_t mapreg);
96static int pci_maprange(uint64_t mapreg);
97static pci_addr_t pci_rombase(uint64_t mapreg);
98static int pci_romsize(uint64_t testval);
99static void pci_fixancient(pcicfgregs *cfg);
100static int pci_printf(pcicfgregs *cfg, const char *fmt, ...);
101
102static int pci_porten(device_t dev);
103static int pci_memen(device_t dev);
104static void pci_assign_interrupt(device_t bus, device_t dev,
105 int force_route);
106static int pci_add_map(device_t bus, device_t dev, int reg,
107 struct resource_list *rl, int force, int prefetch);
108static int pci_probe(device_t dev);
109static void pci_load_vendor_data(void);
110static int pci_describe_parse_line(char **ptr, int *vendor,
111 int *device, char **desc);
112static char *pci_describe_device(device_t dev);
113static int pci_modevent(module_t mod, int what, void *arg);
114static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
115 pcicfgregs *cfg);
116static void pci_read_cap(device_t pcib, pcicfgregs *cfg);
117static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg,
118 int reg, uint32_t *data);
119#if 0
120static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg,
121 int reg, uint32_t data);
122#endif
123static void pci_read_vpd(device_t pcib, pcicfgregs *cfg);
124static void pci_mask_msix(device_t dev, u_int index);
125static void pci_unmask_msix(device_t dev, u_int index);
126static int pci_msi_blacklisted(void);
127static int pci_msix_blacklisted(void);
128static void pci_resume_msi(device_t dev);
129static void pci_resume_msix(device_t dev);
130static int pci_remap_intr_method(device_t bus, device_t dev,
131 u_int irq);
132static void pci_hint_device_unit(device_t acdev, device_t child,
133 const char *name, int *unitp);
134static int pci_reset_post(device_t dev, device_t child);
135static int pci_reset_prepare(device_t dev, device_t child);
136static int pci_reset_child(device_t dev, device_t child,
137 int flags);
138
139static int pci_get_id_method(device_t dev, device_t child,
140 enum pci_id_type type, uintptr_t *rid);
141static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d,
142 int b, int s, int f, uint16_t vid, uint16_t did);
143
144static device_method_t pci_methods[] = {
145 /* Device interface */
146 DEVMETHOD(device_probe, pci_probe),
147 DEVMETHOD(device_attach, pci_attach),
148 DEVMETHOD(device_detach, pci_detach),
149 DEVMETHOD(device_shutdown, bus_generic_shutdown),
150 DEVMETHOD(device_suspend, bus_generic_suspend),
151 DEVMETHOD(device_resume, pci_resume),
152
153 /* Bus interface */
154 DEVMETHOD(bus_print_child, pci_print_child),
155 DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch),
156 DEVMETHOD(bus_read_ivar, pci_read_ivar),
157 DEVMETHOD(bus_write_ivar, pci_write_ivar),
158 DEVMETHOD(bus_driver_added, pci_driver_added),
159 DEVMETHOD(bus_setup_intr, pci_setup_intr),
160 DEVMETHOD(bus_teardown_intr, pci_teardown_intr),
161 DEVMETHOD(bus_reset_prepare, pci_reset_prepare),
162 DEVMETHOD(bus_reset_post, pci_reset_post),
163 DEVMETHOD(bus_reset_child, pci_reset_child),
164
165 DEVMETHOD(bus_get_dma_tag, pci_get_dma_tag),
166 DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
167 DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource),
168 DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource),
169 DEVMETHOD(bus_delete_resource, pci_delete_resource),
170 DEVMETHOD(bus_alloc_resource, pci_alloc_resource),
171 DEVMETHOD(bus_adjust_resource, bus_generic_adjust_resource),
172 DEVMETHOD(bus_release_resource, pci_release_resource),
173 DEVMETHOD(bus_activate_resource, pci_activate_resource),
174 DEVMETHOD(bus_deactivate_resource, pci_deactivate_resource),
175 DEVMETHOD(bus_child_deleted, pci_child_deleted),
176 DEVMETHOD(bus_child_detached, pci_child_detached),
177 DEVMETHOD(bus_child_pnpinfo, pci_child_pnpinfo_method),
178 DEVMETHOD(bus_child_location, pci_child_location_method),
179 DEVMETHOD(bus_get_device_path, pci_get_device_path_method),
180 DEVMETHOD(bus_hint_device_unit, pci_hint_device_unit),
181 DEVMETHOD(bus_remap_intr, pci_remap_intr_method),
182 DEVMETHOD(bus_suspend_child, pci_suspend_child),
183 DEVMETHOD(bus_resume_child, pci_resume_child),
184 DEVMETHOD(bus_rescan, pci_rescan_method),
185
186 /* PCI interface */
187 DEVMETHOD(pci_read_config, pci_read_config_method),
188 DEVMETHOD(pci_write_config, pci_write_config_method),
189 DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method),
190 DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
191 DEVMETHOD(pci_enable_io, pci_enable_io_method),
192 DEVMETHOD(pci_disable_io, pci_disable_io_method),
193 DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method),
194 DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method),
195 DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method),
196 DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method),
197 DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method),
198 DEVMETHOD(pci_find_cap, pci_find_cap_method),
199 DEVMETHOD(pci_find_next_cap, pci_find_next_cap_method),
200 DEVMETHOD(pci_find_extcap, pci_find_extcap_method),
201 DEVMETHOD(pci_find_next_extcap, pci_find_next_extcap_method),
202 DEVMETHOD(pci_find_htcap, pci_find_htcap_method),
203 DEVMETHOD(pci_find_next_htcap, pci_find_next_htcap_method),
204 DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method),
205 DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method),
206 DEVMETHOD(pci_enable_msi, pci_enable_msi_method),
207 DEVMETHOD(pci_enable_msix, pci_enable_msix_method),
208 DEVMETHOD(pci_disable_msi, pci_disable_msi_method),
209 DEVMETHOD(pci_remap_msix, pci_remap_msix_method),
210 DEVMETHOD(pci_release_msi, pci_release_msi_method),
211 DEVMETHOD(pci_msi_count, pci_msi_count_method),
212 DEVMETHOD(pci_msix_count, pci_msix_count_method),
213 DEVMETHOD(pci_msix_pba_bar, pci_msix_pba_bar_method),
214 DEVMETHOD(pci_msix_table_bar, pci_msix_table_bar_method),
215 DEVMETHOD(pci_get_id, pci_get_id_method),
216 DEVMETHOD(pci_alloc_devinfo, pci_alloc_devinfo_method),
217 DEVMETHOD(pci_child_added, pci_child_added_method),
218#ifdef PCI_IOV
219 DEVMETHOD(pci_iov_attach, pci_iov_attach_method),
220 DEVMETHOD(pci_iov_detach, pci_iov_detach_method),
221 DEVMETHOD(pci_create_iov_child, pci_create_iov_child_method),
222#endif
223
224 DEVMETHOD_END
225};
226
227DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc));
228
229static devclass_t pci_devclass;
230EARLY_DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL,
231 BUS_PASS_BUS);
232MODULE_VERSION(pci, 1);
233
234static char *pci_vendordata;
235static size_t pci_vendordata_size;
236
237struct pci_quirk {
238 uint32_t devid; /* Vendor/device of the card */
239 int type;
240#define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */
241#define PCI_QUIRK_DISABLE_MSI 2 /* Neither MSI nor MSI-X work */
242#define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */
243#define PCI_QUIRK_UNMAP_REG 4 /* Ignore PCI map register */
244#define PCI_QUIRK_DISABLE_MSIX 5 /* MSI-X doesn't work */
245#define PCI_QUIRK_MSI_INTX_BUG 6 /* PCIM_CMD_INTxDIS disables MSI */
246#define PCI_QUIRK_REALLOC_BAR 7 /* Can't allocate memory at the default address */
247 int arg1;
248 int arg2;
249};
250
251static const struct pci_quirk pci_quirks[] = {
252 /* The Intel 82371AB and 82443MX have a map register at offset 0x90. */
253 { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 },
254 { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 },
255 /* As does the Serverworks OSB4 (the SMBus mapping register) */
256 { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 },
257
258 /*
259 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
260 * or the CMIC-SL (AKA ServerWorks GC_LE).
261 */
262 { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
263 { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 },
264
265 /*
266 * MSI doesn't work on earlier Intel chipsets including
267 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
268 */
269 { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
270 { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
271 { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
272 { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
273 { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
274 { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
275 { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 },
276
277 /*
278 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
279 * bridge.
280 */
281 { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 },
282
283 /*
284 * Some virtualization environments emulate an older chipset
285 * but support MSI just fine. QEMU uses the Intel 82440.
286 */
287 { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 },
288
289 /*
290 * HPET MMIO base address may appear in Bar1 for AMD SB600 SMBus
291 * controller depending on SoftPciRst register (PM_IO 0x55 [7]).
292 * It prevents us from attaching hpet(4) when the bit is unset.
293 * Note this quirk only affects SB600 revision A13 and earlier.
294 * For SB600 A21 and later, firmware must set the bit to hide it.
295 * For SB700 and later, it is unused and hardcoded to zero.
296 */
297 { 0x43851002, PCI_QUIRK_UNMAP_REG, 0x14, 0 },
298
299 /*
300 * Atheros AR8161/AR8162/E2200/E2400/E2500 Ethernet controllers have
301 * a bug that MSI interrupt does not assert if PCIM_CMD_INTxDIS bit
302 * of the command register is set.
303 */
304 { 0x10911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
305 { 0xE0911969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
306 { 0xE0A11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
307 { 0xE0B11969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
308 { 0x10901969, PCI_QUIRK_MSI_INTX_BUG, 0, 0 },
309
310 /*
311 * Broadcom BCM5714(S)/BCM5715(S)/BCM5780(S) Ethernet MACs don't
312 * issue MSI interrupts with PCIM_CMD_INTxDIS set either.
313 */
314 { 0x166814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714 */
315 { 0x166914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5714S */
316 { 0x166a14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780 */
317 { 0x166b14e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5780S */
318 { 0x167814e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715 */
319 { 0x167914e4, PCI_QUIRK_MSI_INTX_BUG, 0, 0 }, /* BCM5715S */
320
321 /*
322 * HPE Gen 10 VGA has a memory range that can't be allocated in the
323 * expected place.
324 */
325 { 0x98741002, PCI_QUIRK_REALLOC_BAR, 0, 0 },
326 { 0 }
327};
328
329/* map register information */
330#define PCI_MAPMEM 0x01 /* memory map */
331#define PCI_MAPMEMP 0x02 /* prefetchable memory map */
332#define PCI_MAPPORT 0x04 /* port map */
333
334struct devlist pci_devq;
335uint32_t pci_generation;
336uint32_t pci_numdevs = 0;
337static int pcie_chipset, pcix_chipset;
338
339/* sysctl vars */
340SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
341 "PCI bus tuning parameters");
342
343static int pci_enable_io_modes = 1;
344SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN,
345 &pci_enable_io_modes, 1,
346 "Enable I/O and memory bits in the config register. Some BIOSes do not"
347 " enable these bits correctly. We'd like to do this all the time, but"
348 " there are some peripherals that this causes problems with.");
349
350static int pci_do_realloc_bars = 1;
351SYSCTL_INT(_hw_pci, OID_AUTO, realloc_bars, CTLFLAG_RWTUN,
352 &pci_do_realloc_bars, 0,
353 "Attempt to allocate a new range for any BARs whose original "
354 "firmware-assigned ranges fail to allocate during the initial device scan.");
355
356static int pci_do_power_nodriver = 0;
357SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RWTUN,
358 &pci_do_power_nodriver, 0,
359 "Place a function into D3 state when no driver attaches to it. 0 means"
360 " disable. 1 means conservatively place devices into D3 state. 2 means"
361 " aggressively place devices into D3 state. 3 means put absolutely"
362 " everything in D3 state.");
363
364int pci_do_power_resume = 1;
365SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RWTUN,
366 &pci_do_power_resume, 1,
367 "Transition from D3 -> D0 on resume.");
368
369int pci_do_power_suspend = 1;
370SYSCTL_INT(_hw_pci, OID_AUTO, do_power_suspend, CTLFLAG_RWTUN,
371 &pci_do_power_suspend, 1,
372 "Transition from D0 -> D3 on suspend.");
373
374static int pci_do_msi = 1;
375SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RWTUN, &pci_do_msi, 1,
376 "Enable support for MSI interrupts");
377
378static int pci_do_msix = 1;
379SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RWTUN, &pci_do_msix, 1,
380 "Enable support for MSI-X interrupts");
381
382static int pci_msix_rewrite_table = 0;
383SYSCTL_INT(_hw_pci, OID_AUTO, msix_rewrite_table, CTLFLAG_RWTUN,
384 &pci_msix_rewrite_table, 0,
385 "Rewrite entire MSI-X table when updating MSI-X entries");
386
387static int pci_honor_msi_blacklist = 1;
388SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RDTUN,
389 &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI/MSI-X");
390
391#if defined(__i386__) || defined(__amd64__)
392static int pci_usb_takeover = 1;
393#else
394static int pci_usb_takeover = 0;
395#endif
396SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RDTUN,
397 &pci_usb_takeover, 1,
398 "Enable early takeover of USB controllers. Disable this if you depend on"
399 " BIOS emulation of USB devices, that is you use USB devices (like"
400 " keyboard or mouse) but do not load USB drivers");
401
402static int pci_clear_bars;
403SYSCTL_INT(_hw_pci, OID_AUTO, clear_bars, CTLFLAG_RDTUN, &pci_clear_bars, 0,
404 "Ignore firmware-assigned resources for BARs.");
405
406#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
407static int pci_clear_buses;
408SYSCTL_INT(_hw_pci, OID_AUTO, clear_buses, CTLFLAG_RDTUN, &pci_clear_buses, 0,
409 "Ignore firmware-assigned bus numbers.");
410#endif
411
412static int pci_enable_ari = 1;
413SYSCTL_INT(_hw_pci, OID_AUTO, enable_ari, CTLFLAG_RDTUN, &pci_enable_ari,
414 0, "Enable support for PCIe Alternative RID Interpretation");
415
416static int pci_enable_aspm = 1;
417SYSCTL_INT(_hw_pci, OID_AUTO, enable_aspm, CTLFLAG_RDTUN, &pci_enable_aspm,
418 0, "Enable support for PCIe Active State Power Management");
419
420static int pci_clear_aer_on_attach = 0;
421SYSCTL_INT(_hw_pci, OID_AUTO, clear_aer_on_attach, CTLFLAG_RWTUN,
422 &pci_clear_aer_on_attach, 0,
423 "Clear port and device AER state on driver attach");
424
425static int
426pci_has_quirk(uint32_t devid, int quirk)
427{
428 const struct pci_quirk *q;
429
430 for (q = &pci_quirks[0]; q->devid; q++) {
431 if (q->devid == devid && q->type == quirk)
432 return (1);
433 }
434 return (0);
435}
436
437/* Find a device_t by bus/slot/function in domain 0 */
438
439device_t
440pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
441{
442
443 return (pci_find_dbsf(0, bus, slot, func));
444}
445
446/* Find a device_t by domain/bus/slot/function */
447
448device_t
449pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
450{
451 struct pci_devinfo *dinfo = NULL;
452
453 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
454 if ((dinfo->cfg.domain == domain) &&
455 (dinfo->cfg.bus == bus) &&
456 (dinfo->cfg.slot == slot) &&
457 (dinfo->cfg.func == func)) {
458 break;
459 }
460 }
461
462 return (dinfo != NULL ? dinfo->cfg.dev : NULL);
463}
464
465/* Find a device_t by vendor/device ID */
466
467device_t
468pci_find_device(uint16_t vendor, uint16_t device)
469{
470 struct pci_devinfo *dinfo;
471
472 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
473 if ((dinfo->cfg.vendor == vendor) &&
474 (dinfo->cfg.device == device)) {
475 return (dinfo->cfg.dev);
476 }
477 }
478
479 return (NULL);
480}
481
482device_t
483pci_find_class(uint8_t class, uint8_t subclass)
484{
485 struct pci_devinfo *dinfo;
486
487 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
488 if (dinfo->cfg.baseclass == class &&
489 dinfo->cfg.subclass == subclass) {
490 return (dinfo->cfg.dev);
491 }
492 }
493
494 return (NULL);
495}
496
497device_t
498pci_find_class_from(uint8_t class, uint8_t subclass, device_t from)
499{
500 struct pci_devinfo *dinfo;
501 bool found = false;
502
503 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
504 if (from != NULL && found == false) {
505 if (from != dinfo->cfg.dev)
506 continue;
507 found = true;
508 continue;
509 }
510 if (dinfo->cfg.baseclass == class &&
511 dinfo->cfg.subclass == subclass) {
512 return (dinfo->cfg.dev);
513 }
514 }
515
516 return (NULL);
517}
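
A driver that needs every function of a given class, not just the first match, normally chains these lookups by passing the previous result back in as 'from'. The sketch below is illustrative only and not part of pci.c; the example_ name and the choice of NVMe class constants are assumptions.

/* Illustrative sketch: visit every NVMe-class PCI function in the system. */
static void
example_walk_nvme_functions(void)
{
	device_t dev = NULL;

	/* PCIC_STORAGE and PCIS_STORAGE_NVM come from <dev/pci/pcireg.h>. */
	while ((dev = pci_find_class_from(PCIC_STORAGE, PCIS_STORAGE_NVM,
	    dev)) != NULL)
		device_printf(dev, "NVMe-class function at %d:%d:%d\n",
		    pci_get_bus(dev), pci_get_slot(dev),
		    pci_get_function(dev));
}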
518
519static int
520pci_printf(pcicfgregs *cfg, const char *fmt, ...)
521{
522 va_list ap;
523 int retval;
524
525 retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot,
526 cfg->func);
527 va_start(ap, fmt);
528 retval += vprintf(fmt, ap);
529 va_end(ap);
530 return (retval);
531}
532
533/* return base address of memory or port map */
534
535static pci_addr_t
536pci_mapbase(uint64_t mapreg)
537{
538
539 if (PCI_BAR_MEM(mapreg))
540 return (mapreg & PCIM_BAR_MEM_BASE);
541 else
542 return (mapreg & PCIM_BAR_IO_BASE);
543}
544
545/* return map type of memory or port map */
546
547static const char *
548pci_maptype(uint64_t mapreg)
549{
550
551 if (PCI_BAR_IO(mapreg))
552 return ("I/O Port");
553 if (mapreg & PCIM_BAR_MEM_PREFETCH)
554 return ("Prefetchable Memory");
555 return ("Memory");
556}
557
558/* return log2 of map size decoded for memory or port map */
559
560int
561pci_mapsize(uint64_t testval)
562{
563 int ln2size;
564
565 testval = pci_mapbase(testval);
566 ln2size = 0;
567 if (testval != 0) {
568 while ((testval & 1) == 0)
569 {
570 ln2size++;
571 testval >>= 1;
572 }
573 }
574 return (ln2size);
575}
576
577/* return base address of device ROM */
578
579static pci_addr_t
580pci_rombase(uint64_t mapreg)
581{
582
583 return (mapreg & PCIM_BIOS_ADDR_MASK);
584}
585
586/* return log2 of map size decoded for device ROM */
587
588static int
589pci_romsize(uint64_t testval)
590{
591 int ln2size;
592
593 testval = pci_rombase(testval);
594 ln2size = 0;
595 if (testval != 0) {
596 while ((testval & 1) == 0)
597 {
598 ln2size++;
599 testval >>= 1;
600 }
601 }
602 return (ln2size);
603}
604
605/* return log2 of address range supported by map register */
606
607static int
608pci_maprange(uint64_t mapreg)
609{
610 int ln2range = 0;
611
612 if (PCI_BAR_IO(mapreg))
613 ln2range = 32;
614 else
615 switch (mapreg & PCIM_BAR_MEM_TYPE) {
616 case PCIM_BAR_MEM_32:
617 ln2range = 32;
618 break;
619 case PCIM_BAR_MEM_1MB:
620 ln2range = 20;
621 break;
622 case PCIM_BAR_MEM_64:
623 ln2range = 64;
624 break;
625 }
626 return (ln2range);
627}
628
629/* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
630
631static void
632pci_fixancient(pcicfgregs *cfg)
633{
634 if ((cfg->hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL)
635 return;
636
637 /* PCI to PCI bridges use header type 1 */
638 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
639 cfg->hdrtype = PCIM_HDRTYPE_BRIDGE;
640}
641
642/* extract header type specific config data */
643
644static void
645pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
646{
647#define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
648 switch (cfg->hdrtype & PCIM_HDRTYPE) {
649 case PCIM_HDRTYPE_NORMAL:
650 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
651 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
652 cfg->mingnt = REG(PCIR_MINGNT, 1);
653 cfg->maxlat = REG(PCIR_MAXLAT, 1);
654 cfg->nummaps = PCI_MAXMAPS_0;
655 break;
656 case PCIM_HDRTYPE_BRIDGE:
657 cfg->bridge.br_seclat = REG(PCIR_SECLAT_1, 1);
658 cfg->bridge.br_subbus = REG(PCIR_SUBBUS_1, 1);
659 cfg->bridge.br_secbus = REG(PCIR_SECBUS_1, 1);
660 cfg->bridge.br_pribus = REG(PCIR_PRIBUS_1, 1);
661 cfg->bridge.br_control = REG(PCIR_BRIDGECTL_1, 2);
662 cfg->nummaps = PCI_MAXMAPS_1;
663 break;
664 case PCIM_HDRTYPE_CARDBUS:
665 cfg->bridge.br_seclat = REG(PCIR_SECLAT_2, 1);
666 cfg->bridge.br_subbus = REG(PCIR_SUBBUS_2, 1);
667 cfg->bridge.br_secbus = REG(PCIR_SECBUS_2, 1);
668 cfg->bridge.br_pribus = REG(PCIR_PRIBUS_2, 1);
669 cfg->bridge.br_control = REG(PCIR_BRIDGECTL_2, 2);
670 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
671 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
672 cfg->nummaps = PCI_MAXMAPS_2;
673 break;
674 }
675#undef REG
676}
677
678/* read configuration header into pcicfgregs structure */
679struct pci_devinfo *
680pci_read_device(device_t pcib, device_t bus, int d, int b, int s, int f)
681{
682#define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
683 uint16_t vid, did;
684
685 vid = REG(PCIR_VENDOR, 2);
686 if (vid == PCIV_INVALID)
687 return (NULL);
688
689 did = REG(PCIR_DEVICE, 2);
690
691 return (pci_fill_devinfo(pcib, bus, d, b, s, f, vid, did));
692}
693
694struct pci_devinfo *
695pci_alloc_devinfo_method(device_t dev)
696{
697
698 return (malloc(sizeof(struct pci_devinfo), M_DEVBUF,
699 M_WAITOK | M_ZERO));
700}
701
702static struct pci_devinfo *
703pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f,
704 uint16_t vid, uint16_t did)
705{
706 struct pci_devinfo *devlist_entry;
707 pcicfgregs *cfg;
708
709 devlist_entry = PCI_ALLOC_DEVINFO(bus);
710
711 cfg = &devlist_entry->cfg;
712
713 cfg->domain = d;
714 cfg->bus = b;
715 cfg->slot = s;
716 cfg->func = f;
717 cfg->vendor = vid;
718 cfg->device = did;
719 cfg->cmdreg = REG(PCIR_COMMAND, 2);
720 cfg->statreg = REG(PCIR_STATUS, 2);
721 cfg->baseclass = REG(PCIR_CLASS, 1);
722 cfg->subclass = REG(PCIR_SUBCLASS, 1);
723 cfg->progif = REG(PCIR_PROGIF, 1);
724 cfg->revid = REG(PCIR_REVID, 1);
725 cfg->hdrtype = REG(PCIR_HDRTYPE, 1);
726 cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1);
727 cfg->lattimer = REG(PCIR_LATTIMER, 1);
728 cfg->intpin = REG(PCIR_INTPIN, 1);
729 cfg->intline = REG(PCIR_INTLINE, 1);
730
731 cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0;
732 cfg->hdrtype &= ~PCIM_MFDEV;
733 STAILQ_INIT(&cfg->maps);
734
735 cfg->iov = NULL;
736
737 pci_fixancient(cfg);
738 pci_hdrtypedata(pcib, b, s, f, cfg);
739
740 if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
741 pci_read_cap(pcib, cfg);
742
743 STAILQ_INSERT_TAIL(&pci_devq, devlist_entry, pci_links);
744
745 devlist_entry->conf.pc_sel.pc_domain = cfg->domain;
746 devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
747 devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
748 devlist_entry->conf.pc_sel.pc_func = cfg->func;
749 devlist_entry->conf.pc_hdr = cfg->hdrtype;
750
751 devlist_entry->conf.pc_subvendor = cfg->subvendor;
752 devlist_entry->conf.pc_subdevice = cfg->subdevice;
753 devlist_entry->conf.pc_vendor = cfg->vendor;
754 devlist_entry->conf.pc_device = cfg->device;
755
756 devlist_entry->conf.pc_class = cfg->baseclass;
757 devlist_entry->conf.pc_subclass = cfg->subclass;
758 devlist_entry->conf.pc_progif = cfg->progif;
759 devlist_entry->conf.pc_revid = cfg->revid;
760
761 pci_numdevs++;
762 pci_generation++;
763
764 return (devlist_entry);
765}
766#undef REG
767
768static void
769pci_ea_fill_info(device_t pcib, pcicfgregs *cfg)
770{
771#define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, \
772 cfg->ea.ea_location + (n), w)
773 int num_ent;
774 int ptr;
775 int a, b;
776 uint32_t val;
777 int ent_size;
778 uint32_t dw[4];
779 uint64_t base, max_offset;
780 struct pci_ea_entry *eae;
781
782 if (cfg->ea.ea_location == 0)
783 return;
784
785 STAILQ_INIT(&cfg->ea.ea_entries);
786
787 /* Determine the number of entries */
788 num_ent = REG(PCIR_EA_NUM_ENT, 2);
789 num_ent &= PCIM_EA_NUM_ENT_MASK;
790
791 /* Find the first entry to care of */
792 ptr = PCIR_EA_FIRST_ENT;
793
794 /* Skip DWORD 2 for type 1 functions */
795 if ((cfg->hdrtype & PCIM_HDRTYPE) == PCIM_HDRTYPE_BRIDGE)
796 ptr += 4;
797
798 for (a = 0; a < num_ent; a++) {
799 eae = malloc(sizeof(*eae), M_DEVBUF, M_WAITOK | M_ZERO);
800 eae->eae_cfg_offset = cfg->ea.ea_location + ptr;
801
802 /* Read a number of dwords in the entry */
803 val = REG(ptr, 4);
804 ptr += 4;
805 ent_size = (val & PCIM_EA_ES);
806
807 for (b = 0; b < ent_size; b++) {
808 dw[b] = REG(ptr, 4);
809 ptr += 4;
810 }
811
812 eae->eae_flags = val;
813 eae->eae_bei = (PCIM_EA_BEI & val) >> PCIM_EA_BEI_OFFSET;
814
815 base = dw[0] & PCIM_EA_FIELD_MASK;
816 max_offset = dw[1] | ~PCIM_EA_FIELD_MASK;
817 b = 2;
818 if (((dw[0] & PCIM_EA_IS_64) != 0) && (b < ent_size)) {
819 base |= (uint64_t)dw[b] << 32UL;
820 b++;
821 }
822 if (((dw[1] & PCIM_EA_IS_64) != 0)
823 && (b < ent_size)) {
824 max_offset |= (uint64_t)dw[b] << 32UL;
825 b++;
826 }
827
828 eae->eae_base = base;
829 eae->eae_max_offset = max_offset;
830
831 STAILQ_INSERT_TAIL(&cfg->ea.ea_entries, eae, eae_link);
832
833 if (bootverbose) {
834 printf("PCI(EA) dev %04x:%04x, bei %d, flags #%x, base #%jx, max_offset #%jx\n",
835 cfg->vendor, cfg->device, eae->eae_bei, eae->eae_flags,
836 (uintmax_t)eae->eae_base, (uintmax_t)eae->eae_max_offset);
837 }
838 }
839}
840#undef REG
841
842static void
843pci_read_cap(device_t pcib, pcicfgregs *cfg)
844{
845#define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
846#define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
847#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
848 uint64_t addr;
849#endif
850 uint32_t val;
851 int ptr, nextptr, ptrptr;
852
853 switch (cfg->hdrtype & PCIM_HDRTYPE) {
854 case PCIM_HDRTYPE_NORMAL:
855 case PCIM_HDRTYPE_BRIDGE:
856 ptrptr = PCIR_CAP_PTR;
857 break;
858 case PCIM_HDRTYPE_CARDBUS:
859 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
860 break;
861 default:
862 return; /* no extended capabilities support */
863 }
864 nextptr = REG(ptrptr, 1); /* sanity check? */
865
866 /*
867 * Read capability entries.
868 */
869 while (nextptr != 0) {
870 /* Sanity check */
871 if (nextptr > 255) {
872 printf("illegal PCI extended capability offset %d\n",
873 nextptr);
874 return;
875 }
876 /* Find the next entry */
877 ptr = nextptr;
878 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
879
880 /* Process this entry */
881 switch (REG(ptr + PCICAP_ID, 1)) {
882 case PCIY_PMG: /* PCI power management */
883 if (cfg->pp.pp_cap == 0) {
884 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
885 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
886 cfg->pp.pp_bse = ptr + PCIR_POWER_BSE;
887 if ((nextptr - ptr) > PCIR_POWER_DATA)
888 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
889 }
890 break;
891 case PCIY_HT: /* HyperTransport */
892 /* Determine HT-specific capability type. */
893 val = REG(ptr + PCIR_HT_COMMAND, 2);
894
895 if ((val & 0xe000) == PCIM_HTCAP_SLAVE)
896 cfg->ht.ht_slave = ptr;
897
898#if defined(__i386__) || defined(__amd64__) || defined(__powerpc__)
899 switch (val & PCIM_HTCMD_CAP_MASK) {
900 case PCIM_HTCAP_MSI_MAPPING:
901 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
902 /* Sanity check the mapping window. */
903 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
904 4);
905 addr <<= 32;
906 addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO,
907 4);
908 if (addr != MSI_INTEL_ADDR_BASE)
909 device_printf(pcib,
910 "HT device at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n",
911 cfg->domain, cfg->bus,
912 cfg->slot, cfg->func,
913 (long long)addr);
914 } else
915 addr = MSI_INTEL_ADDR_BASE;
916
917 cfg->ht.ht_msimap = ptr;
918 cfg->ht.ht_msictrl = val;
919 cfg->ht.ht_msiaddr = addr;
920 break;
921 }
922#endif
923 break;
924 case PCIY_MSI: /* PCI MSI */
925 cfg->msi.msi_location = ptr;
926 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
927 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
928 PCIM_MSICTRL_MMC_MASK) >> 1);
929 break;
930 case PCIY_MSIX: /* PCI MSI-X */
931 cfg->msix.msix_location = ptr;
932 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
933 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
934 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
935 val = REG(ptr + PCIR_MSIX_TABLE, 4);
936 cfg->msix.msix_table_bar = PCIR_BAR(val &
937 PCIM_MSIX_BIR_MASK);
938 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
939 val = REG(ptr + PCIR_MSIX_PBA, 4);
940 cfg->msix.msix_pba_bar = PCIR_BAR(val &
941 PCIM_MSIX_BIR_MASK);
942 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
943 break;
944 case PCIY_VPD: /* PCI Vital Product Data */
945 cfg->vpd.vpd_reg = ptr;
946 break;
947 case PCIY_SUBVENDOR:
948 /* Should always be true. */
949 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
950 PCIM_HDRTYPE_BRIDGE) {
951 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
952 cfg->subvendor = val & 0xffff;
953 cfg->subdevice = val >> 16;
954 }
955 break;
956 case PCIY_PCIX: /* PCI-X */
957 /*
958 * Assume we have a PCI-X chipset if we have
959 * at least one PCI-PCI bridge with a PCI-X
960 * capability. Note that some systems with
961 * PCI-express or HT chipsets might match on
962 * this check as well.
963 */
964 if ((cfg->hdrtype & PCIM_HDRTYPE) ==
965 PCIM_HDRTYPE_BRIDGE)
966 pcix_chipset = 1;
967 cfg->pcix.pcix_location = ptr;
968 break;
969 case PCIY_EXPRESS: /* PCI-express */
970 /*
971 * Assume we have a PCI-express chipset if we have
972 * at least one PCI-express device.
973 */
974 pcie_chipset = 1;
975 cfg->pcie.pcie_location = ptr;
976 val = REG(ptr + PCIER_FLAGS, 2);
977 cfg->pcie.pcie_type = val & PCIEM_FLAGS_TYPE;
978 break;
979 case PCIY_EA: /* Enhanced Allocation */
980 cfg->ea.ea_location = ptr;
981 pci_ea_fill_info(pcib, cfg);
982 break;
983 default:
984 break;
985 }
986 }
987
988#if defined(__powerpc__)
989 /*
990 * Enable the MSI mapping window for all HyperTransport
991 * slaves. PCI-PCI bridges have their windows enabled via
992 * PCIB_MAP_MSI().
993 */
994 if (cfg->ht.ht_slave != 0 && cfg->ht.ht_msimap != 0 &&
995 !(cfg->ht.ht_msictrl & PCIM_HTCMD_MSI_ENABLE)) {
996 device_printf(pcib,
997 "Enabling MSI window for HyperTransport slave at pci%d:%d:%d:%d\n",
998 cfg->domain, cfg->bus, cfg->slot, cfg->func);
999 cfg->ht.ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
1000 WREG(cfg->ht.ht_msimap + PCIR_HT_COMMAND, cfg->ht.ht_msictrl,
1001 2);
1002 }
1003#endif
1004/* REG and WREG use carry through to next functions */
1005}
1006
1007/*
1008 * PCI Vital Product Data
1009 */
1010
1011#define PCI_VPD_TIMEOUT 1000000
1012
1013static int
1014pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
1015{
1016 int count = PCI_VPD_TIMEOUT;
1017
1018 KASSERT((reg & 3) == 0, ("VPD register must be 4 byte aligned"));
1019
1020 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2);
1021
1022 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) {
1023 if (--count < 0)
1024 return (ENXIO);
1025 DELAY(1); /* limit looping */
1026 }
1027 *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4));
1028
1029 return (0);
1030}
1031
1032#if 0
1033static int
1034pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data)
1035{
1036 int count = PCI_VPD_TIMEOUT;
1037
1038 KASSERT((reg & 3) == 0, ("VPD register must be 4 byte aligned"));
1039
1040 WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4);
1041 WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2);
1042 while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) {
1043 if (--count < 0)
1044 return (ENXIO);
1045 DELAY(1); /* limit looping */
1046 }
1047
1048 return (0);
1049}
1050#endif
1051
1052#undef PCI_VPD_TIMEOUT
1053
1054struct vpd_readstate {
1055 device_t pcib;
1056 pcicfgregs *cfg;
1057 uint32_t val;
1058 int bytesinval;
1059 int off;
1060 uint8_t cksum;
1061};
1062
1063static int
1064vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
1065{
1066 uint32_t reg;
1067 uint8_t byte;
1068
1069 if (vrs->bytesinval == 0) {
1070 if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, &reg))
1071 return (ENXIO);
1072 vrs->val = le32toh(reg);
1073 vrs->off += 4;
1074 byte = vrs->val & 0xff;
1075 vrs->bytesinval = 3;
1076 } else {
1077 vrs->val = vrs->val >> 8;
1078 byte = vrs->val & 0xff;
1079 vrs->bytesinval--;
1080 }
1081
1082 vrs->cksum += byte;
1083 *data = byte;
1084 return (0);
1085}
1086
1087static void
1088pci_read_vpd(device_t pcib, pcicfgregs *cfg)
1089{
1090 struct vpd_readstate vrs;
1091 int state;
1092 int name;
1093 int remain;
1094 int i;
1095 int alloc, off; /* alloc/off for RO/W arrays */
1096 int cksumvalid;
1097 int dflen;
1098 int firstrecord;
1099 uint8_t byte;
1100 uint8_t byte2;
1101
1102 /* init vpd reader */
1103 vrs.bytesinval = 0;
1104 vrs.off = 0;
1105 vrs.pcib = pcib;
1106 vrs.cfg = cfg;
1107 vrs.cksum = 0;
1108
1109 state = 0;
1110 name = remain = i = 0; /* shut up stupid gcc */
1111 alloc = off = 0; /* shut up stupid gcc */
1112 dflen = 0; /* shut up stupid gcc */
1113 cksumvalid = -1;
1114 firstrecord = 1;
1115 while (state >= 0) {
1116 if (vpd_nextbyte(&vrs, &byte)) {
1117 pci_printf(cfg, "VPD read timed out\n");
1118 state = -2;
1119 break;
1120 }
1121#if 0
1122 pci_printf(cfg, "vpd: val: %#x, off: %d, bytesinval: %d, byte: "
1123 "%#hhx, state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
1124 vrs.off, vrs.bytesinval, byte, state, remain, name, i);
1125#endif
1126 switch (state) {
1127 case 0: /* item name */
1128 if (byte & 0x80) {
1129 if (vpd_nextbyte(&vrs, &byte2)) {
1130 state = -2;
1131 break;
1132 }
1133 remain = byte2;
1134 if (vpd_nextbyte(&vrs, &byte2)) {
1135 state = -2;
1136 break;
1137 }
1138 remain |= byte2 << 8;
1139 name = byte & 0x7f;
1140 } else {
1141 remain = byte & 0x7;
1142 name = (byte >> 3) & 0xf;
1143 }
1144 if (firstrecord) {
1145 if (name != 0x2) {
1146 pci_printf(cfg, "VPD data does not " \
1147 "start with ident (%#x)\n", name);
1148 state = -2;
1149 break;
1150 }
1151 firstrecord = 0;
1152 }
1153 if (vrs.off + remain - vrs.bytesinval > 0x8000) {
1154 pci_printf(cfg,
1155 "VPD data overflow, remain %#x\n", remain);
1156 state = -1;
1157 break;
1158 }
1159 switch (name) {
1160 case 0x2: /* String */
1161 if (cfg->vpd.vpd_ident != NULL) {
1162 pci_printf(cfg,
1163 "duplicate VPD ident record\n");
1164 state = -2;
1165 break;
1166 }
1167 if (remain > 255) {
1168 pci_printf(cfg,
1169 "VPD ident length %d exceeds 255\n",
1170 remain);
1171 state = -2;
1172 break;
1173 }
1174 cfg->vpd.vpd_ident = malloc(remain + 1,
1175 M_DEVBUF, M_WAITOK);
1176 i = 0;
1177 state = 1;
1178 break;
1179 case 0xf: /* End */
1180 state = -1;
1181 break;
1182 case 0x10: /* VPD-R */
1183 alloc = 8;
1184 off = 0;
1185 cfg->vpd.vpd_ros = malloc(alloc *
1186 sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
1187 M_WAITOK | M_ZERO);
1188 state = 2;
1189 break;
1190 case 0x11: /* VPD-W */
1191 alloc = 8;
1192 off = 0;
1193 cfg->vpd.vpd_w = malloc(alloc *
1194 sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
1195 M_WAITOK | M_ZERO);
1196 state = 5;
1197 break;
1198 default: /* Invalid data, abort */
1199 pci_printf(cfg, "invalid VPD name: %#x\n", name);
1200 state = -2;
1201 break;
1202 }
1203 break;
1204
1205 case 1: /* Identifier String */
1206 cfg->vpd.vpd_ident[i++] = byte;
1207 remain--;
1208 if (remain == 0) {
1209 cfg->vpd.vpd_ident[i] = '\0';
1210 state = 0;
1211 }
1212 break;
1213
1214 case 2: /* VPD-R Keyword Header */
1215 if (off == alloc) {
1216 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1217 (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
1218 M_DEVBUF, M_WAITOK | M_ZERO);
1219 }
1220 cfg->vpd.vpd_ros[off].keyword[0] = byte;
1221 if (vpd_nextbyte(&vrs, &byte2)) {
1222 state = -2;
1223 break;
1224 }
1225 cfg->vpd.vpd_ros[off].keyword[1] = byte2;
1226 if (vpd_nextbyte(&vrs, &byte2)) {
1227 state = -2;
1228 break;
1229 }
1230 cfg->vpd.vpd_ros[off].len = dflen = byte2;
1231 if (dflen == 0 &&
1232 strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
1233 2) == 0) {
1234 /*
1235 * if this happens, we can't trust the rest
1236 * of the VPD.
1237 */
1238 pci_printf(cfg, "invalid VPD RV record");
1239 cksumvalid = 0;
1240 state = -1;
1241 break;
1242 } else if (dflen == 0) {
1243 cfg->vpd.vpd_ros[off].value = malloc(1 *
1244 sizeof(*cfg->vpd.vpd_ros[off].value),
1245 M_DEVBUF, M_WAITOK);
1246 cfg->vpd.vpd_ros[off].value[0] = '\x00';
1247 } else
1248 cfg->vpd.vpd_ros[off].value = malloc(
1249 (dflen + 1) *
1250 sizeof(*cfg->vpd.vpd_ros[off].value),
1251 M_DEVBUF, M_WAITOK);
1252 remain -= 3;
1253 i = 0;
1254 /* keep in sync w/ state 3's transitions */
1255 if (dflen == 0 && remain == 0)
1256 state = 0;
1257 else if (dflen == 0)
1258 state = 2;
1259 else
1260 state = 3;
1261 break;
1262
1263 case 3: /* VPD-R Keyword Value */
1264 cfg->vpd.vpd_ros[off].value[i++] = byte;
1265 if (strncmp(cfg->vpd.vpd_ros[off].keyword,
1266 "RV", 2) == 0 && cksumvalid == -1) {
1267 if (vrs.cksum == 0)
1268 cksumvalid = 1;
1269 else {
1270 if (bootverbose)
1271 pci_printf(cfg,
1272 "bad VPD cksum, remain %hhu\n",
1273 vrs.cksum);
1274 cksumvalid = 0;
1275 state = -1;
1276 break;
1277 }
1278 }
1279 dflen--;
1280 remain--;
1281 /* keep in sync w/ state 2's transitions */
1282 if (dflen == 0)
1283 cfg->vpd.vpd_ros[off++].value[i++] = '\0';
1284 if (dflen == 0 && remain == 0) {
1285 cfg->vpd.vpd_rocnt = off;
1286 cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
1287 off * sizeof(*cfg->vpd.vpd_ros),
1288 M_DEVBUF, M_WAITOK | M_ZERO);
1289 state = 0;
1290 } else if (dflen == 0)
1291 state = 2;
1292 break;
1293
1294 case 4:
1295 remain--;
1296 if (remain == 0)
1297 state = 0;
1298 break;
1299
1300 case 5: /* VPD-W Keyword Header */
1301 if (off == alloc) {
1302 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1303 (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
1304 M_DEVBUF, M_WAITOK | M_ZERO);
1305 }
1306 cfg->vpd.vpd_w[off].keyword[0] = byte;
1307 if (vpd_nextbyte(&vrs, &byte2)) {
1308 state = -2;
1309 break;
1310 }
1311 cfg->vpd.vpd_w[off].keyword[1] = byte2;
1312 if (vpd_nextbyte(&vrs, &byte2)) {
1313 state = -2;
1314 break;
1315 }
1316 cfg->vpd.vpd_w[off].len = dflen = byte2;
1317 cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
1318 cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
1319 sizeof(*cfg->vpd.vpd_w[off].value),
1320 M_DEVBUF, M_WAITOK);
1321 remain -= 3;
1322 i = 0;
1323 /* keep in sync w/ state 6's transitions */
1324 if (dflen == 0 && remain == 0)
1325 state = 0;
1326 else if (dflen == 0)
1327 state = 5;
1328 else
1329 state = 6;
1330 break;
1331
1332 case 6: /* VPD-W Keyword Value */
1333 cfg->vpd.vpd_w[off].value[i++] = byte;
1334 dflen--;
1335 remain--;
1336 /* keep in sync w/ state 5's transitions */
1337 if (dflen == 0)
1338 cfg->vpd.vpd_w[off++].value[i++] = '\0';
1339 if (dflen == 0 && remain == 0) {
1340 cfg->vpd.vpd_wcnt = off;
1341 cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
1342 off * sizeof(*cfg->vpd.vpd_w),
1343 M_DEVBUF, M_WAITOK | M_ZERO);
1344 state = 0;
1345 } else if (dflen == 0)
1346 state = 5;
1347 break;
1348
1349 default:
1350 pci_printf(cfg, "invalid state: %d\n", state);
1351 state = -1;
1352 break;
1353 }
1354
1355 if (cfg->vpd.vpd_ident == NULL || cfg->vpd.vpd_ident[0] == '\0') {
1356 pci_printf(cfg, "no valid vpd ident found\n");
1357 state = -2;
1358 }
1359 }
1360
1361 if (cksumvalid <= 0 || state < -1) {
1362 /* read-only data bad, clean up */
1363 if (cfg->vpd.vpd_ros != NULL) {
1364 for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
1365 free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
1366 free(cfg->vpd.vpd_ros, M_DEVBUF);
1367 cfg->vpd.vpd_ros = NULL;
1368 }
1369 }
1370 if (state < -1) {
1371 /* I/O error, clean up */
1372 pci_printf(cfg, "failed to read VPD data.\n");
1373 if (cfg->vpd.vpd_ident != NULL) {
1374 free(cfg->vpd.vpd_ident, M_DEVBUF);
1375 cfg->vpd.vpd_ident = NULL;
1376 }
1377 if (cfg->vpd.vpd_w != NULL) {
1378 for (off = 0; cfg->vpd.vpd_w[off].value; off++)
1379 free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
1380 free(cfg->vpd.vpd_w, M_DEVBUF);
1381 cfg->vpd.vpd_w = NULL;
1382 }
1383 }
1384 cfg->vpd.vpd_cached = 1;
1385#undef REG
1386#undef WREG
1387}
1388
1389int
1390pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
1391{
1392 struct pci_devinfo *dinfo = device_get_ivars(child);
1393 pcicfgregs *cfg = &dinfo->cfg;
1394
1395 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1396 pci_read_vpd(device_get_parent(dev), cfg);
1397
1398 *identptr = cfg->vpd.vpd_ident;
1399
1400 if (*identptr == NULL)
1401 return (ENXIO);
1402
1403 return (0);
1404}
1405
1406int
1407pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw,
1408 const char **vptr)
1409{
1410 struct pci_devinfo *dinfo = device_get_ivars(child);
1411 pcicfgregs *cfg = &dinfo->cfg;
1412 int i;
1413
1414 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1415 pci_read_vpd(device_get_parent(dev), cfg);
1416
1417 for (i = 0; i < cfg->vpd.vpd_rocnt; i++)
1418 if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword,
1419 sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) {
1420 *vptr = cfg->vpd.vpd_ros[i].value;
1421 return (0);
1422 }
1423
1424 *vptr = NULL;
1425 return (ENXIO);
1426}
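
Child drivers normally reach this cached VPD through the pci_get_vpd_ident() and pci_get_vpd_readonly() wrappers from <dev/pci/pcivar.h>. A minimal sketch follows; the example_ name and the assumption that the device carries the common "SN" read-only keyword are illustrative.

/* Illustrative sketch: print the VPD identifier string and serial number. */
static void
example_report_vpd(device_t dev)
{
	const char *ident, *sn;

	if (pci_get_vpd_ident(dev, &ident) == 0)
		device_printf(dev, "VPD ident: %s\n", ident);
	if (pci_get_vpd_readonly(dev, "SN", &sn) == 0)
		device_printf(dev, "VPD serial number: %s\n", sn);
}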
1427
1428struct pcicfg_vpd *
1429pci_fetch_vpd_list(device_t dev)
1430{
1431 struct pci_devinfo *dinfo = device_get_ivars(dev);
1432 pcicfgregs *cfg = &dinfo->cfg;
1433
1434 if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0)
1435 pci_read_vpd(device_get_parent(device_get_parent(dev)), cfg);
1436 return (&cfg->vpd);
1437}
1438
1439/*
1440 * Find the requested HyperTransport capability and return the offset
1441 * in configuration space via the pointer provided. The function
1442 * returns 0 on success and an error code otherwise.
1443 */
1444int
1445pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg)
1446{
1447 int ptr, error;
1448 uint16_t val;
1449
1450 error = pci_find_cap(child, PCIY_HT, &ptr);
1451 if (error)
1452 return (error);
1453
1454 /*
1455 * Traverse the capabilities list checking each HT capability
1456 * to see if it matches the requested HT capability.
1457 */
1458 for (;;) {
1459 val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
1460 if (capability == PCIM_HTCAP_SLAVE ||
1461 capability == PCIM_HTCAP_HOST)
1462 val &= 0xe000;
1463 else
1464 val &= PCIM_HTCMD_CAP_MASK;
1465 if (val == capability) {
1466 if (capreg != NULL)
1467 *capreg = ptr;
1468 return (0);
1469 }
1470
1471 /* Skip to the next HT capability. */
1472 if (pci_find_next_cap(child, PCIY_HT, ptr, &ptr) != 0)
1473 break;
1474 }
1475
1476 return (ENOENT);
1477}
1478
1479/*
1480 * Find the next requested HyperTransport capability after start and return
1481 * the offset in configuration space via the pointer provided. The function
1482 * returns 0 on success and an error code otherwise.
1483 */
1484int
1485pci_find_next_htcap_method(device_t dev, device_t child, int capability,
1486 int start, int *capreg)
1487{
1488 int ptr;
1489 uint16_t val;
1490
1491 KASSERT(pci_read_config(child, start + PCICAP_ID, 1) == PCIY_HT,
1492 ("start capability is not HyperTransport capability"));
1493 ptr = start;
1494
1495 /*
1496 * Traverse the capabilities list checking each HT capability
1497 * to see if it matches the requested HT capability.
1498 */
1499 for (;;) {
1500 /* Skip to the next HT capability. */
1501 if (pci_find_next_cap(child, PCIY_HT, ptr, &ptr) != 0)
1502 break;
1503
1504 val = pci_read_config(child, ptr + PCIR_HT_COMMAND, 2);
1505 if (capability == PCIM_HTCAP_SLAVE ||
1506 capability == PCIM_HTCAP_HOST)
1507 val &= 0xe000;
1508 else
1509 val &= PCIM_HTCMD_CAP_MASK;
1510 if (val == capability) {
1511 if (capreg != NULL)
1512 *capreg = ptr;
1513 return (0);
1514 }
1515 }
1516
1517 return (ENOENT);
1518}
1519
1520/*
1521 * Find the requested capability and return the offset in
1522 * configuration space via the pointer provided. The function returns
1523 * 0 on success and an error code otherwise.
1524 */
1525int
1526pci_find_cap_method(device_t dev, device_t child, int capability,
1527 int *capreg)
1528{
1529 struct pci_devinfo *dinfo = device_get_ivars(child);
1530 pcicfgregs *cfg = &dinfo->cfg;
1531 uint32_t status;
1532 uint8_t ptr;
1533
1534 /*
1535 * Check the CAP_LIST bit of the PCI status register first.
1536 */
1537 status = pci_read_config(child, PCIR_STATUS, 2);
1538 if (!(status & PCIM_STATUS_CAPPRESENT))
1539 return (ENXIO);
1540
1541 /*
1542 * Determine the start pointer of the capabilities list.
1543 */
1544 switch (cfg->hdrtype & PCIM_HDRTYPE) {
1545 case PCIM_HDRTYPE_NORMAL:
1546 case PCIM_HDRTYPE_BRIDGE:
1547 ptr = PCIR_CAP_PTR;
1548 break;
1549 case PCIM_HDRTYPE_CARDBUS:
1550 ptr = PCIR_CAP_PTR_2;
1551 break;
1552 default:
1553 /* XXX: panic? */
1554 return (ENXIO); /* no extended capabilities support */
1555 }
1556 ptr = pci_read_config(child, ptr, 1);
1557
1558 /*
1559 * Traverse the capabilities list.
1560 */
1561 while (ptr != 0) {
1562 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1563 if (capreg != NULL)
1564 *capreg = ptr;
1565 return (0);
1566 }
1567 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1568 }
1569
1570 return (ENOENT);
1571}
1572
1573/*
1574 * Find the next requested capability after start and return the offset in
1575 * configuration space via the pointer provided. The function returns
1576 * 0 on success and an error code otherwise.
1577 */
1578int
1579pci_find_next_cap_method(device_t dev, device_t child, int capability,
1580 int start, int *capreg)
1581{
1582 uint8_t ptr;
1583
1584 KASSERT(pci_read_config(child, start + PCICAP_ID, 1) == capability,
1585 ("start capability is not expected capability"));
1586
1587 ptr = pci_read_config(child, start + PCICAP_NEXTPTR, 1);
1588 while (ptr != 0) {
1589 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
1590 if (capreg != NULL)
1591 *capreg = ptr;
1592 return (0);
1593 }
1594 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
1595 }
1596
1597 return (ENOENT);
1598}
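
Taken together, the pci_find_cap() and pci_find_next_cap() wrappers let a driver walk every instance of a capability rather than only the first. A small illustrative sketch (the example_ helper is an assumption, not a pci.c function):

/* Illustrative sketch: count how many times a capability ID appears. */
static int
example_count_caps(device_t dev, int capid)
{
	int n, ptr;

	if (pci_find_cap(dev, capid, &ptr) != 0)
		return (0);
	n = 1;
	while (pci_find_next_cap(dev, capid, ptr, &ptr) == 0)
		n++;
	return (n);
}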
1599
1600/*
1601 * Find the requested extended capability and return the offset in
1602 * configuration space via the pointer provided. The function returns
1603 * 0 on success and an error code otherwise.
1604 */
1605int
1606pci_find_extcap_method(device_t dev, device_t child, int capability,
1607 int *capreg)
1608{
1609 struct pci_devinfo *dinfo = device_get_ivars(child);
1610 pcicfgregs *cfg = &dinfo->cfg;
1611 uint32_t ecap;
1612 uint16_t ptr;
1613
1614 /* Only supported for PCI-express devices. */
1615 if (cfg->pcie.pcie_location == 0)
1616 return (ENXIO);
1617
1618 ptr = PCIR_EXTCAP;
1619 ecap = pci_read_config(child, ptr, 4);
1620 if (ecap == 0xffffffff || ecap == 0)
1621 return (ENOENT);
1622 for (;;) {
1623 if (PCI_EXTCAP_ID(ecap) == capability) {
1624 if (capreg != NULL)
1625 *capreg = ptr;
1626 return (0);
1627 }
1628 ptr = PCI_EXTCAP_NEXTPTR(ecap);
1629 if (ptr == 0)
1630 break;
1631 ecap = pci_read_config(child, ptr, 4);
1632 }
1633
1634 return (ENOENT);
1635}
1636
1637/*
1638 * Find the next requested extended capability after start and return the
1639 * offset in configuration space via the pointer provided. The function
1640 * returns 0 on success and an error code otherwise.
1641 */
1642int
1643pci_find_next_extcap_method(device_t dev, device_t child, int capability,
1644 int start, int *capreg)
1645{
1646 struct pci_devinfo *dinfo = device_get_ivars(child);
1647 pcicfgregs *cfg = &dinfo->cfg;
1648 uint32_t ecap;
1649 uint16_t ptr;
1650
1651 /* Only supported for PCI-express devices. */
1652 if (cfg->pcie.pcie_location == 0)
1653 return (ENXIO);
1654
1655 ecap = pci_read_config(child, start, 4);
1656 KASSERT(PCI_EXTCAP_ID(ecap) == capability,
1657 ("start extended capability is not expected capability"));
1658 ptr = PCI_EXTCAP_NEXTPTR(ecap);
1659 while (ptr != 0) {
1660 ecap = pci_read_config(child, ptr, 4);
1661 if (PCI_EXTCAP_ID(ecap) == capability) {
1662 if (capreg != NULL)
1663 *capreg = ptr;
1664 return (0);
1665 }
1666 ptr = PCI_EXTCAP_NEXTPTR(ecap);
1667 }
1668
1669 return (ENOENT);
1670}
1671
1672/*
1673 * Support for MSI-X message interrupts.
1674 */
1675static void
1676pci_write_msix_entry(device_t dev, u_int index, uint64_t address, uint32_t data)
1677{
1678 struct pci_devinfo *dinfo = device_get_ivars(dev);
1679 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1680 uint32_t offset;
1681
1682 KASSERT(msix->msix_table_len > index, ("bogus index"));
1683 offset = msix->msix_table_offset + index * 16;
1684 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
1685 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
1686 bus_write_4(msix->msix_table_res, offset + 8, data);
1687}
1688
1689void
1690pci_enable_msix_method(device_t dev, device_t child, u_int index,
1691 uint64_t address, uint32_t data)
1692{
1693
1694 if (pci_msix_rewrite_table) {
1695 struct pci_devinfo *dinfo = device_get_ivars(child);
1696 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1697
1698 /*
1699 * Some VM hosts require MSIX to be disabled in the
1700 * control register before updating the MSIX table
1701 * entries are allowed. It is not enough to only
1702 * disable MSIX while updating a single entry. MSIX
1703 * must be disabled while updating all entries in the
1704 * table.
1705 */
1706 pci_write_config(child,
1707 msix->msix_location + PCIR_MSIX_CTRL,
1708 msix->msix_ctrl & ~PCIM_MSIXCTRL_MSIX_ENABLE, 2);
1709 pci_resume_msix(child);
1710 } else
1711 pci_write_msix_entry(child, index, address, data);
1712
1713 /* Enable MSI -> HT mapping. */
1714 pci_ht_map_msi(child, address);
1715}
1716
1717void
1718pci_mask_msix(device_t dev, u_int index)
1719{
1720 struct pci_devinfo *dinfo = device_get_ivars(dev);
1721 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1722 uint32_t offset, val;
1723
1724 KASSERT(msix->msix_msgnum > index, ("bogus index"));
1725 offset = msix->msix_table_offset + index * 16 + 12;
1726 val = bus_read_4(msix->msix_table_res, offset);
1727 val |= PCIM_MSIX_VCTRL_MASK;
1728
1729 /*
1730 * Some devices (e.g. Samsung PM961) do not support reads of this
1731 * register, so always write the new value.
1732 */
1733 bus_write_4(msix->msix_table_res, offset, val);
1734}
1735
1736void
1737pci_unmask_msix(device_t dev, u_int index)
1738{
1739 struct pci_devinfo *dinfo = device_get_ivars(dev);
1740 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1741 uint32_t offset, val;
1742
1743 KASSERT(msix->msix_table_len > index, ("bogus index"));
1744 offset = msix->msix_table_offset + index * 16 + 12;
1745 val = bus_read_4(msix->msix_table_res, offset);
1746 val &= ~PCIM_MSIX_VCTRL_MASK;
1747
1748 /*
1749 * Some devices (e.g. Samsung PM961) do not support reads of this
1750 * register, so always write the new value.
1751 */
1752 bus_write_4(msix->msix_table_res, offset, val);
1753}
1754
1755int
1756pci_pending_msix(device_t dev, u_int index)
1757{
1758 struct pci_devinfo *dinfo = device_get_ivars(dev);
1759 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1760 uint32_t offset, bit;
1761
1762 KASSERT(msix->msix_table_len > index, ("bogus index"));
1763 offset = msix->msix_pba_offset + (index / 32) * 4;
1764 bit = 1 << index % 32;
1765 return (bus_read_4(msix->msix_pba_res, offset) & bit);
1766}
1767
1768/*
1769 * Restore MSI-X registers and table during resume. If MSI-X is
1770 * enabled then walk the virtual table to restore the actual MSI-X
1771 * table.
1772 */
1773static void
1774pci_resume_msix(device_t dev)
1775{
1776 struct pci_devinfo *dinfo = device_get_ivars(dev);
1777 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1778 struct msix_table_entry *mte;
1779 struct msix_vector *mv;
1780 int i;
1781
1782 if (msix->msix_alloc > 0) {
1783 /* First, mask all vectors. */
1784 for (i = 0; i < msix->msix_msgnum; i++)
1785 pci_mask_msix(dev, i);
1786
1787 /* Second, program any messages with at least one handler. */
1788 for (i = 0; i < msix->msix_table_len; i++) {
1789 mte = &msix->msix_table[i];
1790 if (mte->mte_vector == 0 || mte->mte_handlers == 0)
1791 continue;
1792 mv = &msix->msix_vectors[mte->mte_vector - 1];
1793 pci_write_msix_entry(dev, i, mv->mv_address,
1794 mv->mv_data);
1795 pci_unmask_msix(dev, i);
1796 }
1797 }
1798 pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
1799 msix->msix_ctrl, 2);
1800}
1801
1802/*
1803 * Attempt to allocate *count MSI-X messages. The actual number allocated is
1804 * returned in *count. After this function returns, each message will be
1805 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
1806 */
1807int
1808pci_alloc_msix_method(device_t dev, device_t child, int *count)
1809{
1810 struct pci_devinfo *dinfo = device_get_ivars(child);
1811 pcicfgregs *cfg = &dinfo->cfg;
1812 struct resource_list_entry *rle;
1813 int actual, error, i, irq, max;
1814
1815 /* Don't let count == 0 get us into trouble. */
1816 if (*count == 0)
1817 return (EINVAL);
1818
1819 /* If rid 0 is allocated, then fail. */
1820 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
1821 if (rle != NULL && rle->res != NULL)
1822 return (ENXIO);
1823
1824 /* Already have allocated messages? */
1825 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
1826 return (ENXIO);
1827
1828 /* If MSI-X is blacklisted for this system, fail. */
1829 if (pci_msix_blacklisted())
1830 return (ENXIO);
1831
1832 /* MSI-X capability present? */
1833 if (cfg->msix.msix_location == 0 || !pci_do_msix)
1834 return (ENODEV);
1835
1836 /* Make sure the appropriate BARs are mapped. */
1837 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1838 cfg->msix.msix_table_bar);
1839 if (rle == NULL || rle->res == NULL ||
1840 !(rman_get_flags(rle->res) & RF_ACTIVE))
1841 return (ENXIO);
1842 cfg->msix.msix_table_res = rle->res;
1843 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
1844 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
1845 cfg->msix.msix_pba_bar);
1846 if (rle == NULL || rle->res == NULL ||
1847 !(rman_get_flags(rle->res) & RF_ACTIVE))
1848 return (ENXIO);
1849 }
1850 cfg->msix.msix_pba_res = rle->res;
1851
1852 if (bootverbose)
1853 device_printf(child,
1854 "attempting to allocate %d MSI-X vectors (%d supported)\n",
1855 *count, cfg->msix.msix_msgnum);
1856 max = min(*count, cfg->msix.msix_msgnum);
1857 for (i = 0; i < max; i++) {
1858 /* Allocate a message. */
1859 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
1860 if (error) {
1861 if (i == 0)
1862 return (error);
1863 break;
1864 }
1865 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
1866 irq, 1);
1867 }
1868 actual = i;
1869
1870 if (bootverbose) {
1871 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
1872 if (actual == 1)
1873 device_printf(child, "using IRQ %ju for MSI-X\n",
1874 rle->start);
1875 else {
1876 int run;
1877
1878 /*
1879 * Be fancy and try to print contiguous runs of
1880 * IRQ values as ranges. 'irq' is the previous IRQ.
1881 * 'run' is true if we are in a range.
1882 */
1883 device_printf(child, "using IRQs %ju", rle->start);
1884 irq = rle->start;
1885 run = 0;
1886 for (i = 1; i < actual; i++) {
1887 rle = resource_list_find(&dinfo->resources,
1888 SYS_RES_IRQ, i + 1);
1889
1890 /* Still in a run? */
1891 if (rle->start == irq + 1) {
1892 run = 1;
1893 irq++;
1894 continue;
1895 }
1896
1897 /* Finish previous range. */
1898 if (run) {
1899 printf("-%d", irq);
1900 run = 0;
1901 }
1902
1903 /* Start new range. */
1904 printf(",%ju", rle->start);
1905 irq = rle->start;
1906 }
1907
1908 /* Unfinished range? */
1909 if (run)
1910 printf("-%d", irq);
1911 printf(" for MSI-X\n");
1912 }
1913 }
1914
1915 /* Mask all vectors. */
1916 for (i = 0; i < cfg->msix.msix_msgnum; i++)
1917 pci_mask_msix(child, i);
1918
1919 /* Allocate and initialize vector data and virtual table. */
1920 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
1921 M_DEVBUF, M_WAITOK | M_ZERO);
1922 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
1923 M_DEVBUF, M_WAITOK | M_ZERO);
1924 for (i = 0; i < actual; i++) {
1925 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
1926 cfg->msix.msix_vectors[i].mv_irq = rle->start;
1927 cfg->msix.msix_table[i].mte_vector = i + 1;
1928 }
1929
1930 /* Update control register to enable MSI-X. */
1931 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
1932 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
1933 cfg->msix.msix_ctrl, 2);
1934
1935 /* Update counts of alloc'd messages. */
1936 cfg->msix.msix_alloc = actual;
1937 cfg->msix.msix_table_len = actual;
1938 *count = actual;
1939 return (0);
1940}
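
From a driver's point of view the usual sequence is: size the table with pci_msix_count(), call the pci_alloc_msix() wrapper, then allocate the SYS_RES_IRQ resources at rids 1..n. A sketch with error handling and teardown elided; the example_ name is illustrative, and the MSI-X table/PBA BARs must already be mapped, as the method above requires.

/* Illustrative sketch: allocate MSI-X messages and their IRQ resources. */
static int
example_setup_msix(device_t dev, struct resource **irq, int want)
{
	int error, i, n, rid;

	n = min(want, pci_msix_count(dev));
	if (n == 0)
		return (ENXIO);
	error = pci_alloc_msix(dev, &n);
	if (error != 0)
		return (error);
	for (i = 0; i < n; i++) {
		rid = i + 1;	/* message i lives at SYS_RES_IRQ rid i + 1 */
		irq[i] = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
		    RF_ACTIVE);
	}
	return (0);
}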
1941
1942/*
1943 * By default, pci_alloc_msix() will assign the allocated IRQ
1944 * resources consecutively to the first N messages in the MSI-X table.
1945 * However, device drivers may want to use different layouts if they
1946 * either receive fewer messages than they asked for, or they wish to
1947 * populate the MSI-X table sparsely. This method allows the driver
1948 * to specify what layout it wants. It must be called after a
1949 * successful pci_alloc_msix() but before any of the associated
1950 * SYS_RES_IRQ resources are allocated via bus_alloc_resource().
1951 *
1952 * The 'vectors' array contains 'count' message vectors. The array
1953 * maps directly to the MSI-X table in that index 0 in the array
1954 * specifies the vector for the first message in the MSI-X table, etc.
1955 * The vector value in each array index can either be 0 to indicate
1956 * that no vector should be assigned to a message slot, or it can be a
1957 * number from 1 to N (where N is the count returned from a
1958 * successful call to pci_alloc_msix()) to indicate which message
1959 * vector (IRQ) to be used for the corresponding message.
1960 *
1961 * On successful return, each message with a non-zero vector will have
1962 * an associated SYS_RES_IRQ whose rid is equal to the array index +
1963 * 1. Additionally, if any of the IRQs allocated via the previous
1964 * call to pci_alloc_msix() are not used in the mapping, those IRQs
1965 * will be freed back to the system automatically.
1966 *
1967 * For example, suppose a driver has a MSI-X table with 6 messages and
1968 * asks for 6 messages, but pci_alloc_msix() only returns a count of
1969 * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and
1970 * C. After the call to pci_alloc_msix(), the device will be setup to
1971 * have an MSI-X table of ABC--- (where - means no vector assigned).
1972 * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 },
1973 * then the MSI-X table will look like A-AB-B, and the 'C' vector will
1974 * be freed back to the system. This device will also have valid
1975 * SYS_RES_IRQ rids of 1, 3, 4, and 6.
1976 *
1977 * In any case, the SYS_RES_IRQ rid X will always map to the message
1978 * at MSI-X table index X - 1 and will only be valid if a vector is
1979 * assigned to that table entry.
1980 */
1981int
1982pci_remap_msix_method(device_t dev, device_t child, int count,
1983 const u_int *vectors)
1984{
1985 struct pci_devinfo *dinfo = device_get_ivars(child);
1986 struct pcicfg_msix *msix = &dinfo->cfg.msix;
1987 struct resource_list_entry *rle;
1988 int i, irq, j, *used;
1989
1990 /*
1991 * Have to have at least one message in the table but the
1992 * table can't be bigger than the actual MSI-X table in the
1993 * device.
1994 */
1995 if (count == 0 || count > msix->msix_msgnum)
1996 return (EINVAL);
1997
1998 /* Sanity check the vectors. */
1999 for (i = 0; i < count; i++)
2000 if (vectors[i] > msix->msix_alloc)
2001 return (EINVAL);
2002
2003 /*
2004 * Make sure there aren't any holes in the vectors to be used.
2005 * It's a big pain to support it, and it doesn't really make
2006 * sense anyway. Also, at least one vector must be used.
2007 */
2008 used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK |
2009 M_ZERO);
2010 for (i = 0; i < count; i++)
2011 if (vectors[i] != 0)
2012 used[vectors[i] - 1] = 1;
2013 for (i = 0; i < msix->msix_alloc - 1; i++)
2014 if (used[i] == 0 && used[i + 1] == 1) {
2015 free(used, M_DEVBUF);
2016 return (EINVAL);
2017 }
2018 if (used[0] != 1) {
2019 free(used, M_DEVBUF);
2020 return (EINVAL);
2021 }
2022
2023 /* Make sure none of the resources are allocated. */
2024 for (i = 0; i < msix->msix_table_len; i++) {
2025 if (msix->msix_table[i].mte_vector == 0)
2026 continue;
2027 if (msix->msix_table[i].mte_handlers > 0) {
2028 free(used, M_DEVBUF);
2029 return (EBUSY);
2030 }
2031 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2032 KASSERT(rle != NULL, ("missing resource"));
2033 if (rle->res != NULL) {
2034 free(used, M_DEVBUF);
2035 return (EBUSY);
2036 }
2037 }
2038
2039 /* Free the existing resource list entries. */
2040 for (i = 0; i < msix->msix_table_len; i++) {
2041 if (msix->msix_table[i].mte_vector == 0)
2042 continue;
2043 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2044 }
2045
2046 /*
2047 * Build the new virtual table keeping track of which vectors are
2048 * used.
2049 */
2050 free(msix->msix_table, M_DEVBUF);
2051 msix->msix_table = malloc(sizeof(struct msix_table_entry) * count,
2052 M_DEVBUF, M_WAITOK | M_ZERO);
2053 for (i = 0; i < count; i++)
2054 msix->msix_table[i].mte_vector = vectors[i];
2055 msix->msix_table_len = count;
2056
2057 /* Free any unused IRQs and resize the vectors array if necessary. */
2058 j = msix->msix_alloc - 1;
2059 if (used[j] == 0) {
2060 struct msix_vector *vec;
2061
2062 while (used[j] == 0) {
2063 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
2064 msix->msix_vectors[j].mv_irq);
2065 j--;
2066 }
2067 vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF,
2068 M_WAITOK);
2069 bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) *
2070 (j + 1));
2071 free(msix->msix_vectors, M_DEVBUF);
2072 msix->msix_vectors = vec;
2073 msix->msix_alloc = j + 1;
2074 }
2075 free(used, M_DEVBUF);
2076
2077 /* Map the IRQs onto the rids. */
2078 for (i = 0; i < count; i++) {
2079 if (vectors[i] == 0)
2080 continue;
2081 irq = msix->msix_vectors[vectors[i] - 1].mv_irq;
2082 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
2083 irq, 1);
2084 }
2085
2086 if (bootverbose) {
2087 device_printf(child, "Remapped MSI-X IRQs as: ");
2088 for (i = 0; i < count; i++) {
2089 if (i != 0)
2090 printf(", ");
2091 if (vectors[i] == 0)
2092 printf("---");
2093 else
2094 printf("%d",
2095 msix->msix_vectors[vectors[i] - 1].mv_irq);
2096 }
2097 printf("\n");
2098 }
2099
2100 return (0);
2101}
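/*
 * Editorial example (not part of pci.c): a minimal driver-side sketch of
 * the remapping described in the comment above, using the pci_alloc_msix(),
 * pci_remap_msix() and pci_release_msi() wrappers from <dev/pci/pcivar.h>.
 * The function name and the 6-entry layout are hypothetical.
 */
static int
example_setup_sparse_msix(device_t dev)
{
	/*
	 * Table layout A-AB-B from the example above: entries 0 and 2 share
	 * vector 1, entries 3 and 5 share vector 2, entries 1 and 4 unused.
	 */
	const u_int layout[6] = { 1, 0, 1, 2, 0, 2 };
	int count, error;

	count = 6;
	error = pci_alloc_msix(dev, &count);	/* may grant fewer, e.g. 3 */
	if (error != 0)
		return (error);
	error = pci_remap_msix(dev, nitems(layout), layout);
	if (error != 0)
		pci_release_msi(dev);
	return (error);
}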
2102
2103static int
2104pci_release_msix(device_t dev, device_t child)
2105{
2106 struct pci_devinfo *dinfo = device_get_ivars(child);
2107 struct pcicfg_msix *msix = &dinfo->cfg.msix;
2108 struct resource_list_entry *rle;
2109 int i;
2110
2111 /* Do we have any messages to release? */
2112 if (msix->msix_alloc == 0)
2113 return (ENODEV);
2114
2115 /* Make sure none of the resources are allocated. */
2116 for (i = 0; i < msix->msix_table_len; i++) {
2117 if (msix->msix_table[i].mte_vector == 0)
2118 continue;
2119 if (msix->msix_table[i].mte_handlers > 0)
2120 return (EBUSY);
2121 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2122 KASSERT(rle != NULL, ("missing resource"));
2123 if (rle->res != NULL)
2124 return (EBUSY);
2125 }
2126
2127 /* Update control register to disable MSI-X. */
2128 msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
2129 pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
2130 msix->msix_ctrl, 2);
2131
2132 /* Free the resource list entries. */
2133 for (i = 0; i < msix->msix_table_len; i++) {
2134 if (msix->msix_table[i].mte_vector == 0)
2135 continue;
2136 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2137 }
2138 free(msix->msix_table, M_DEVBUF);
2139 msix->msix_table_len = 0;
2140
2141 /* Release the IRQs. */
2142 for (i = 0; i < msix->msix_alloc; i++)
2143 PCIB_RELEASE_MSIX(device_get_parent(dev), child,
2144 msix->msix_vectors[i].mv_irq);
2145 free(msix->msix_vectors, M_DEVBUF);
2146 msix->msix_alloc = 0;
2147 return (0);
2148}
2149
2150/*
2151 * Return the max supported MSI-X messages this device supports.
2152 * Basically, assuming the MD code can alloc messages, this function
2153 * should return the maximum value that pci_alloc_msix() can return.
2154 * Thus, it is subject to the tunables, etc.
2155 */
2156int
2157pci_msix_count_method(device_t dev, device_t child)
2158{
2159 struct pci_devinfo *dinfo = device_get_ivars(child);
2160 struct pcicfg_msix *msix = &dinfo->cfg.msix;
2161
2162 if (pci_do_msix && msix->msix_location != 0)
2163 return (msix->msix_msgnum);
2164 return (0);
2165}
2166
2167int
2168pci_msix_pba_bar_method(device_t dev, device_t child)
2169{
2170 struct pci_devinfo *dinfo = device_get_ivars(child);
2171 struct pcicfg_msix *msix = &dinfo->cfg.msix;
2172
2173 if (pci_do_msix && msix->msix_location != 0)
2174 return (msix->msix_pba_bar);
2175 return (-1);
2176}
2177
2178int
2179pci_msix_table_bar_method(device_t dev, device_t child)
2180{
2181 struct pci_devinfo *dinfo = device_get_ivars(child);
2182 struct pcicfg_msix *msix = &dinfo->cfg.msix;
2183
2184 if (pci_do_msix && msix->msix_location != 0)
2185 return (msix->msix_table_bar);
2186 return (-1);
2187}
2188
2189/*
2190 * HyperTransport MSI mapping control
2191 */
2192void
2193pci_ht_map_msi(device_t dev, uint64_t addr)
2194{
2195 struct pci_devinfo *dinfo = device_get_ivars(dev);
2196 struct pcicfg_ht *ht = &dinfo->cfg.ht;
2197
2198 if (!ht->ht_msimap)
2199 return;
2200
2201 if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
2202 ht->ht_msiaddr >> 20 == addr >> 20) {
2203 /* Enable MSI -> HT mapping. */
2204 ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
2205 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
2206 ht->ht_msictrl, 2);
2207 }
2208
2209 if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
2210 /* Disable MSI -> HT mapping. */
2211 ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
2212 pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
2213 ht->ht_msictrl, 2);
2214 }
2215}
2216
2217int
2218pci_get_relaxed_ordering_enabled(device_t dev)
2219{
2220 struct pci_devinfo *dinfo = device_get_ivars(dev);
2221 int cap;
2222 uint16_t val;
2223
2224 cap = dinfo->cfg.pcie.pcie_location;
2225 if (cap == 0)
2226 return (0);
2227 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2228 val &= PCIEM_CTL_RELAXED_ORD_ENABLE;
2229 return (val != 0);
2230}
2231
2232int
2233pci_get_max_payload(device_t dev)
2234{
2235 struct pci_devinfo *dinfo = device_get_ivars(dev);
2236 int cap;
2237 uint16_t val;
2238
2239 cap = dinfo->cfg.pcie.pcie_location;
2240 if (cap == 0)
2241 return (0);
2242 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2243 val &= PCIEM_CTL_MAX_PAYLOAD;
2244 val >>= 5;
2245 return (1 << (val + 7));
2246}
2247
2248int
2249pci_get_max_read_req(device_t dev)
2250{
2251 struct pci_devinfo *dinfo = device_get_ivars(dev);
2252 int cap;
2253 uint16_t val;
2254
2255 cap = dinfo->cfg.pcie.pcie_location;
2256 if (cap == 0)
2257 return (0);
2258 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2259 val &= PCIEM_CTL_MAX_READ_REQUEST;
2260 val >>= 12;
2261 return (1 << (val + 7));
2262}
2263
2264int
2265pci_set_max_read_req(device_t dev, int size)
2266{
2267 struct pci_devinfo *dinfo = device_get_ivars(dev);
2268 int cap;
2269 uint16_t val;
2270
2271 cap = dinfo->cfg.pcie.pcie_location;
2272 if (cap == 0)
2273 return (0);
2274 if (size < 128)
2275 size = 128;
2276 if (size > 4096)
2277 size = 4096;
2278 size = (1 << (fls(size) - 1));
2279 val = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
2280 val &= ~PCIEM_CTL_MAX_READ_REQUEST;
2281 val |= (fls(size) - 8) << 12;
2282 pci_write_config(dev, cap + PCIER_DEVICE_CTL, val, 2);
2283 return (size);
2284}
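/*
 * Editorial note (not part of pci.c), a worked example of the encoding
 * above: the MRRS field occupies bits 14:12 of the PCIe Device Control
 * register and encodes 128 << k bytes.  For size = 4096, fls(4096) = 13,
 * so the field is written as 13 - 8 = 5 (i.e. 5 << 12), and the decode in
 * pci_get_max_read_req() reverses it as 1 << (5 + 7) = 4096.  A driver
 * would typically just call pci_set_max_read_req(dev, 4096) and use the
 * clamped value it returns.
 */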
2285
2286uint32_t
2287pcie_read_config(device_t dev, int reg, int width)
2288{
2289 struct pci_devinfo *dinfo = device_get_ivars(dev);
2290 int cap;
2291
2292 cap = dinfo->cfg.pcie.pcie_location;
2293 if (cap == 0) {
2294 if (width == 2)
2295 return (0xffff);
2296 return (0xffffffff);
2297 }
2298
2299 return (pci_read_config(dev, cap + reg, width));
2300}
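/*
 * Editorial example (not part of pci.c): reading a PCIe capability
 * register through the helper above.  The function name is hypothetical;
 * PCIER_LINK_STA and PCIEM_LINK_STA_WIDTH come from <dev/pci/pcireg.h>.
 */
static int
example_pcie_link_width(device_t dev)
{
	uint16_t sta;

	sta = pcie_read_config(dev, PCIER_LINK_STA, 2);
	if (sta == 0xffff)
		return (0);	/* device has no PCIe capability */
	return ((sta & PCIEM_LINK_STA_WIDTH) >> 4);
}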
2301
2302void
2303pcie_write_config(device_t dev, int reg, uint32_t value, int width)
2304{
2305 struct pci_devinfo *dinfo = device_get_ivars(dev);
2306 int cap;
2307
2308 cap = dinfo->cfg.pcie.pcie_location;
2309 if (cap == 0)
2310 return;
2311 pci_write_config(dev, cap + reg, value, width);
2312}
2313
2314/*
2315 * Adjusts a PCI-e capability register by clearing the bits in mask
2316 * and setting the bits in (value & mask). Bits not set in mask are
2317 * not adjusted.
2318 *
2319 * Returns the old value on success or all ones on failure.
2320 */
2321uint32_t
2322pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value,
2323 int width)
2324{
2325 struct pci_devinfo *dinfo = device_get_ivars(dev);
2326 uint32_t old, new;
2327 int cap;
2328
2329 cap = dinfo->cfg.pcie.pcie_location;
2330 if (cap == 0) {
2331 if (width == 2)
2332 return (0xffff);
2333 return (0xffffffff);
2334 }
2335
2336 old = pci_read_config(dev, cap + reg, width);
2337 new = old & ~mask;
2338 new |= (value & mask);
2339 pci_write_config(dev, cap + reg, new, width);
2340 return (old);
2341}
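/*
 * Editorial example (not part of pci.c): a read-modify-write of the Device
 * Control register using pcie_adjust_config(), here turning on relaxed
 * ordering without touching the other bits.  The wrapper function name is
 * hypothetical.
 */
static void
example_enable_relaxed_ordering(device_t dev)
{
	(void)pcie_adjust_config(dev, PCIER_DEVICE_CTL,
	    PCIEM_CTL_RELAXED_ORD_ENABLE, PCIEM_CTL_RELAXED_ORD_ENABLE, 2);
}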
2342
2343/*
2344 * Support for MSI message signalled interrupts.
2345 */
2346void
2347pci_enable_msi_method(device_t dev, device_t child, uint64_t address,
2348 uint16_t data)
2349{
2350 struct pci_devinfo *dinfo = device_get_ivars(child);
2351 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2352
2353 /* Write data and address values. */
2354 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR,
2355 address & 0xffffffff, 4);
2356 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
2357 pci_write_config(child, msi->msi_location + PCIR_MSI_ADDR_HIGH,
2358 address >> 32, 4);
2359 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA_64BIT,
2360 data, 2);
2361 } else
2362 pci_write_config(child, msi->msi_location + PCIR_MSI_DATA, data,
2363 2);
2364
2365 /* Enable MSI in the control register. */
2366 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
2367 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2368 msi->msi_ctrl, 2);
2369
2370 /* Enable MSI -> HT mapping. */
2371 pci_ht_map_msi(child, address);
2372}
2373
2374void
2375pci_disable_msi_method(device_t dev, device_t child)
2376{
2377 struct pci_devinfo *dinfo = device_get_ivars(child);
2378 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2379
2380 /* Disable MSI -> HT mapping. */
2381 pci_ht_map_msi(child, 0);
2382
2383 /* Disable MSI in the control register. */
2384 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
2385 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2386 msi->msi_ctrl, 2);
2387}
2388
2389/*
2390 * Restore MSI registers during resume. If MSI is enabled then
2391 * restore the data and address registers in addition to the control
2392 * register.
2393 */
2394static void
2395pci_resume_msi(device_t dev)
2396{
2397 struct pci_devinfo *dinfo = device_get_ivars(dev);
2398 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2399 uint64_t address;
2400 uint16_t data;
2401
2402 if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
2403 address = msi->msi_addr;
2404 data = msi->msi_data;
2405 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
2406 address & 0xffffffff, 4);
2407 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
2408 pci_write_config(dev, msi->msi_location +
2409 PCIR_MSI_ADDR_HIGH, address >> 32, 4);
2410 pci_write_config(dev, msi->msi_location +
2411 PCIR_MSI_DATA_64BIT, data, 2);
2412 } else
2413 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
2414 data, 2);
2415 }
2416 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
2417 2);
2418}
2419
2420static int
2421pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
2422{
2423 struct pci_devinfo *dinfo = device_get_ivars(dev);
2424 pcicfgregs *cfg = &dinfo->cfg;
2425 struct resource_list_entry *rle;
2426 struct msix_table_entry *mte;
2427 struct msix_vector *mv;
2428 uint64_t addr;
2429 uint32_t data;
2430 int error, i, j;
2431
2432 /*
2433 * Handle MSI first. We try to find this IRQ among our list
2434 * of MSI IRQs. If we find it, we request updated address and
2435 * data registers and apply the results.
2436 */
2437 if (cfg->msi.msi_alloc > 0) {
2438 /* If we don't have any active handlers, nothing to do. */
2439 if (cfg->msi.msi_handlers == 0)
2440 return (0);
2441 for (i = 0; i < cfg->msi.msi_alloc; i++) {
2442 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
2443 i + 1);
2444 if (rle->start == irq) {
2445 error = PCIB_MAP_MSI(device_get_parent(bus),
2446 dev, irq, &addr, &data);
2447 if (error)
2448 return (error);
2449 pci_disable_msi(dev);
2450 dinfo->cfg.msi.msi_addr = addr;
2451 dinfo->cfg.msi.msi_data = data;
2452 pci_enable_msi(dev, addr, data);
2453 return (0);
2454 }
2455 }
2456 return (ENOENT);
2457 }
2458
2459 /*
2460 * For MSI-X, we check to see if we have this IRQ. If we do,
2461 * we request the updated mapping info. If that works, we go
2462 * through all the slots that use this IRQ and update them.
2463 */
2464 if (cfg->msix.msix_alloc > 0) {
2465 for (i = 0; i < cfg->msix.msix_alloc; i++) {
2466 mv = &cfg->msix.msix_vectors[i];
2467 if (mv->mv_irq == irq) {
2468 error = PCIB_MAP_MSI(device_get_parent(bus),
2469 dev, irq, &addr, &data);
2470 if (error)
2471 return (error);
2472 mv->mv_address = addr;
2473 mv->mv_data = data;
2474 for (j = 0; j < cfg->msix.msix_table_len; j++) {
2475 mte = &cfg->msix.msix_table[j];
2476 if (mte->mte_vector != i + 1)
2477 continue;
2478 if (mte->mte_handlers == 0)
2479 continue;
2480 pci_mask_msix(dev, j);
2481 pci_enable_msix(dev, j, addr, data);
2482 pci_unmask_msix(dev, j);
2483 }
2484 }
2485 }
2486 return (ENOENT);
2487 }
2488
2489 return (ENOENT);
2490}
2491
2492/*
2493 * Returns true if the specified device is blacklisted because MSI
2494 * doesn't work.
2495 */
2496int
2497pci_msi_device_blacklisted(device_t dev)
2498{
2499
2500 if (!pci_honor_msi_blacklist)
2501 return (0);
2502
2503 return (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSI));
2504}
2505
2506/*
2507 * Determine if MSI is blacklisted globally on this system. Currently,
2508 * we just check for blacklisted chipsets as represented by the
2509 * host-PCI bridge at device 0:0:0. In the future, it may become
2510 * necessary to check other system attributes, such as the kenv values
2511 * that give the motherboard manufacturer and model number.
2512 */
2513static int
2514pci_msi_blacklisted(void)
2515{
2516 device_t dev;
2517
2518 if (!pci_honor_msi_blacklist)
2519 return (0);
2520
2521 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
2522 if (!(pcie_chipset || pcix_chipset)) {
2523 if (vm_guest != VM_GUEST_NO) {
2524 /*
2525 * Whitelist older chipsets in virtual
2526 * machines known to support MSI.
2527 */
2528 dev = pci_find_bsf(0, 0, 0);
2529 if (dev != NULL)
2530 return (!pci_has_quirk(pci_get_devid(dev),
2531 PCI_QUIRK_ENABLE_MSI_VM));
2532 }
2533 return (1);
2534 }
2535
2536 dev = pci_find_bsf(0, 0, 0);
2537 if (dev != NULL)
2538 return (pci_msi_device_blacklisted(dev));
2539 return (0);
2540}
2541
2542/*
2543 * Returns true if the specified device is blacklisted because MSI-X
2544 * doesn't work. Note that this assumes that if MSI doesn't work,
2545 * MSI-X doesn't either.
2546 */
2547int
2548pci_msix_device_blacklisted(device_t dev)
2549{
2550
2551 if (!pci_honor_msi_blacklist)
2552 return (0);
2553
2554 if (pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_DISABLE_MSIX))
2555 return (1);
2556
2557 return (pci_msi_device_blacklisted(dev));
2558}
2559
2560/*
2561 * Determine if MSI-X is blacklisted globally on this system. If MSI
2562 * is blacklisted, assume that MSI-X is as well. Check for additional
2563 * chipsets where MSI works but MSI-X does not.
2564 */
2565static int
2566pci_msix_blacklisted(void)
2567{
2568 device_t dev;
2569
2570 if (!pci_honor_msi_blacklist)
2571 return (0);
2572
2573 dev = pci_find_bsf(0, 0, 0);
2574 if (dev != NULL && pci_has_quirk(pci_get_devid(dev),
2575 PCI_QUIRK_DISABLE_MSIX))
2576 return (1);
2577
2578 return (pci_msi_blacklisted());
2579}
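/*
 * Editorial note (not part of pci.c): pci_honor_msi_blacklist is normally
 * controlled by a loader tunable, so on systems where the blacklist is
 * known to be overly conservative the checks above can be bypassed with
 * something like the following in loader.conf (assumed tunable name):
 *
 *	hw.pci.honor_msi_blacklist="0"
 */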
2580
2581/*
2582 * Attempt to allocate *count MSI messages. The actual number allocated is
2583 * returned in *count. After this function returns, each message will be
2584 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
2585 */
2586int
2587pci_alloc_msi_method(device_t dev, device_t child, int *count)
2588{
2589 struct pci_devinfo *dinfo = device_get_ivars(child);
2590 pcicfgregs *cfg = &dinfo->cfg;
2591 struct resource_list_entry *rle;
2592 int actual, error, i, irqs[32];
2593 uint16_t ctrl;
2594
2595 /* Don't let count == 0 get us into trouble. */
2596 if (*count == 0)
2597 return (EINVAL);
2598
2599 /* If rid 0 is allocated, then fail. */
2600 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
2601 if (rle != NULL && rle->res != NULL)
2602 return (ENXIO);
2603
2604 /* Already have allocated messages? */
2605 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
2606 return (ENXIO);
2607
2608 /* If MSI is blacklisted for this system, fail. */
2609 if (pci_msi_blacklisted())
2610 return (ENXIO);
2611
2612 /* MSI capability present? */
2613 if (cfg->msi.msi_location == 0 || !pci_do_msi)
2614 return (ENODEV);
2615
2616 if (bootverbose)
2617 device_printf(child,
2618 "attempting to allocate %d MSI vectors (%d supported)\n",
2619 *count, cfg->msi.msi_msgnum);
2620
2621 /* Don't ask for more than the device supports. */
2622 actual = min(*count, cfg->msi.msi_msgnum);
2623
2624 /* Don't ask for more than 32 messages. */
2625 actual = min(actual, 32);
2626
2627 /* MSI requires power of 2 number of messages. */
2628 if (!powerof2(actual))
2629 return (EINVAL);
2630
2631 for (;;) {
2632 /* Try to allocate N messages. */
2633 error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
2634 actual, irqs);
2635 if (error == 0)
2636 break;
2637 if (actual == 1)
2638 return (error);
2639
2640 /* Try N / 2. */
2641 actual >>= 1;
2642 }
2643
2644 /*
2645 * We now have N actual messages mapped onto SYS_RES_IRQ
2646 * resources in the irqs[] array, so add new resources
2647 * starting at rid 1.
2648 */
2649 for (i = 0; i < actual; i++)
2650 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
2651 irqs[i], irqs[i], 1);
2652
2653 if (bootverbose) {
2654 if (actual == 1)
2655 device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
2656 else {
2657 int run;
2658
2659 /*
2660 * Be fancy and try to print contiguous runs
2661 * of IRQ values as ranges. 'run' is true if
2662 * we are in a range.
2663 */
2664 device_printf(child, "using IRQs %d", irqs[0]);
2665 run = 0;
2666 for (i = 1; i < actual; i++) {
2667 /* Still in a run? */
2668 if (irqs[i] == irqs[i - 1] + 1) {
2669 run = 1;
2670 continue;
2671 }
2672
2673 /* Finish previous range. */
2674 if (run) {
2675 printf("-%d", irqs[i - 1]);
2676 run = 0;
2677 }
2678
2679 /* Start new range. */
2680 printf(",%d", irqs[i]);
2681 }
2682
2683 /* Unfinished range? */
2684 if (run)
2685 printf("-%d", irqs[actual - 1]);
2686 printf(" for MSI\n");
2687 }
2688 }
2689
2690 /* Update control register with actual count. */
2691 ctrl = cfg->msi.msi_ctrl;
2692 ctrl &= ~PCIM_MSICTRL_MME_MASK;
2693 ctrl |= (ffs(actual) - 1) << 4;
2694 cfg->msi.msi_ctrl = ctrl;
2695 pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);
2696
2697 /* Update counts of alloc'd messages. */
2698 cfg->msi.msi_alloc = actual;
2699 cfg->msi.msi_handlers = 0;
2700 *count = actual;
2701 return (0);
2702}
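/*
 * Editorial example (not part of pci.c): a common driver-side pattern for
 * the allocator above; request one MSI message and fall back to the legacy
 * INTx resource at rid 0.  The function name is hypothetical.
 */
static int
example_alloc_irq(device_t dev, struct resource **irq_res, int *rid)
{
	int count;

	count = 1;
	if (pci_alloc_msi(dev, &count) == 0)
		*rid = 1;		/* MSI messages start at rid 1 */
	else
		*rid = 0;		/* legacy INTx interrupt */
	*irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, rid,
	    RF_ACTIVE | (*rid == 0 ? RF_SHAREABLE : 0));
	return (*irq_res != NULL ? 0 : ENXIO);
}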
2703
2704/* Release the MSI messages associated with this device. */
2705int
2706pci_release_msi_method(device_t dev, device_t child)
2707{
2708 struct pci_devinfo *dinfo = device_get_ivars(child);
2709 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2710 struct resource_list_entry *rle;
2711 int error, i, irqs[32];
2712
2713 /* Try MSI-X first. */
2714 error = pci_release_msix(dev, child);
2715 if (error != ENODEV)
2716 return (error);
2717
2718 /* Do we have any messages to release? */
2719 if (msi->msi_alloc == 0)
2720 return (ENODEV);
2721 KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));
2722
2723 /* Make sure none of the resources are allocated. */
2724 if (msi->msi_handlers > 0)
2725 return (EBUSY);
2726 for (i = 0; i < msi->msi_alloc; i++) {
2727 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
2728 KASSERT(rle != NULL, ("missing MSI resource"));
2729 if (rle->res != NULL)
2730 return (EBUSY);
2731 irqs[i] = rle->start;
2732 }
2733
2734 /* Update control register with 0 count. */
2735 KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
2736 ("%s: MSI still enabled", __func__));
2737 msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
2738 pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
2739 msi->msi_ctrl, 2);
2740
2741 /* Release the messages. */
2742 PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
2743 for (i = 0; i < msi->msi_alloc; i++)
2744 resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
2745
2746 /* Update alloc count. */
2747 msi->msi_alloc = 0;
2748 msi->msi_addr = 0;
2749 msi->msi_data = 0;
2750 return (0);
2751}
2752
2753/*
2754 * Return the max supported MSI messages this device supports.
2755 * Basically, assuming the MD code can alloc messages, this function
2756 * should return the maximum value that pci_alloc_msi() can return.
2757 * Thus, it is subject to the tunables, etc.
2758 */
2759int
2760pci_msi_count_method(device_t dev, device_t child)
2761{
2762 struct pci_devinfo *dinfo = device_get_ivars(child);
2763 struct pcicfg_msi *msi = &dinfo->cfg.msi;
2764
2765 if (pci_do_msi && msi->msi_location != 0)
2766 return (msi->msi_msgnum);
2767 return (0);
2768}
2769
2770/* free pcicfgregs structure and all depending data structures */
2771
2772int
2773pci_freecfg(struct pci_devinfo *dinfo)
2774{
2775 struct devlist *devlist_head;
2776 struct pci_map *pm, *next;
2777 int i;
2778
2779 devlist_head = &pci_devq;
2780
2781 if (dinfo->cfg.vpd.vpd_reg) {
2782 free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF);
2783 for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++)
2784 free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF);
2785 free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF);
2786 for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++)
2787 free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF);
2788 free(dinfo->cfg.vpd.vpd_w, M_DEVBUF);
2789 }
2790 STAILQ_FOREACH_SAFE(pm, &dinfo->cfg.maps, pm_link, next) {
2791 free(pm, M_DEVBUF);
2792 }
2793 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
2794 free(dinfo, M_DEVBUF);
2795
2796 /* increment the generation count */
2797 pci_generation++;
2798
2799 /* we're losing one device */
2800 pci_numdevs--;
2801 return (0);
2802}
2803
2804/*
2805 * PCI power management
2806 */
2807int
2808pci_set_powerstate_method(device_t dev, device_t child, int state)
2809{
2810 struct pci_devinfo *dinfo = device_get_ivars(child);
2811 pcicfgregs *cfg = &dinfo->cfg;
2812 uint16_t status;
2813 int oldstate, highest, delay;
2814
2815 if (cfg->pp.pp_cap == 0)
2816 return (EOPNOTSUPP);
2817
2818 /*
2819 * Optimize a no state change request away. While it would be OK to
2820 * write to the hardware in theory, some devices have shown odd
2821 * behavior when going from D3 -> D3.
2822 */
2823 oldstate = pci_get_powerstate(child);
2824 if (oldstate == state)
2825 return (0);
2826
2827 /*
2828 * The PCI power management specification states that after a state
2829 * transition between PCI power states, system software must
2830 * guarantee a minimal delay before the function accesses the device.
2831 * Compute the worst case delay that we need to guarantee before we
2832 * access the device. Many devices will be responsive much more
2833 * quickly than this delay, but there are some that don't respond
2834 * instantly to state changes. Transitions to/from D3 state require
2835 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
2836 * is done below with DELAY rather than a sleeper function because
2837 * this function can be called from contexts where we cannot sleep.
2838 */
2839 highest = (oldstate > state) ? oldstate : state;
2840 if (highest == PCI_POWERSTATE_D3)
2841 delay = 10000;
2842 else if (highest == PCI_POWERSTATE_D2)
2843 delay = 200;
2844 else
2845 delay = 0;
2846 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
2847 & ~PCIM_PSTAT_DMASK;
2848 switch (state) {
2849 case PCI_POWERSTATE_D0:
2850 status |= PCIM_PSTAT_D0;
2851 break;
2852 case PCI_POWERSTATE_D1:
2853 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
2854 return (EOPNOTSUPP);
2855 status |= PCIM_PSTAT_D1;
2856 break;
2857 case PCI_POWERSTATE_D2:
2858 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
2859 return (EOPNOTSUPP);
2860 status |= PCIM_PSTAT_D2;
2861 break;
2862 case PCI_POWERSTATE_D3:
2863 status |= PCIM_PSTAT_D3;
2864 break;
2865 default:
2866 return (EINVAL);
2867 }
2868
2869 if (bootverbose)
2870 pci_printf(cfg, "Transition from D%d to D%d\n", oldstate,
2871 state);
2872
2873 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
2874 if (delay)
2875 DELAY(delay);
2876 return (0);
2877}
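/*
 * Editorial example (not part of pci.c): suspend/resume methods of a
 * hypothetical driver using the pci_set_powerstate() wrapper, which ends
 * up in the method above.
 */
static int
example_suspend(device_t dev)
{
	/* Quiesce the hardware first, then drop to D3. */
	pci_set_powerstate(dev, PCI_POWERSTATE_D3);
	return (0);
}

static int
example_resume(device_t dev)
{
	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	/* Reprogram device registers lost in D3 here. */
	return (0);
}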
2878
2879int
2880pci_get_powerstate_method(device_t dev, device_t child)
2881{
2882 struct pci_devinfo *dinfo = device_get_ivars(child);
2883 pcicfgregs *cfg = &dinfo->cfg;
2884 uint16_t status;
2885 int result;
2886
2887 if (cfg->pp.pp_cap != 0) {
2888 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
2889 switch (status & PCIM_PSTAT_DMASK) {
2890 case PCIM_PSTAT_D0:
2891 result = PCI_POWERSTATE_D0;
2892 break;
2893 case PCIM_PSTAT_D1:
2894 result = PCI_POWERSTATE_D1;
2895 break;
2896 case PCIM_PSTAT_D2:
2897 result = PCI_POWERSTATE_D2;
2898 break;
2899 case PCIM_PSTAT_D3:
2900 result = PCI_POWERSTATE_D3;
2901 break;
2902 default:
2903 result = PCI_POWERSTATE_UNKNOWN;
2904 break;
2905 }
2906 } else {
2907 /* No support, device is always at D0 */
2908 result = PCI_POWERSTATE_D0;
2909 }
2910 return (result);
2911}
2912
2913/*
2914 * Some convenience functions for PCI device drivers.
2915 */
2916
2917static __inline void
2918pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
2919{
2920 uint16_t command;
2921
2922 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2923 command |= bit;
2924 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2925}
2926
2927static __inline void
2928pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
2929{
2930 uint16_t command;
2931
2932 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
2933 command &= ~bit;
2934 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
2935}
2936
2937int
2938pci_enable_busmaster_method(device_t dev, device_t child)
2939{
2940 pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2941 return (0);
2942}
2943
2944int
2945pci_disable_busmaster_method(device_t dev, device_t child)
2946{
2947 pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
2948 return (0);
2949}
2950
2951int
2952pci_enable_io_method(device_t dev, device_t child, int space)
2953{
2954 uint16_t bit;
2955
2956 switch(space) {
2957 case SYS_RES_IOPORT:
2958 bit = PCIM_CMD_PORTEN;
2959 break;
2960 case SYS_RES_MEMORY:
2961 bit = PCIM_CMD_MEMEN;
2962 break;
2963 default:
2964 return (EINVAL);
2965 }
2966 pci_set_command_bit(dev, child, bit);
2967 return (0);
2968}
2969
2970int
2971pci_disable_io_method(device_t dev, device_t child, int space)
2972{
2973 uint16_t bit;
2974
2975 switch(space) {
2976 case SYS_RES_IOPORT:
2977 bit = PCIM_CMD_PORTEN;
2978 break;
2979 case SYS_RES_MEMORY:
2980 bit = PCIM_CMD_MEMEN;
2981 break;
2982 default:
2983 return (EINVAL);
2984 }
2985 pci_clear_command_bit(dev, child, bit);
2986 return (0);
2987}
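/*
 * Editorial example (not part of pci.c): typical attach-time use of the
 * convenience wrappers that land in the methods above, for a hypothetical
 * memory-mapped, bus-mastering device.
 */
static void
example_enable_access(device_t dev)
{
	pci_enable_busmaster(dev);		/* sets PCIM_CMD_BUSMASTEREN */
	pci_enable_io(dev, SYS_RES_MEMORY);	/* sets PCIM_CMD_MEMEN */
}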
2988
2989/*
2990 * New style pci driver. Parent device is either a pci-host-bridge or a
2991 * pci-pci-bridge. Both kinds are represented by instances of pcib.
2992 */
2993
2994void
2995pci_print_verbose(struct pci_devinfo *dinfo)
2996{
2997
2998 if (bootverbose) {
2999 pcicfgregs *cfg = &dinfo->cfg;
3000
3001 printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
3002 cfg->vendor, cfg->device, cfg->revid);
3003 printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
3004 cfg->domain, cfg->bus, cfg->slot, cfg->func);
3005 printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
3006 cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
3007 cfg->mfdev);
3008 printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
3009 cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
3010 printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
3011 cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
3012 cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
3013 if (cfg->intpin > 0)
3014 printf("\tintpin=%c, irq=%d\n",
3015 cfg->intpin +'a' -1, cfg->intline);
3016 if (cfg->pp.pp_cap) {
3017 uint16_t status;
3018
3019 status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
3020 printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
3021 cfg->pp.pp_cap & PCIM_PCAP_SPEC,
3022 cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
3023 cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
3024 status & PCIM_PSTAT_DMASK);
3025 }
3026 if (cfg->msi.msi_location) {
3027 int ctrl;
3028
3029 ctrl = cfg->msi.msi_ctrl;
3030 printf("\tMSI supports %d message%s%s%s\n",
3031 cfg->msi.msi_msgnum,
3032 (cfg->msi.msi_msgnum == 1) ? "" : "s",
3033 (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
3034 (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
3035 }
3036 if (cfg->msix.msix_location) {
3037 printf("\tMSI-X supports %d message%s ",
3038 cfg->msix.msix_msgnum,
3039 (cfg->msix.msix_msgnum == 1) ? "" : "s");
3040 if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
3041 printf("in map 0x%x\n",
3042 cfg->msix.msix_table_bar);
3043 else
3044 printf("in maps 0x%x and 0x%x\n",
3045 cfg->msix.msix_table_bar,
3046 cfg->msix.msix_pba_bar);
3047 }
3048 }
3049}
3050
3051static int
3052pci_porten(device_t dev)
3053{
3054 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0;
3055}
3056
3057static int
3058pci_memen(device_t dev)
3059{
3060 return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0;
3061}
3062
3063void
3064pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp,
3065 int *bar64)
3066{
3067 struct pci_devinfo *dinfo;
3068 pci_addr_t map, testval;
3069 int ln2range;
3070 uint16_t cmd;
3071
3072 /*
3073 * The device ROM BAR is special. It is always a 32-bit
3074 * memory BAR. Bit 0 is special and should not be set when
3075 * sizing the BAR.
3076 */
3077 dinfo = device_get_ivars(dev);
3078 if (PCIR_IS_BIOS(&dinfo->cfg, reg)) {
3079 map = pci_read_config(dev, reg, 4);
3080 pci_write_config(dev, reg, 0xfffffffe, 4);
3081 testval = pci_read_config(dev, reg, 4);
3082 pci_write_config(dev, reg, map, 4);
3083 *mapp = map;
3084 *testvalp = testval;
3085 if (bar64 != NULL)
3086 *bar64 = 0;
3087 return;
3088 }
3089
3090 map = pci_read_config(dev, reg, 4);
3091 ln2range = pci_maprange(map);
3092 if (ln2range == 64)
3093 map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
3094
3095 /*
3096 * Disable decoding via the command register before
3097 * determining the BAR's length since we will be placing it in
3098 * a weird state.
3099 */
3100 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
3101 pci_write_config(dev, PCIR_COMMAND,
3102 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
3103
3104 /*
3105 * Determine the BAR's length by writing all 1's. The bottom
3106 * log_2(size) bits of the BAR will stick as 0 when we read
3107 * the value back.
3108 *
3109 * NB: according to the PCI Local Bus Specification, rev. 3.0:
3110 * "Software writes 0FFFFFFFFh to both registers, reads them back,
3111 * and combines the result into a 64-bit value." (section 6.2.5.1)
3112 *
3113 * Writes to both registers must be performed before attempting to
3114 * read back the size value.
3115 */
3116 testval = 0;
3117 pci_write_config(dev, reg, 0xffffffff, 4);
3118 if (ln2range == 64) {
3119 pci_write_config(dev, reg + 4, 0xffffffff, 4);
3120 testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
3121 }
3122 testval |= pci_read_config(dev, reg, 4);
3123
3124 /*
3125 * Restore the original value of the BAR. We may have reprogrammed
3126 * the BAR of the low-level console device and when booting verbose,
3127 * we need the console device addressable.
3128 */
3129 pci_write_config(dev, reg, map, 4);
3130 if (ln2range == 64)
3131 pci_write_config(dev, reg + 4, map >> 32, 4);
3132 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
3133
3134 *mapp = map;
3135 *testvalp = testval;
3136 if (bar64 != NULL)
3137 *bar64 = (ln2range == 64);
3138}
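/*
 * Editorial note (not part of pci.c), a worked example of the sizing probe
 * above: if a 32-bit memory BAR reads back 0xfffff000 after all 1's are
 * written (low flag bits ignored), the low 12 address bits are stuck at
 * zero, so the BAR decodes 1 << 12 = 4 KB starting at the base address
 * that was restored afterwards.
 */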
3139
3140static void
3141pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
3142{
3143 struct pci_devinfo *dinfo;
3144 int ln2range;
3145
3146 /* The device ROM BAR is always a 32-bit memory BAR. */
3147 dinfo = device_get_ivars(dev);
3148 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
3149 ln2range = 32;
3150 else
3151 ln2range = pci_maprange(pm->pm_value);
3152 pci_write_config(dev, pm->pm_reg, base, 4);
3153 if (ln2range == 64)
3154 pci_write_config(dev, pm->pm_reg + 4, base >> 32, 4);
3155 pm->pm_value = pci_read_config(dev, pm->pm_reg, 4);
3156 if (ln2range == 64)
3157 pm->pm_value |= (pci_addr_t)pci_read_config(dev,
3158 pm->pm_reg + 4, 4) << 32;
3159}
3160
3161struct pci_map *
3162pci_find_bar(device_t dev, int reg)
3163{
3164 struct pci_devinfo *dinfo;
3165 struct pci_map *pm;
3166
3167 dinfo = device_get_ivars(dev);
3168 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
3169 if (pm->pm_reg == reg)
3170 return (pm);
3171 }
3172 return (NULL);
3173}
3174
3175int
3176pci_bar_enabled(device_t dev, struct pci_map *pm)
3177{
3178 struct pci_devinfo *dinfo;
3179 uint16_t cmd;
3180
3181 dinfo = device_get_ivars(dev);
3182 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) &&
3183 !(pm->pm_value & PCIM_BIOS_ENABLE))
3184 return (0);
3185#ifdef PCI_IOV
3186 if ((dinfo->cfg.flags & PCICFG_VF) != 0) {
3187 struct pcicfg_iov *iov;
3188
3189 iov = dinfo->cfg.iov;
3190 cmd = pci_read_config(iov->iov_pf,
3191 iov->iov_pos + PCIR_SRIOV_CTL, 2);
3192 return ((cmd & PCIM_SRIOV_VF_MSE) != 0);
3193 }
3194#endif
3195 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
3196 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg) || PCI_BAR_MEM(pm->pm_value))
3197 return ((cmd & PCIM_CMD_MEMEN) != 0);
3198 else
3199 return ((cmd & PCIM_CMD_PORTEN) != 0);
3200}
3201
3202struct pci_map *
3203pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
3204{
3205 struct pci_devinfo *dinfo;
3206 struct pci_map *pm, *prev;
3207
3208 dinfo = device_get_ivars(dev);
3209 pm = malloc(sizeof(*pm), M_DEVBUF, M_WAITOK | M_ZERO);
3210 pm->pm_reg = reg;
3211 pm->pm_value = value;
3212 pm->pm_size = size;
3213 STAILQ_FOREACH(prev, &dinfo->cfg.maps, pm_link) {
3214 KASSERT(prev->pm_reg != pm->pm_reg, ("duplicate map %02x",
3215 reg));
3216 if (STAILQ_NEXT(prev, pm_link) == NULL ||
3217 STAILQ_NEXT(prev, pm_link)->pm_reg > pm->pm_reg)
3218 break;
3219 }
3220 if (prev != NULL)
3221 STAILQ_INSERT_AFTER(&dinfo->cfg.maps, prev, pm, pm_link);
3222 else
3223 STAILQ_INSERT_TAIL(&dinfo->cfg.maps, pm, pm_link);
3224 return (pm);
3225}
3226
3227static void
3228pci_restore_bars(device_t dev)
3229{
3230 struct pci_devinfo *dinfo;
3231 struct pci_map *pm;
3232 int ln2range;
3233
3234 dinfo = device_get_ivars(dev);
3235 STAILQ_FOREACH(pm, &dinfo->cfg.maps, pm_link) {
3236 if (PCIR_IS_BIOS(&dinfo->cfg, pm->pm_reg))
3237 ln2range = 32;
3238 else
3239 ln2range = pci_maprange(pm->pm_value);
3240 pci_write_config(dev, pm->pm_reg, pm->pm_value, 4);
3241 if (ln2range == 64)
3242 pci_write_config(dev, pm->pm_reg + 4,
3243 pm->pm_value >> 32, 4);
3244 }
3245}
3246
3247/*
3248 * Add a resource based on a pci map register. Return 1 if the map
3249 * register is a 32bit map register or 2 if it is a 64bit register.
3250 */
3251static int
3252pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl,
3253 int force, int prefetch)
3254{
3255 struct pci_map *pm;
3256 pci_addr_t base, map, testval;
3257 pci_addr_t start, end, count;
3258 int barlen, basezero, flags, maprange, mapsize, type;
3259 uint16_t cmd;
3260 struct resource *res;
3261
3262 /*
3263 * The BAR may already exist if the device is a CardBus card
3264 * whose CIS is stored in this BAR.
3265 */
3266 pm = pci_find_bar(dev, reg);
3267 if (pm != NULL) {
3268 maprange = pci_maprange(pm->pm_value);
3269 barlen = maprange == 64 ? 2 : 1;
3270 return (barlen);
3271 }
3272
3273 pci_read_bar(dev, reg, &map, &testval, NULL);
3274 if (PCI_BAR_MEM(map)) {
3275 type = SYS_RES_MEMORY;
3276 if (map & PCIM_BAR_MEM_PREFETCH)
3277 prefetch = 1;
3278 } else
3279 type = SYS_RES_IOPORT;
3280 mapsize = pci_mapsize(testval);
3281 base = pci_mapbase(map);
3282#ifdef __PCI_BAR_ZERO_VALID
3283 basezero = 0;
3284#else
3285 basezero = base == 0;
3286#endif
3287 maprange = pci_maprange(map);
3288 barlen = maprange == 64 ? 2 : 1;
3289
3290 /*
3291 * For I/O registers, if bottom bit is set, and the next bit up
3292 * isn't clear, we know we have a BAR that doesn't conform to the
3293 * spec, so ignore it. Also, sanity check the size of the data
3294 * areas to the type of memory involved. Memory must be at least
3295 * 16 bytes in size, while I/O ranges must be at least 4.
3296 */
3297 if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0)
3298 return (barlen);
3299 if ((type == SYS_RES_MEMORY && mapsize < 4) ||
3300 (type == SYS_RES_IOPORT && mapsize < 2))
3301 return (barlen);
3302
3303 /* Save a record of this BAR. */
3304 pm = pci_add_bar(dev, reg, map, mapsize);
3305 if (bootverbose) {
3306 printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d",
3307 reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize);
3308 if (type == SYS_RES_IOPORT && !pci_porten(dev))
3309 printf(", port disabled\n");
3310 else if (type == SYS_RES_MEMORY && !pci_memen(dev))
3311 printf(", memory disabled\n");
3312 else
3313 printf(", enabled\n");
3314 }
3315
3316 /*
3317 * If base is 0, then we have problems if this architecture does
3318 * not allow that. It is best to ignore such entries for the
3319 * moment. These will be allocated later if the driver specifically
3320 * requests them. However, some removable buses look better when
3321 * all resources are allocated, so allow '0' to be overridden.
3322 *
3323 * Similarly treat maps whose value is the same as the test value
3324 * read back. These maps have had all f's written to them by the
3325 * BIOS in an attempt to disable the resources.
3326 */
3327 if (!force && (basezero || map == testval))
3328 return (barlen);
3329 if ((u_long)base != base) {
3330 device_printf(bus,
3331 "pci%d:%d:%d:%d bar %#x too many address bits",
3332 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
3333 pci_get_function(dev), reg);
3334 return (barlen);
3335 }
3336
3337 /*
3338 * This code theoretically does the right thing, but has
3339 * undesirable side effects in some cases where peripherals
3340 * respond oddly to having these bits enabled. Let the user
3341 * be able to turn them off (since pci_enable_io_modes is 1 by
3342 * default).
3343 */
3344 if (pci_enable_io_modes) {
3345 /* Turn on resources that have been left off by a lazy BIOS */
3346 if (type == SYS_RES_IOPORT && !pci_porten(dev)) {
3347 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
3348 cmd |= PCIM_CMD_PORTEN;
3349 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
3350 }
3351 if (type == SYS_RES_MEMORY && !pci_memen(dev)) {
3352 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
3353 cmd |= PCIM_CMD_MEMEN;
3354 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
3355 }
3356 } else {
3357 if (type == SYS_RES_IOPORT && !pci_porten(dev))
3358 return (barlen);
3359 if (type == SYS_RES_MEMORY && !pci_memen(dev))
3360 return (barlen);
3361 }
3362
3363 count = (pci_addr_t)1 << mapsize;
3364 flags = RF_ALIGNMENT_LOG2(mapsize);
3365 if (prefetch)
3366 flags |= RF_PREFETCHABLE;
3367 if (basezero || base == pci_mapbase(testval) || pci_clear_bars) {
3368 start = 0; /* Let the parent decide. */
3369 end = ~0;
3370 } else {
3371 start = base;
3372 end = base + count - 1;
3373 }
3374 resource_list_add(rl, type, reg, start, end, count);
3375
3376 /*
3377 * Try to allocate the resource for this BAR from our parent
3378 * so that this resource range is already reserved. The
3379 * driver for this device will later inherit this resource in
3380 * pci_alloc_resource().
3381 */
3382 res = resource_list_reserve(rl, bus, dev, type, &reg, start, end, count,
3383 flags);
3384 if ((pci_do_realloc_bars
3385 || pci_has_quirk(pci_get_devid(dev), PCI_QUIRK_REALLOC_BAR))
3386 && res == NULL && (start != 0 || end != ~0)) {
3387 /*
3388 * If the allocation fails, try to allocate a resource for
3389 * this BAR using any available range. The firmware felt
3390 * it was important enough to assign a resource, so don't
3391 * disable decoding if we can help it.
3392 */
3393 resource_list_delete(rl, type, reg);
3394 resource_list_add(rl, type, reg, 0, ~0, count);
3395 res = resource_list_reserve(rl, bus, dev, type, &reg, 0, ~0,
3396 count, flags);
3397 }
3398 if (res == NULL) {
3399 /*
3400 * If the allocation fails, delete the resource list entry
3401 * and disable decoding for this device.
3402 *
3403 * If the driver requests this resource in the future,
3404 * pci_reserve_map() will try to allocate a fresh
3405 * resource range.
3406 */
3407 resource_list_delete(rl, type, reg);
3408 pci_disable_io(dev, type);
3409 if (bootverbose)
3410 device_printf(bus,
3411 "pci%d:%d:%d:%d bar %#x failed to allocate\n",
3412 pci_get_domain(dev), pci_get_bus(dev),
3413 pci_get_slot(dev), pci_get_function(dev), reg);
3414 } else {
3415 start = rman_get_start(res);
3416 pci_write_bar(dev, pm, start);
3417 }
3418 return (barlen);
3419}
3420
3421/*
3422 * For ATA devices we need to decide early what addressing mode to use.
3423 * Legacy demands that the primary and secondary ATA ports sit on the
3424 * same addresses that old ISA hardware did. This dictates that we use
3425 * those addresses and ignore the BARs if we cannot set PCI native
3426 * addressing mode.
3427 */
3428static void
3429pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force,
3430 uint32_t prefetchmask)
3431{
3432 int rid, type, progif;
3433#if 0
3434 /* if this device supports PCI native addressing use it */
3435 progif = pci_read_config(dev, PCIR_PROGIF, 1);
3436 if ((progif & 0x8a) == 0x8a) {
3437 if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
3438 pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
3439 printf("Trying ATA native PCI addressing mode\n");
3440 pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
3441 }
3442 }
3443#endif
3444 progif = pci_read_config(dev, PCIR_PROGIF, 1);
3445 type = SYS_RES_IOPORT;
3446 if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
3447 pci_add_map(bus, dev, PCIR_BAR(0), rl, force,
3448 prefetchmask & (1 << 0));
3449 pci_add_map(bus, dev, PCIR_BAR(1), rl, force,
3450 prefetchmask & (1 << 1));
3451 } else {
3452 rid = PCIR_BAR(0);
3453 resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
3454 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x1f0,
3455 0x1f7, 8, 0);
3456 rid = PCIR_BAR(1);
3457 resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
3458 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x3f6,
3459 0x3f6, 1, 0);
3460 }
3461 if (progif & PCIP_STORAGE_IDE_MODESEC) {
3462 pci_add_map(bus, dev, PCIR_BAR(2), rl, force,
3463 prefetchmask & (1 << 2));
3464 pci_add_map(bus, dev, PCIR_BAR(3), rl, force,
3465 prefetchmask & (1 << 3));
3466 } else {
3467 rid = PCIR_BAR(2);
3468 resource_list_add(rl, type, rid, 0x170, 0x177, 8);
3469 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x170,
3470 0x177, 8, 0);
3471 rid = PCIR_BAR(3);
3472 resource_list_add(rl, type, rid, 0x376, 0x376, 1);
3473 (void)resource_list_reserve(rl, bus, dev, type, &rid, 0x376,
3474 0x376, 1, 0);
3475 }
3476 pci_add_map(bus, dev, PCIR_BAR(4), rl, force,
3477 prefetchmask & (1 << 4));
3478 pci_add_map(bus, dev, PCIR_BAR(5), rl, force,
3479 prefetchmask & (1 << 5));
3480}
3481
3482static void
3483pci_assign_interrupt(device_t bus, device_t dev, int force_route)
3484{
3485 struct pci_devinfo *dinfo = device_get_ivars(dev);
3486 pcicfgregs *cfg = &dinfo->cfg;
3487 char tunable_name[64];
3488 int irq;
3489
3490 /* Has to have an intpin to have an interrupt. */
3491 if (cfg->intpin == 0)
3492 return;
3493
3494 /* Let the user override the IRQ with a tunable. */
3495 irq = PCI_INVALID_IRQ;
3496 snprintf(tunable_name, sizeof(tunable_name),
3497 "hw.pci%d.%d.%d.INT%c.irq",
3498 cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
3499 if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
3500 irq = PCI_INVALID_IRQ;
3501
3502 /*
3503 * If we didn't get an IRQ via the tunable, then we either use the
3504 * IRQ value in the intline register or we ask the bus to route an
3505 * interrupt for us. If force_route is true, then we only use the
3506 * value in the intline register if the bus was unable to assign an
3507 * IRQ.
3508 */
3509 if (!PCI_INTERRUPT_VALID(irq)) {
3510 if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
3511 irq = PCI_ASSIGN_INTERRUPT(bus, dev);
3512 if (!PCI_INTERRUPT_VALID(irq))
3513 irq = cfg->intline;
3514 }
3515
3516 /* If after all that we don't have an IRQ, just bail. */
3517 if (!PCI_INTERRUPT_VALID(irq))
3518 return;
3519
3520 /* Update the config register if it changed. */
3521 if (irq != cfg->intline) {
3522 cfg->intline = irq;
3523 pci_write_config(dev, PCIR_INTLINE, irq, 1);
3524 }
3525
3526 /* Add this IRQ as rid 0 interrupt resource. */
3527 resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
3528}
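/*
 * Editorial note (not part of pci.c): the tunable consulted above lets an
 * administrator pin the routed IRQ from loader.conf.  For example, for
 * domain 0, bus 0, slot 5, pin INTA (made-up values):
 *
 *	hw.pci0.0.5.INTA.irq="11"
 */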
3529
3530/* Perform early OHCI takeover from SMM. */
3531static void
3532ohci_early_takeover(device_t self)
3533{
3534 struct resource *res;
3535 uint32_t ctl;
3536 int rid;
3537 int i;
3538
3539 rid = PCIR_BAR(0);
3540 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3541 if (res == NULL)
3542 return;
3543
3544 ctl = bus_read_4(res, OHCI_CONTROL);
3545 if (ctl & OHCI_IR) {
3546 if (bootverbose)
3547 printf("ohci early: "
3548 "SMM active, request owner change\n");
3549 bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
3550 for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
3551 DELAY(1000);
3552 ctl = bus_read_4(res, OHCI_CONTROL);
3553 }
3554 if (ctl & OHCI_IR) {
3555 if (bootverbose)
3556 printf("ohci early: "
3557 "SMM does not respond, resetting\n");
3558 bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
3559 }
3560 /* Disable interrupts */
3561 bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
3562 }
3563
3564 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3565}
3566
3567/* Perform early UHCI takeover from SMM. */
3568static void
3569uhci_early_takeover(device_t self)
3570{
3571 struct resource *res;
3572 int rid;
3573
3574 /*
3575 * Set the PIRQD enable bit and switch off all the others. We don't
3576 * want legacy support to interfere with us XXX Does this also mean
3577 * that the BIOS won't touch the keyboard anymore if it is connected
3578 * to the ports of the root hub?
3579 */
3580 pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);
3581
3582 /* Disable interrupts */
3583 rid = PCI_UHCI_BASE_REG;
3584 res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
3585 if (res != NULL) {
3586 bus_write_2(res, UHCI_INTR, 0);
3587 bus_release_resource(self, SYS_RES_IOPORT, rid, res);
3588 }
3589}
3590
3591/* Perform early EHCI takeover from SMM. */
3592static void
3593ehci_early_takeover(device_t self)
3594{
3595 struct resource *res;
3596 uint32_t cparams;
3597 uint32_t eec;
3598 uint8_t eecp;
3599 uint8_t bios_sem;
3600 uint8_t offs;
3601 int rid;
3602 int i;
3603
3604 rid = PCIR_BAR(0);
3605 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3606 if (res == NULL)
3607 return;
3608
3609 cparams = bus_read_4(res, EHCI_HCCPARAMS);
3610
3611 /* Synchronise with the BIOS if it owns the controller. */
3612 for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
3613 eecp = EHCI_EECP_NEXT(eec)) {
3614 eec = pci_read_config(self, eecp, 4);
3615 if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
3616 continue;
3617 }
3618 bios_sem = pci_read_config(self, eecp +
3619 EHCI_LEGSUP_BIOS_SEM, 1);
3620 if (bios_sem == 0) {
3621 continue;
3622 }
3623 if (bootverbose)
3624 printf("ehci early: "
3625 "SMM active, request owner change\n");
3626
3627 pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);
3628
3629 for (i = 0; (i < 100) && (bios_sem != 0); i++) {
3630 DELAY(1000);
3631 bios_sem = pci_read_config(self, eecp +
3632 EHCI_LEGSUP_BIOS_SEM, 1);
3633 }
3634
3635 if (bios_sem != 0) {
3636 if (bootverbose)
3637 printf("ehci early: "
3638 "SMM does not respond\n");
3639 }
3640 /* Disable interrupts */
3641 offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
3642 bus_write_4(res, offs + EHCI_USBINTR, 0);
3643 }
3644 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3645}
3646
3647/* Perform early XHCI takeover from SMM. */
3648static void
3649xhci_early_takeover(device_t self)
3650{
3651 struct resource *res;
3652 uint32_t cparams;
3653 uint32_t eec;
3654 uint8_t eecp;
3655 uint8_t bios_sem;
3656 uint8_t offs;
3657 int rid;
3658 int i;
3659
3660 rid = PCIR_BAR(0);
3661 res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
3662 if (res == NULL)
3663 return;
3664
3665 cparams = bus_read_4(res, XHCI_HCSPARAMS0);
3666
3667 eec = -1;
3668
3669 /* Synchronise with the BIOS if it owns the controller. */
3670 for (eecp = XHCI_HCS0_XECP(cparams) << 2; eecp != 0 && XHCI_XECP_NEXT(eec);
3671 eecp += XHCI_XECP_NEXT(eec) << 2) {
3672 eec = bus_read_4(res, eecp);
3673
3674 if (XHCI_XECP_ID(eec) != XHCI_ID_USB_LEGACY)
3675 continue;
3676
3677 bios_sem = bus_read_1(res, eecp + XHCI_XECP_BIOS_SEM);
3678 if (bios_sem == 0)
3679 continue;
3680
3681 if (bootverbose)
3682 printf("xhci early: "
3683 "SMM active, request owner change\n");
3684
3685 bus_write_1(res, eecp + XHCI_XECP_OS_SEM, 1);
3686
3687 /* wait a maximum of 5 seconds */
3688
3689 for (i = 0; (i < 5000) && (bios_sem != 0); i++) {
3690 DELAY(1000);
3691 bios_sem = bus_read_1(res, eecp +
3692 XHCI_XECP_BIOS_SEM);
3693 }
3694
3695 if (bios_sem != 0) {
3696 if (bootverbose)
3697 printf("xhci early: "
3698 "SMM does not respond\n");
3699 }
3700
3701 /* Disable interrupts */
3702 offs = bus_read_1(res, XHCI_CAPLENGTH);
3703 bus_write_4(res, offs + XHCI_USBCMD, 0);
3704 bus_read_4(res, offs + XHCI_USBSTS);
3705 }
3706 bus_release_resource(self, SYS_RES_MEMORY, rid, res);
3707}
3708
3709#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
3710static void
3711pci_reserve_secbus(device_t bus, device_t dev, pcicfgregs *cfg,
3712 struct resource_list *rl)
3713{
3714 struct resource *res;
3715 char *cp;
3716 rman_res_t start, end, count;
3717 int rid, sec_bus, sec_reg, sub_bus, sub_reg, sup_bus;
3718
3719 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3720 case PCIM_HDRTYPE_BRIDGE:
3721 sec_reg = PCIR_SECBUS_1;
3722 sub_reg = PCIR_SUBBUS_1;
3723 break;
3724 case PCIM_HDRTYPE_CARDBUS:
3725 sec_reg = PCIR_SECBUS_2;
3726 sub_reg = PCIR_SUBBUS_2;
3727 break;
3728 default:
3729 return;
3730 }
3731
3732 /*
3733 * If the existing bus range is valid, attempt to reserve it
3734 * from our parent. If this fails for any reason, clear the
3735 * secbus and subbus registers.
3736 *
3737 * XXX: Should we reset sub_bus to sec_bus if it is < sec_bus?
3738 * This would at least preserve the existing sec_bus if it is
3739 * valid.
3740 */
3741 sec_bus = PCI_READ_CONFIG(bus, dev, sec_reg, 1);
3742 sub_bus = PCI_READ_CONFIG(bus, dev, sub_reg, 1);
3743
3744 /* Quirk handling. */
3745 switch (pci_get_devid(dev)) {
3746 case 0x12258086: /* Intel 82454KX/GX (Orion) */
3747 sup_bus = pci_read_config(dev, 0x41, 1);
3748 if (sup_bus != 0xff) {
3749 sec_bus = sup_bus + 1;
3750 sub_bus = sup_bus + 1;
3751 PCI_WRITE_CONFIG(bus, dev, sec_reg, sec_bus, 1);
3752 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3753 }
3754 break;
3755
3756 case 0x00dd10de:
3757 /* Compaq R3000 BIOS sets wrong subordinate bus number. */
3758 if ((cp = kern_getenv("smbios.planar.maker")) == NULL)
3759 break;
3760 if (strncmp(cp, "Compal", 6) != 0) {
3761 freeenv(cp);
3762 break;
3763 }
3764 freeenv(cp);
3765 if ((cp = kern_getenv("smbios.planar.product")) == NULL)
3766 break;
3767 if (strncmp(cp, "08A0", 4) != 0) {
3768 freeenv(cp);
3769 break;
3770 }
3771 freeenv(cp);
3772 if (sub_bus < 0xa) {
3773 sub_bus = 0xa;
3774 PCI_WRITE_CONFIG(bus, dev, sub_reg, sub_bus, 1);
3775 }
3776 break;
3777 }
3778
3779 if (bootverbose)
3780 printf("\tsecbus=%d, subbus=%d\n", sec_bus, sub_bus);
3781 if (sec_bus > 0 && sub_bus >= sec_bus) {
3782 start = sec_bus;
3783 end = sub_bus;
3784 count = end - start + 1;
3785
3786 resource_list_add(rl, PCI_RES_BUS, 0, 0, ~0, count);
3787
3788 /*
3789 * If requested, clear secondary bus registers in
3790 * bridge devices to force a complete renumbering
3791 * rather than reserving the existing range. However,
3792 * preserve the existing size.
3793 */
3794 if (pci_clear_buses)
3795 goto clear;
3796
3797 rid = 0;
3798 res = resource_list_reserve(rl, bus, dev, PCI_RES_BUS, &rid,
3799 start, end, count, 0);
3800 if (res != NULL)
3801 return;
3802
3803 if (bootverbose)
3804 device_printf(bus,
3805 "pci%d:%d:%d:%d secbus failed to allocate\n",
3806 pci_get_domain(dev), pci_get_bus(dev),
3807 pci_get_slot(dev), pci_get_function(dev));
3808 }
3809
3810clear:
3811 PCI_WRITE_CONFIG(bus, dev, sec_reg, 0, 1);
3812 PCI_WRITE_CONFIG(bus, dev, sub_reg, 0, 1);
3813}
3814
3815static struct resource *
3816pci_alloc_secbus(device_t dev, device_t child, int *rid, rman_res_t start,
3817 rman_res_t end, rman_res_t count, u_int flags)
3818{
3819 struct pci_devinfo *dinfo;
3820 pcicfgregs *cfg;
3821 struct resource_list *rl;
3822 struct resource *res;
3823 int sec_reg, sub_reg;
3824
3825 dinfo = device_get_ivars(child);
3826 cfg = &dinfo->cfg;
3827 rl = &dinfo->resources;
3828 switch (cfg->hdrtype & PCIM_HDRTYPE) {
3829 case PCIM_HDRTYPE_BRIDGE:
3830 sec_reg = PCIR_SECBUS_1;
3831 sub_reg = PCIR_SUBBUS_1;
3832 break;
3833 case PCIM_HDRTYPE_CARDBUS:
3834 sec_reg = PCIR_SECBUS_2;
3835 sub_reg = PCIR_SUBBUS_2;
3836 break;
3837 default:
3838 return (NULL);
3839 }
3840
3841 if (*rid != 0)
3842 return (NULL);
3843
3844 if (resource_list_find(rl, PCI_RES_BUS, *rid) == NULL)
3845 resource_list_add(rl, PCI_RES_BUS, *rid, start, end, count);
3846 if (!resource_list_reserved(rl, PCI_RES_BUS, *rid)) {
3847 res = resource_list_reserve(rl, dev, child, PCI_RES_BUS, rid,
3848 start, end, count, flags & ~RF_ACTIVE);
3849 if (res == NULL) {
3850 resource_list_delete(rl, PCI_RES_BUS, *rid);
3851 device_printf(child, "allocating %ju bus%s failed\n",
3852 count, count == 1 ? "" : "es");
3853 return (NULL);
3854 }
3855 if (bootverbose)
3856 device_printf(child,
3857 "Lazy allocation of %ju bus%s at %ju\n", count,
3858 count == 1 ? "" : "es", rman_get_start(res));
3859 PCI_WRITE_CONFIG(dev, child, sec_reg, rman_get_start(res), 1);
3860 PCI_WRITE_CONFIG(dev, child, sub_reg, rman_get_end(res), 1);
3861 }
3862 return (resource_list_alloc(rl, dev, child, PCI_RES_BUS, rid, start,
3863 end, count, flags));
3864}
3865#endif
3866
3867static int
3868pci_ea_bei_to_rid(device_t dev, int bei)
3869{
3870#ifdef PCI_IOV
3871 struct pci_devinfo *dinfo;
3872 int iov_pos;
3873 struct pcicfg_iov *iov;
3874
3875 dinfo = device_get_ivars(dev);
3876 iov = dinfo->cfg.iov;
3877 if (iov != NULL)
3878 iov_pos = iov->iov_pos;
3879 else
3880 iov_pos = 0;
3881#endif
3882
3883 /* Check if matches BAR */
3884 if ((bei >= PCIM_EA_BEI_BAR_0) &&
3885 (bei <= PCIM_EA_BEI_BAR_5))
3886 return (PCIR_BAR(bei));
3887
3888 /* Check ROM */
3889 if (bei == PCIM_EA_BEI_ROM)
3890 return (PCIR_BIOS);
3891
3892#ifdef PCI_IOV
3893 /* Check if matches VF_BAR */
3894 if ((iov != NULL) && (bei >= PCIM_EA_BEI_VF_BAR_0) &&
3895 (bei <= PCIM_EA_BEI_VF_BAR_5))
3896 return (PCIR_SRIOV_BAR(bei - PCIM_EA_BEI_VF_BAR_0) +
3897 iov_pos);
3898#endif
3899
3900 return (-1);
3901}
3902
3903int
3904pci_ea_is_enabled(device_t dev, int rid)
3905{
3906 struct pci_ea_entry *ea;
3907 struct pci_devinfo *dinfo;
3908
3909 dinfo = device_get_ivars(dev);
3910
3911 STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) {
3912 if (pci_ea_bei_to_rid(dev, ea->eae_bei) == rid)
3913 return ((ea->eae_flags & PCIM_EA_ENABLE) > 0);
3914 }
3915
3916 return (0);
3917}
3918
3919void
3920pci_add_resources_ea(device_t bus, device_t dev, int alloc_iov)
3921{
3922 struct pci_ea_entry *ea;
3923 struct pci_devinfo *dinfo;
3924 pci_addr_t start, end, count;
3925 struct resource_list *rl;
3926 int type, flags, rid;
3927 struct resource *res;
3928 uint32_t tmp;
3929#ifdef PCI_IOV
3930 struct pcicfg_iov *iov;
3931#endif
3932
3933 dinfo = device_get_ivars(dev);
3934 rl = &dinfo->resources;
3935 flags = 0;
3936
3937#ifdef PCI_IOV
3938 iov = dinfo->cfg.iov;
3939#endif
3940
3941 if (dinfo->cfg.ea.ea_location == 0)
3942 return;
3943
3944 STAILQ_FOREACH(ea, &dinfo->cfg.ea.ea_entries, eae_link) {
3945 /*
3946 * TODO: Ignore an EA BAR if it is not enabled.
3947 * Currently the EA implementation only supports
3948 * the case where the EA structure contains
3949 * predefined entries. If an entry is not enabled,
3950 * leave it unallocated and fall back to the
3951 * legacy BAR mechanism.
3952 */
3953 if ((ea->eae_flags & PCIM_EA_ENABLE) == 0)
3954 continue;
3955
3956 switch ((ea->eae_flags & PCIM_EA_PP) >> PCIM_EA_PP_OFFSET) {
3957 case PCIM_EA_P_MEM_PREFETCH:
3958 case PCIM_EA_P_VF_MEM_PREFETCH:
3959 flags = RF_PREFETCHABLE;
3960 /* FALLTHROUGH */
3961 case PCIM_EA_P_VF_MEM:
3962 case PCIM_EA_P_MEM:
3963 type = SYS_RES_MEMORY;
3964 break;
3965 case PCIM_EA_P_IO:
3966 type = SYS_RES_IOPORT;
3967 break;
3968 default:
3969 continue;
3970 }
3971
3972 if (alloc_iov != 0) {
3973#ifdef PCI_IOV
3974 /* Allocating IOV, confirm BEI matches */
3975 if ((ea->eae_bei < PCIM_EA_BEI_VF_BAR_0) ||
3976 (ea->eae_bei > PCIM_EA_BEI_VF_BAR_5))
3977 continue;
3978#else
3979 continue;
3980#endif
3981 } else {
3982 /* Allocating BAR, confirm BEI matches */
3983 if (((ea->eae_bei < PCIM_EA_BEI_BAR_0) ||
3984 (ea->eae_bei > PCIM_EA_BEI_BAR_5)) &&
3985 (ea->eae_bei != PCIM_EA_BEI_ROM))
3986 continue;
3987 }
3988
3989 rid = pci_ea_bei_to_rid(dev, ea->eae_bei);
3990 if (rid < 0)
3991 continue;
3992
3993 /* Skip resources already allocated by EA */
3994 if ((resource_list_find(rl, SYS_RES_MEMORY, rid) != NULL) ||
3995 (resource_list_find(rl, SYS_RES_IOPORT, rid) != NULL))
3996 continue;
3997
3998 start = ea->eae_base;
3999 count = ea->eae_max_offset + 1;
4000#ifdef PCI_IOV
4001 if (iov != NULL)
4002 count = count * iov->iov_num_vfs;
4003#endif
4004 end = start + count - 1;
4005 if (count == 0)
4006 continue;
4007
4008 resource_list_add(rl, type, rid, start, end, count);
4009 res = resource_list_reserve(rl, bus, dev, type, &rid, start, end, count,
4010 flags);
4011 if (res == NULL) {
4012 resource_list_delete(rl, type, rid);
4013
4014 /*
4015 * Failed to allocate using EA, so disable the entry.
4016 * Another allocation attempt will be made later,
4017 * this time using the legacy BAR registers.
4018 */
4019 tmp = pci_read_config(dev, ea->eae_cfg_offset, 4);
4020 tmp &= ~PCIM_EA_ENABLE;
4021 pci_write_config(dev, ea->eae_cfg_offset, tmp, 4);
4022
4023 /*
4024 * Disabling entry might fail in case it is hardwired.
4025 * Read flags again to match current status.
4026 */
4027 ea->eae_flags = pci_read_config(dev, ea->eae_cfg_offset, 4);
4028
4029 continue;
4030 }
4031
4032 /* As per specification, fill BAR with zeros */
4033 pci_write_config(dev, rid, 0, 4);
4034 }
4035}
4036
4037void
4038pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
4039{
4040 struct pci_devinfo *dinfo;
4041 pcicfgregs *cfg;
4042 struct resource_list *rl;
4043 const struct pci_quirk *q;
4044 uint32_t devid;
4045 int i;
4046
4047 dinfo = device_get_ivars(dev);
4048 cfg = &dinfo->cfg;
4049 rl = &dinfo->resources;
4050 devid = (cfg->device << 16) | cfg->vendor;
4051
4052 /* Allocate resources using Enhanced Allocation */
4053 pci_add_resources_ea(bus, dev, 0);
4054
4055 /* ATA devices need special map treatment */
4056 if ((pci_get_class(dev) == PCIC_STORAGE) &&
4057 (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
4058 ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
4059 (!pci_read_config(dev, PCIR_BAR(0), 4) &&
4060 !pci_read_config(dev, PCIR_BAR(2), 4))) )
4061 pci_ata_maps(bus, dev, rl, force, prefetchmask);
4062 else
4063 for (i = 0; i < cfg->nummaps;) {
4064 /* Skip resources already managed by EA */
4065 if ((resource_list_find(rl, SYS_RES_MEMORY, PCIR_BAR(i)) != NULL) ||
4066 (resource_list_find(rl, SYS_RES_IOPORT, PCIR_BAR(i)) != NULL) ||
4067 pci_ea_is_enabled(dev, PCIR_BAR(i))) {
4068 i++;
4069 continue;
4070 }
4071
4072 /*
4073 * Skip quirked resources.
4074 */
4075 for (q = &pci_quirks[0]; q->devid != 0; q++)
4076 if (q->devid == devid &&
4077 q->type == PCI_QUIRK_UNMAP_REG &&
4078 q->arg1 == PCIR_BAR(i))
4079 break;
4080 if (q->devid != 0) {
4081 i++;
4082 continue;
4083 }
4084 i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
4085 prefetchmask & (1 << i));
4086 }
4087
4088 /*
4089 * Add additional, quirked resources.
4090 */
4091 for (q = &pci_quirks[0]; q->devid != 0; q++)
4092 if (q->devid == devid && q->type == PCI_QUIRK_MAP_REG)
4093 pci_add_map(bus, dev, q->arg1, rl, force, 0);
4094
4095 if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
4096#ifdef __PCI_REROUTE_INTERRUPT
4097 /*
4098 * Try to re-route interrupts. Sometimes the BIOS or
4099 * firmware may leave bogus values in these registers.
4100 * If the re-route fails, then just stick with what we
4101 * have.
4102 */
4103 pci_assign_interrupt(bus, dev, 1);
4104#else
4105 pci_assign_interrupt(bus, dev, 0);
4106#endif
4107 }
4108
4109 if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
4110 pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
4111 if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_XHCI)
4112 xhci_early_takeover(dev);
4113 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
4114 ehci_early_takeover(dev);
4115 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
4116 ohci_early_takeover(dev);
4117 else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
4118 uhci_early_takeover(dev);
4119 }
4120
4121#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
4122 /*
4123 * Reserve resources for secondary bus ranges behind bridge
4124 * devices.
4125 */
4126 pci_reserve_secbus(bus, dev, cfg, rl);
4127#endif
4128}
4129
4130static struct pci_devinfo *
4131pci_identify_function(device_t pcib, device_t dev, int domain, int busno,
4132 int slot, int func)
4133{
4134 struct pci_devinfo *dinfo;
4135
4136 dinfo = pci_read_device(pcib, dev, domain, busno, slot, func);
4137 if (dinfo != NULL)
4138 pci_add_child(dev, dinfo);
4139
4140 return (dinfo);
4141}
4142
4143void
4144pci_add_children(device_t dev, int domain, int busno)
4145{
4146#define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
4147 device_t pcib = device_get_parent(dev);
4148 struct pci_devinfo *dinfo;
4149 int maxslots;
4150 int s, f, pcifunchigh;
4151 uint8_t hdrtype;
4152 int first_func;
4153
4154 /*
4155 * Try to detect a device at slot 0, function 0. If it exists, try to
4156 * enable ARI. We must enable ARI before detecting the rest of the
4157 * functions on this bus as ARI changes the set of slots and functions
4158 * that are legal on this bus.
4159 */
4160 dinfo = pci_identify_function(pcib, dev, domain, busno, 0, 0);
4161 if (dinfo != NULL && pci_enable_ari)
4162 PCIB_TRY_ENABLE_ARI(pcib, dinfo->cfg.dev);
4163
4164 /*
4165 * Start looking for new devices on slot 0 at function 1 because we
4166 * just identified the device at slot 0, function 0.
4167 */
4168 first_func = 1;
4169
4170 maxslots = PCIB_MAXSLOTS(pcib);
4171 for (s = 0; s <= maxslots; s++, first_func = 0) {
4172 pcifunchigh = 0;
4173 f = 0;
4174 DELAY(1);
4175
4176 /* If function 0 is not present, skip to the next slot. */
4177 if (REG(PCIR_VENDOR, 2) == PCIV_INVALID)
4178 continue;
4179 hdrtype = REG(PCIR_HDRTYPE, 1);
4180 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
4181 continue;
4182 if (hdrtype & PCIM_MFDEV)
4183 pcifunchigh = PCIB_MAXFUNCS(pcib);
4184 for (f = first_func; f <= pcifunchigh; f++)
4185 pci_identify_function(pcib, dev, domain, busno, s, f);
4186 }
4187#undef REG
4188}
4189
4190int
4191pci_rescan_method(device_t dev)
4192{
4193#define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
4194 device_t pcib = device_get_parent(dev);
4195 device_t child, *devlist, *unchanged;
4196 int devcount, error, i, j, maxslots, oldcount;
4197 int busno, domain, s, f, pcifunchigh;
4198 uint8_t hdrtype;
4199
4200 /* No need to check for ARI on a rescan. */
4201 error = device_get_children(dev, &devlist, &devcount);
4202 if (error)
4203 return (error);
4204 if (devcount != 0) {
4205 unchanged = malloc(devcount * sizeof(device_t), M_TEMP,
4206 M_NOWAIT | M_ZERO);
4207 if (unchanged == NULL) {
4208 free(devlist, M_TEMP);
4209 return (ENOMEM);
4210 }
4211 } else
4212 unchanged = NULL;
4213
4214 domain = pcib_get_domain(dev);
4215 busno = pcib_get_bus(dev);
4216 maxslots = PCIB_MAXSLOTS(pcib);
4217 for (s = 0; s <= maxslots; s++) {
4218 /* If function 0 is not present, skip to the next slot. */
4219 f = 0;
4220 if (REG(PCIR_VENDOR, 2) == PCIV_INVALID)
4221 continue;
4222 pcifunchigh = 0;
4223 hdrtype = REG(PCIR_HDRTYPE, 1);
4224 if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
4225 continue;
4226 if (hdrtype & PCIM_MFDEV)
4227 pcifunchigh = PCIB_MAXFUNCS(pcib);
4228 for (f = 0; f <= pcifunchigh; f++) {
4229 if (REG(PCIR_VENDOR, 2) == PCIV_INVALID)
4230 continue;
4231
4232 /*
4233 * Found a valid function. Check if a
4234 * device_t for this device already exists.
4235 */
4236 for (i = 0; i < devcount; i++) {
4237 child = devlist[i];
4238 if (child == NULL)
4239 continue;
4240 if (pci_get_slot(child) == s &&
4241 pci_get_function(child) == f) {
4242 unchanged[i] = child;
4243 goto next_func;
4244 }
4245 }
4246
4247 pci_identify_function(pcib, dev, domain, busno, s, f);
4248 next_func:;
4249 }
4250 }
4251
4252 /* Remove devices that are no longer present. */
4253 for (i = 0; i < devcount; i++) {
4254 if (unchanged[i] != NULL)
4255 continue;
4256 device_delete_child(dev, devlist[i]);
4257 }
4258
4259 free(devlist, M_TEMP);
4260 oldcount = devcount;
4261
4262 /* Try to attach the devices just added. */
4263 error = device_get_children(dev, &devlist, &devcount);
4264 if (error) {
4265 free(unchanged, M_TEMP);
4266 return (error);
4267 }
4268
4269 for (i = 0; i < devcount; i++) {
4270 for (j = 0; j < oldcount; j++) {
4271 if (devlist[i] == unchanged[j])
4272 goto next_device;
4273 }
4274
4275 device_probe_and_attach(devlist[i]);
4276 next_device:;
4277 }
4278
4279 free(unchanged, M_TEMP);
4280 free(devlist, M_TEMP);
4281 return (0);
4282#undef REG
4283}
4284
4285#ifdef PCI_IOV
4286device_t
4287pci_add_iov_child(device_t bus, device_t pf, uint16_t rid, uint16_t vid,
4288 uint16_t did)
4289{
4290 struct pci_devinfo *vf_dinfo;
4291 device_t pcib;
4292 int busno, slot, func;
4293
4294 pcib = device_get_parent(bus);
4295
4296 PCIB_DECODE_RID(pcib, rid, &busno, &slot, &func);
4297
4298 vf_dinfo = pci_fill_devinfo(pcib, bus, pci_get_domain(pcib), busno,
4299 slot, func, vid, did);
4300
4301 vf_dinfo->cfg.flags |= PCICFG_VF;
4302 pci_add_child(bus, vf_dinfo);
4303
4304 return (vf_dinfo->cfg.dev);
4305}
4306
4307device_t
4308pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid,
4309 uint16_t vid, uint16_t did)
4310{
4311
4312 return (pci_add_iov_child(bus, pf, rid, vid, did));
4313}
4314#endif
4315
4316/*
4317 * For a PCIe device, set Max_Payload_Size to match the PCIe root's.
4318 */
4319static void
4320pcie_setup_mps(device_t dev)
4321{
4322 struct pci_devinfo *dinfo = device_get_ivars(dev);
4323 device_t root;
4324 uint16_t rmps, mmps, mps;
4325
4326 if (dinfo->cfg.pcie.pcie_location == 0)
4327 return;
4328 root = pci_find_pcie_root_port(dev);
4329 if (root == NULL)
4330 return;
4331 /* Check whether the MPS is already configured. */
4332 rmps = pcie_read_config(root, PCIER_DEVICE_CTL, 2) &
4333 PCIEM_CTL_MAX_PAYLOAD;
4334 mps = pcie_read_config(dev, PCIER_DEVICE_CTL, 2) &
4335 PCIEM_CTL_MAX_PAYLOAD;
4336 if (mps == rmps)
4337 return;
4338 /* Check whether the device is capable of the root's MPS. */
4339 mmps = (pcie_read_config(dev, PCIER_DEVICE_CAP, 2) &
4340 PCIEM_CAP_MAX_PAYLOAD) << 5;
4341 if (rmps > mmps) {
4342 /*
4343 * The device is unable to handle the root's MPS. Limit the root.
4344 * XXX: We should traverse the whole tree, applying
4345 * the limit to all the devices.
4346 */
4347 pcie_adjust_config(root, PCIER_DEVICE_CTL,
4348 PCIEM_CTL_MAX_PAYLOAD, mmps, 2);
4349 } else {
4350 pcie_adjust_config(dev, PCIER_DEVICE_CTL,
4351 PCIEM_CTL_MAX_PAYLOAD, rmps, 2);
4352 }
4353}
4354
4355static void
4356pci_add_child_clear_aer(device_t dev, struct pci_devinfo *dinfo)
4357{
4358 int aer;
4359 uint32_t r;
4360 uint16_t r2;
4361
4362 if (dinfo->cfg.pcie.pcie_location != 0 &&
4363 dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) {
4364 r2 = pci_read_config(dev, dinfo->cfg.pcie.pcie_location +
4365 PCIER_ROOT_CTL, 2);
4366 r2 &= ~(PCIEM_ROOT_CTL_SERR_CORR |
4367 PCIEM_ROOT_CTL_SERR_NONFATAL | PCIEM_ROOT_CTL_SERR_FATAL);
4368 pci_write_config(dev, dinfo->cfg.pcie.pcie_location +
4369 PCIER_ROOT_CTL, r2, 2);
4370 }
4371 if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) {
4372 r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4);
4373 pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4);
4374 if (r != 0 && bootverbose) {
4375 pci_printf(&dinfo->cfg,
4376 "clearing AER UC 0x%08x -> 0x%08x\n",
4377 r, pci_read_config(dev, aer + PCIR_AER_UC_STATUS,
4378 4));
4379 }
4380
4381 r = pci_read_config(dev, aer + PCIR_AER_UC_MASK, 4);
4399 pci_write_config(dev, aer + PCIR_AER_UC_MASK, r, 4);
4400
4401 r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4);
4402 pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4);
4403 if (r != 0 && bootverbose) {
4404 pci_printf(&dinfo->cfg,
4405 "clearing AER COR 0x%08x -> 0x%08x\n",
4406 r, pci_read_config(dev, aer + PCIR_AER_COR_STATUS,
4407 4));
4408 }
4409
4410 r = pci_read_config(dev, aer + PCIR_AER_COR_MASK, 4);
4419 pci_write_config(dev, aer + PCIR_AER_COR_MASK, r, 4);
4420
4421 r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location +
4422 PCIER_DEVICE_CTL, 2);
4423 r |= PCIEM_CTL_COR_ENABLE | PCIEM_CTL_NFER_ENABLE |
4424 PCIEM_CTL_FER_ENABLE | PCIEM_CTL_URR_ENABLE;
4425 pci_write_config(dev, dinfo->cfg.pcie.pcie_location +
4426 PCIER_DEVICE_CTL, r, 2);
4427 }
4428}
4429
4430void
4431pci_add_child(device_t bus, struct pci_devinfo *dinfo)
4432{
4433 device_t dev;
4434
4435 dinfo->cfg.dev = dev = device_add_child(bus, NULL, -1);
4436 device_set_ivars(dev, dinfo);
4437 resource_list_init(&dinfo->resources);
4438 pci_cfg_save(dev, dinfo, 0);
4439 pci_cfg_restore(dev, dinfo);
4440 pci_print_verbose(dinfo);
4441 pci_add_resources(bus, dev, 0, 0);
4442 pcie_setup_mps(dev);
4443 pci_child_added(dinfo->cfg.dev);
4444
4445 if (pci_clear_aer_on_attach)
4446 pci_add_child_clear_aer(dev, dinfo);
4447
4448 EVENTHANDLER_INVOKE(pci_add_device, dinfo->cfg.dev);
4449}
4450
4451void
4452pci_child_added_method(device_t dev, device_t child)
4453{
4454
4455}
4456
4457static int
4458pci_probe(device_t dev)
4459{
4460
4461 device_set_desc(dev, "PCI bus");
4462
4463 /* Allow other subclasses to override this driver. */
4464 return (BUS_PROBE_GENERIC);
4465}
4466
4467int
4468pci_attach_common(device_t dev)
4469{
4470 struct pci_softc *sc;
4471 int busno, domain;
4472#ifdef PCI_RES_BUS
4473 int rid;
4474#endif
4475
4476 sc = device_get_softc(dev);
4477 domain = pcib_get_domain(dev);
4478 busno = pcib_get_bus(dev);
4479#ifdef PCI_RES_BUS
4480 rid = 0;
4481 sc->sc_bus = bus_alloc_resource(dev, PCI_RES_BUS, &rid, busno, busno,
4482 1, 0);
4483 if (sc->sc_bus == NULL) {
4484 device_printf(dev, "failed to allocate bus number\n");
4485 return (ENXIO);
4486 }
4487#endif
4488 if (bootverbose)
4489 device_printf(dev, "domain=%d, physical bus=%d\n",
4490 domain, busno);
4491 sc->sc_dma_tag = bus_get_dma_tag(dev);
4492 return (0);
4493}
4494
4495int
4496pci_attach(device_t dev)
4497{
4498 int busno, domain, error;
4499
4500 error = pci_attach_common(dev);
4501 if (error)
4502 return (error);
4503
4504 /*
4505 * Since there can be multiple independently numbered PCI
4506 * buses on systems with multiple PCI domains, we can't use
4507 * the unit number to decide which bus we are probing. We ask
4508 * the parent pcib what our domain and bus numbers are.
4509 */
4510 domain = pcib_get_domain(dev);
4511 busno = pcib_get_bus(dev);
4512 pci_add_children(dev, domain, busno);
4513 return (bus_generic_attach(dev));
4514}
4515
4516int
4517pci_detach(device_t dev)
4518{
4519#ifdef PCI_RES_BUS
4520 struct pci_softc *sc;
4521#endif
4522 int error;
4523
4524 error = bus_generic_detach(dev);
4525 if (error)
4526 return (error);
4527#ifdef PCI_RES_BUS
4528 sc = device_get_softc(dev);
4529 error = bus_release_resource(dev, PCI_RES_BUS, 0, sc->sc_bus);
4530 if (error)
4531 return (error);
4532#endif
4533 return (device_delete_children(dev));
4534}
4535
4536static void
4537pci_hint_device_unit(device_t dev, device_t child, const char *name, int *unitp)
4538{
4539 int line, unit;
4540 const char *at;
4541 char me1[24], me2[32];
4542 uint8_t b, s, f;
4543 uint32_t d;
4544 device_location_cache_t *cache;
4545
4546 d = pci_get_domain(child);
4547 b = pci_get_bus(child);
4548 s = pci_get_slot(child);
4549 f = pci_get_function(child);
4550 snprintf(me1, sizeof(me1), "pci%u:%u:%u", b, s, f);
4551 snprintf(me2, sizeof(me2), "pci%u:%u:%u:%u", d, b, s, f);
4552 line = 0;
4553 cache = dev_wired_cache_init();
4554 while (resource_find_dev(&line, name, &unit, "at", NULL) == 0) {
4555 resource_string_value(name, unit, "at", &at);
4556 if (strcmp(at, me1) == 0 || strcmp(at, me2) == 0) {
4557 *unitp = unit;
4558 break;
4559 }
4560 if (dev_wired_cache_match(cache, child, at)) {
4561 *unitp = unit;
4562 break;
4563 }
4564 }
4565 dev_wired_cache_fini(cache);
4566}
4567
4568static void
4569pci_set_power_child(device_t dev, device_t child, int state)
4570{
4571 device_t pcib;
4572 int dstate;
4573
4574 /*
4575 * Set the device to the given state. If the firmware suggests
4576 * a different power state, use it instead. If power management
4577 * is not present, the firmware is responsible for managing
4578 * device power. Skip children who aren't attached since they
4579 * are handled separately.
4580 */
4581 pcib = device_get_parent(dev);
4582 dstate = state;
4583 if (device_is_attached(child) &&
4584 PCIB_POWER_FOR_SLEEP(pcib, child, &dstate) == 0)
4585 pci_set_powerstate(child, dstate);
4586}
4587
4588int
4589pci_suspend_child(device_t dev, device_t child)
4590{
4591 struct pci_devinfo *dinfo;
4592 struct resource_list_entry *rle;
4593 int error;
4594
4595 dinfo = device_get_ivars(child);
4596
4597 /*
4598 * Save the PCI configuration space for the child and set the
4599 * device in the appropriate power state for this sleep state.
4600 */
4601 pci_cfg_save(child, dinfo, 0);
4602
4603 /* Suspend devices before potentially powering them down. */
4604 error = bus_generic_suspend_child(dev, child);
4605
4606 if (error)
4607 return (error);
4608
4609 if (pci_do_power_suspend) {
4610 /*
4611 * Make sure this device's interrupt handler is not invoked
4612 * in the case the device uses a shared interrupt that can
4613 * be raised by some other device.
4614 * This is applicable only to regular (legacy) PCI interrupts
4615 * as MSI/MSI-X interrupts are never shared.
4616 */
4617 rle = resource_list_find(&dinfo->resources,
4618 SYS_RES_IRQ, 0);
4619 if (rle != NULL && rle->res != NULL)
4620 (void)bus_suspend_intr(child, rle->res);
4621 pci_set_power_child(dev, child, PCI_POWERSTATE_D3);
4622 }
4623
4624 return (0);
4625}
4626
4627int
4628pci_resume_child(device_t dev, device_t child)
4629{
4630 struct pci_devinfo *dinfo;
4631 struct resource_list_entry *rle;
4632
4633 if (pci_do_power_resume)
4634 pci_set_power_child(dev, child, PCI_POWERSTATE_D0);
4635
4636 dinfo = device_get_ivars(child);
4637 pci_cfg_restore(child, dinfo);
4638 if (!device_is_attached(child))
4639 pci_cfg_save(child, dinfo, 1);
4640
4641 bus_generic_resume_child(dev, child);
4642
4643 /*
4644 * Allow interrupts only after fully resuming the driver and hardware.
4645 */
4646 if (pci_do_power_resume) {
4647 /* See pci_suspend_child for details. */
4648 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
4649 if (rle != NULL && rle->res != NULL)
4650 (void)bus_resume_intr(child, rle->res);
4651 }
4652
4653 return (0);
4654}
4655
4656int
4657pci_resume(device_t dev)
4658{
4659 device_t child, *devlist;
4660 int error, i, numdevs;
4661
4662 if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
4663 return (error);
4664
4665 /*
4666 * Resume critical devices first, then everything else later.
4667 */
4668 for (i = 0; i < numdevs; i++) {
4669 child = devlist[i];
4670 switch (pci_get_class(child)) {
4671 case PCIC_DISPLAY:
4672 case PCIC_MEMORY:
4673 case PCIC_BRIDGE:
4674 case PCIC_BASEPERIPH:
4675 BUS_RESUME_CHILD(dev, child);
4676 break;
4677 }
4678 }
4679 for (i = 0; i < numdevs; i++) {
4680 child = devlist[i];
4681 switch (pci_get_class(child)) {
4682 case PCIC_DISPLAY:
4683 case PCIC_MEMORY:
4684 case PCIC_BRIDGE:
4685 case PCIC_BASEPERIPH:
4686 break;
4687 default:
4688 BUS_RESUME_CHILD(dev, child);
4689 }
4690 }
4691 free(devlist, M_TEMP);
4692 return (0);
4693}
4694
4695static void
4696pci_load_vendor_data(void)
4697{
4698 caddr_t data;
4699 void *ptr;
4700 size_t sz;
4701
4702 data = preload_search_by_type("pci_vendor_data");
4703 if (data != NULL) {
4704 ptr = preload_fetch_addr(data);
4705 sz = preload_fetch_size(data);
4706 if (ptr != NULL && sz != 0) {
4707 pci_vendordata = ptr;
4708 pci_vendordata_size = sz;
4709 /* terminate the database */
4710 pci_vendordata[pci_vendordata_size] = '\n';
4711 }
4712 }
4713}
4714
4715void
4716pci_driver_added(device_t dev, driver_t *driver)
4717{
4718 int numdevs;
4719 device_t *devlist;
4720 device_t child;
4721 struct pci_devinfo *dinfo;
4722 int i;
4723
4724 if (bootverbose)
4725 device_printf(dev, "driver added\n");
4726 DEVICE_IDENTIFY(driver, dev);
4727 if (device_get_children(dev, &devlist, &numdevs) != 0)
4728 return;
4729 for (i = 0; i < numdevs; i++) {
4730 child = devlist[i];
4731 if (device_get_state(child) != DS_NOTPRESENT)
4732 continue;
4733 dinfo = device_get_ivars(child);
4734 pci_print_verbose(dinfo);
4735 if (bootverbose)
4736 pci_printf(&dinfo->cfg, "reprobing on driver added\n");
4737 pci_cfg_restore(child, dinfo);
4738 if (device_probe_and_attach(child) != 0)
4739 pci_child_detached(dev, child);
4740 }
4741 free(devlist, M_TEMP);
4742}
4743
4744int
4745pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
4746 driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
4747{
4748 struct pci_devinfo *dinfo;
4749 struct msix_table_entry *mte;
4750 struct msix_vector *mv;
4751 uint64_t addr;
4752 uint32_t data;
4753 void *cookie;
4754 int error, rid;
4755
4756 error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
4757 arg, &cookie);
4758 if (error)
4759 return (error);
4760
4761 /* If this is not a direct child, just bail out. */
4762 if (device_get_parent(child) != dev) {
4763 *cookiep = cookie;
4764 return(0);
4765 }
4766
4767 rid = rman_get_rid(irq);
4768 if (rid == 0) {
4769 /* Make sure that INTx is enabled */
4771 } else {
4772 /*
4773 * Check to see if the interrupt is MSI or MSI-X.
4774 * Ask our parent to map the MSI and give
4775 * us the address and data register values.
4776 * If we fail for some reason, teardown the
4777 * interrupt handler.
4778 */
4779 dinfo = device_get_ivars(child);
4780 if (dinfo->cfg.msi.msi_alloc > 0) {
4781 if (dinfo->cfg.msi.msi_addr == 0) {
4782 KASSERT(dinfo->cfg.msi.msi_handlers == 0,
4783 ("MSI has handlers, but vectors not mapped"));
4784 error = PCIB_MAP_MSI(device_get_parent(dev),
4785 child, rman_get_start(irq), &addr, &data);
4786 if (error)
4787 goto bad;
4788 dinfo->cfg.msi.msi_addr = addr;
4789 dinfo->cfg.msi.msi_data = data;
4790 }
4791 if (dinfo->cfg.msi.msi_handlers == 0)
4792 pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
4793 dinfo->cfg.msi.msi_data);
4794 dinfo->cfg.msi.msi_handlers++;
4795 } else {
4796 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
4797 ("No MSI or MSI-X interrupts allocated"));
4798 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
4799 ("MSI-X index too high"));
4800 mte = &dinfo->cfg.msix.msix_table[rid - 1];
4801 KASSERT(mte->mte_vector != 0, ("no message vector"));
4802 mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
4803 KASSERT(mv->mv_irq == rman_get_start(irq),
4804 ("IRQ mismatch"));
4805 if (mv->mv_address == 0) {
4806 KASSERT(mte->mte_handlers == 0,
4807 ("MSI-X table entry has handlers, but vector not mapped"));
4808 error = PCIB_MAP_MSI(device_get_parent(dev),
4809 child, rman_get_start(irq), &addr, &data);
4810 if (error)
4811 goto bad;
4812 mv->mv_address = addr;
4813 mv->mv_data = data;
4814 }
4815
4816 /*
4817 * The MSIX table entry must be made valid by
4818 * incrementing the mte_handlers before
4819 * calling pci_enable_msix() and
4820 * pci_resume_msix(). Else the MSIX rewrite
4821 * table quirk will not work as expected.
4822 */
4823 mte->mte_handlers++;
4824 if (mte->mte_handlers == 1) {
4825 pci_enable_msix(child, rid - 1, mv->mv_address,
4826 mv->mv_data);
4827 pci_unmask_msix(child, rid - 1);
4828 }
4829 }
4830
4831 /*
4832 * Make sure that INTx is disabled if we are using MSI/MSI-X,
4833 * unless the device is affected by PCI_QUIRK_MSI_INTX_BUG,
4834 * in which case we "enable" INTx so MSI/MSI-X actually works.
4835 */
4836 if (!pci_has_quirk(pci_get_devid(child),
4837 PCI_QUIRK_MSI_INTX_BUG))
4838 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
4839 else
4840 pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
4841 bad:
4842 if (error) {
4843 (void)bus_generic_teardown_intr(dev, child, irq,
4844 cookie);
4845 return (error);
4846 }
4847 }
4848 *cookiep = cookie;
4849 return (0);
4850}
4851
4852int
4853pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
4854 void *cookie)
4855{
4856 struct msix_table_entry *mte;
4857 struct resource_list_entry *rle;
4858 struct pci_devinfo *dinfo;
4859 int error, rid;
4860
4861 if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
4862 return (EINVAL);
4863
4864 /* If this isn't a direct child, just bail out */
4865 if (device_get_parent(child) != dev)
4866 return(bus_generic_teardown_intr(dev, child, irq, cookie));
4867
4868 rid = rman_get_rid(irq);
4869 if (rid == 0) {
4870 /* Mask INTx */
4871 pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
4872 } else {
4873 /*
4874 * Check to see if the interrupt is MSI or MSI-X. If so,
4875 * decrement the appropriate handlers count and mask the
4876 * MSI-X message, or disable MSI messages if the count
4877 * drops to 0.
4878 */
4879 dinfo = device_get_ivars(child);
4880 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
4881 if (rle->res != irq)
4882 return (EINVAL);
4883 if (dinfo->cfg.msi.msi_alloc > 0) {
4884 KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
4885 ("MSI-X index too high"));
4886 if (dinfo->cfg.msi.msi_handlers == 0)
4887 return (EINVAL);
4888 dinfo->cfg.msi.msi_handlers--;
4889 if (dinfo->cfg.msi.msi_handlers == 0)
4890 pci_disable_msi(child);
4891 } else {
4892 KASSERT(dinfo->cfg.msix.msix_alloc > 0,
4893 ("No MSI or MSI-X interrupts allocated"));
4894 KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
4895 ("MSI-X index too high"));
4896 mte = &dinfo->cfg.msix.msix_table[rid - 1];
4897 if (mte->mte_handlers == 0)
4898 return (EINVAL);
4899 mte->mte_handlers--;
4900 if (mte->mte_handlers == 0)
4901 pci_mask_msix(child, rid - 1);
4902 }
4903 }
4904 error = bus_generic_teardown_intr(dev, child, irq, cookie);
4905 if (rid > 0)
4906 KASSERT(error == 0,
4907 ("%s: generic teardown failed for MSI/MSI-X", __func__));
4908 return (error);
4909}
4910
4911int
4912pci_print_child(device_t dev, device_t child)
4913{
4914 struct pci_devinfo *dinfo;
4915 struct resource_list *rl;
4916 int retval = 0;
4917
4918 dinfo = device_get_ivars(child);
4919 rl = &dinfo->resources;
4920
4921 retval += bus_print_child_header(dev, child);
4922
4923 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#jx");
4924 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#jx");
4925 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%jd");
4926 if (device_get_flags(dev))
4927 retval += printf(" flags %#x", device_get_flags(dev));
4928
4929 retval += printf(" at device %d.%d", pci_get_slot(child),
4930 pci_get_function(child));
4931
4932 retval += bus_print_child_domain(dev, child);
4933 retval += bus_print_child_footer(dev, child);
4934
4935 return (retval);
4936}
4937
4938static const struct
4939{
4940 int class;
4941 int subclass;
4942 int report; /* 0 = bootverbose, 1 = always */
4943 const char *desc;
4944} pci_nomatch_tab[] = {
4945 {PCIC_OLD, -1, 1, "old"},
4946 {PCIC_OLD, PCIS_OLD_NONVGA, 1, "non-VGA display device"},
4947 {PCIC_OLD, PCIS_OLD_VGA, 1, "VGA-compatible display device"},
4948 {PCIC_STORAGE, -1, 1, "mass storage"},
4949 {PCIC_STORAGE, PCIS_STORAGE_SCSI, 1, "SCSI"},
4950 {PCIC_STORAGE, PCIS_STORAGE_IDE, 1, "ATA"},
4951 {PCIC_STORAGE, PCIS_STORAGE_FLOPPY, 1, "floppy disk"},
4952 {PCIC_STORAGE, PCIS_STORAGE_IPI, 1, "IPI"},
4953 {PCIC_STORAGE, PCIS_STORAGE_RAID, 1, "RAID"},
4954 {PCIC_STORAGE, PCIS_STORAGE_ATA_ADMA, 1, "ATA (ADMA)"},
4955 {PCIC_STORAGE, PCIS_STORAGE_SATA, 1, "SATA"},
4956 {PCIC_STORAGE, PCIS_STORAGE_SAS, 1, "SAS"},
4957 {PCIC_STORAGE, PCIS_STORAGE_NVM, 1, "NVM"},
4958 {PCIC_NETWORK, -1, 1, "network"},
4959 {PCIC_NETWORK, PCIS_NETWORK_ETHERNET, 1, "ethernet"},
4960 {PCIC_NETWORK, PCIS_NETWORK_TOKENRING, 1, "token ring"},
4961 {PCIC_NETWORK, PCIS_NETWORK_FDDI, 1, "fddi"},
4962 {PCIC_NETWORK, PCIS_NETWORK_ATM, 1, "ATM"},
4963 {PCIC_NETWORK, PCIS_NETWORK_ISDN, 1, "ISDN"},
4964 {PCIC_DISPLAY, -1, 1, "display"},
4965 {PCIC_DISPLAY, PCIS_DISPLAY_VGA, 1, "VGA"},
4966 {PCIC_DISPLAY, PCIS_DISPLAY_XGA, 1, "XGA"},
4967 {PCIC_DISPLAY, PCIS_DISPLAY_3D, 1, "3D"},
4968 {PCIC_MULTIMEDIA, -1, 1, "multimedia"},
4969 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_VIDEO, 1, "video"},
4970 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_AUDIO, 1, "audio"},
4971 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_TELE, 1, "telephony"},
4972 {PCIC_MULTIMEDIA, PCIS_MULTIMEDIA_HDA, 1, "HDA"},
4973 {PCIC_MEMORY, -1, 1, "memory"},
4974 {PCIC_MEMORY, PCIS_MEMORY_RAM, 1, "RAM"},
4975 {PCIC_MEMORY, PCIS_MEMORY_FLASH, 1, "flash"},
4976 {PCIC_BRIDGE, -1, 1, "bridge"},
4977 {PCIC_BRIDGE, PCIS_BRIDGE_HOST, 1, "HOST-PCI"},
4978 {PCIC_BRIDGE, PCIS_BRIDGE_ISA, 1, "PCI-ISA"},
4979 {PCIC_BRIDGE, PCIS_BRIDGE_EISA, 1, "PCI-EISA"},
4980 {PCIC_BRIDGE, PCIS_BRIDGE_MCA, 1, "PCI-MCA"},
4981 {PCIC_BRIDGE, PCIS_BRIDGE_PCI, 1, "PCI-PCI"},
4982 {PCIC_BRIDGE, PCIS_BRIDGE_PCMCIA, 1, "PCI-PCMCIA"},
4983 {PCIC_BRIDGE, PCIS_BRIDGE_NUBUS, 1, "PCI-NuBus"},
4984 {PCIC_BRIDGE, PCIS_BRIDGE_CARDBUS, 1, "PCI-CardBus"},
4985 {PCIC_BRIDGE, PCIS_BRIDGE_RACEWAY, 1, "PCI-RACEway"},
4986 {PCIC_SIMPLECOMM, -1, 1, "simple comms"},
4987 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_UART, 1, "UART"}, /* could detect 16550 */
4988 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_PAR, 1, "parallel port"},
4989 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MULSER, 1, "multiport serial"},
4990 {PCIC_SIMPLECOMM, PCIS_SIMPLECOMM_MODEM, 1, "generic modem"},
4991 {PCIC_BASEPERIPH, -1, 0, "base peripheral"},
4992 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PIC, 1, "interrupt controller"},
4993 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_DMA, 1, "DMA controller"},
4994 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_TIMER, 1, "timer"},
4995 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_RTC, 1, "realtime clock"},
4996 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_PCIHOT, 1, "PCI hot-plug controller"},
4997 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_SDHC, 1, "SD host controller"},
4998 {PCIC_BASEPERIPH, PCIS_BASEPERIPH_IOMMU, 1, "IOMMU"},
4999 {PCIC_INPUTDEV, -1, 1, "input device"},
5000 {PCIC_INPUTDEV, PCIS_INPUTDEV_KEYBOARD, 1, "keyboard"},
5001 {PCIC_INPUTDEV, PCIS_INPUTDEV_DIGITIZER,1, "digitizer"},
5002 {PCIC_INPUTDEV, PCIS_INPUTDEV_MOUSE, 1, "mouse"},
5003 {PCIC_INPUTDEV, PCIS_INPUTDEV_SCANNER, 1, "scanner"},
5004 {PCIC_INPUTDEV, PCIS_INPUTDEV_GAMEPORT, 1, "gameport"},
5005 {PCIC_DOCKING, -1, 1, "docking station"},
5006 {PCIC_PROCESSOR, -1, 1, "processor"},
5007 {PCIC_SERIALBUS, -1, 1, "serial bus"},
5008 {PCIC_SERIALBUS, PCIS_SERIALBUS_FW, 1, "FireWire"},
5009 {PCIC_SERIALBUS, PCIS_SERIALBUS_ACCESS, 1, "AccessBus"},
5010 {PCIC_SERIALBUS, PCIS_SERIALBUS_SSA, 1, "SSA"},
5011 {PCIC_SERIALBUS, PCIS_SERIALBUS_USB, 1, "USB"},
5012 {PCIC_SERIALBUS, PCIS_SERIALBUS_FC, 1, "Fibre Channel"},
5013 {PCIC_SERIALBUS, PCIS_SERIALBUS_SMBUS, 0, "SMBus"},
5014 {PCIC_WIRELESS, -1, 1, "wireless controller"},
5015 {PCIC_WIRELESS, PCIS_WIRELESS_IRDA, 1, "iRDA"},
5016 {PCIC_WIRELESS, PCIS_WIRELESS_IR, 1, "IR"},
5017 {PCIC_WIRELESS, PCIS_WIRELESS_RF, 1, "RF"},
5018 {PCIC_INTELLIIO, -1, 1, "intelligent I/O controller"},
5019 {PCIC_INTELLIIO, PCIS_INTELLIIO_I2O, 1, "I2O"},
5020 {PCIC_SATCOM, -1, 1, "satellite communication"},
5021 {PCIC_SATCOM, PCIS_SATCOM_TV, 1, "sat TV"},
5022 {PCIC_SATCOM, PCIS_SATCOM_AUDIO, 1, "sat audio"},
5023 {PCIC_SATCOM, PCIS_SATCOM_VOICE, 1, "sat voice"},
5024 {PCIC_SATCOM, PCIS_SATCOM_DATA, 1, "sat data"},
5025 {PCIC_CRYPTO, -1, 1, "encrypt/decrypt"},
5026 {PCIC_CRYPTO, PCIS_CRYPTO_NETCOMP, 1, "network/computer crypto"},
5027 {PCIC_CRYPTO, PCIS_CRYPTO_ENTERTAIN, 1, "entertainment crypto"},
5028 {PCIC_DASP, -1, 0, "dasp"},
5029 {PCIC_DASP, PCIS_DASP_DPIO, 1, "DPIO module"},
5030 {PCIC_DASP, PCIS_DASP_PERFCNTRS, 1, "performance counters"},
5031 {PCIC_DASP, PCIS_DASP_COMM_SYNC, 1, "communication synchronizer"},
5032 {PCIC_DASP, PCIS_DASP_MGMT_CARD, 1, "signal processing management"},
5033 {0, 0, 0, NULL}
5034};
5035
5036void
5037pci_probe_nomatch(device_t dev, device_t child)
5038{
5039 int i, report;
5040 const char *cp, *scp;
5041 char *device;
5042
5043 /*
5044 * Look for a listing for this device in a loaded device database.
5045 */
5046 report = 1;
5047 if ((device = pci_describe_device(child)) != NULL) {
5048 device_printf(dev, "<%s>", device);
5049 free(device, M_DEVBUF);
5050 } else {
5051 /*
5052 * Scan the class/subclass descriptions for a general
5053 * description.
5054 */
5055 cp = "unknown";
5056 scp = NULL;
5057 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
5058 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
5059 if (pci_nomatch_tab[i].subclass == -1) {
5060 cp = pci_nomatch_tab[i].desc;
5061 report = pci_nomatch_tab[i].report;
5062 } else if (pci_nomatch_tab[i].subclass ==
5063 pci_get_subclass(child)) {
5064 scp = pci_nomatch_tab[i].desc;
5065 report = pci_nomatch_tab[i].report;
5066 }
5067 }
5068 }
5069 if (report || bootverbose) {
5070 device_printf(dev, "<%s%s%s>",
5071 cp ? cp : "",
5072 ((cp != NULL) && (scp != NULL)) ? ", " : "",
5073 scp ? scp : "");
5074 }
5075 }
5076 if (report || bootverbose) {
5077 printf(" at device %d.%d (no driver attached)\n",
5078 pci_get_slot(child), pci_get_function(child));
5079 }
5080 pci_cfg_save(child, device_get_ivars(child), 1);
5081}
5082
5083void
5084pci_child_detached(device_t dev, device_t child)
5085{
5086 struct pci_devinfo *dinfo;
5087 struct resource_list *rl;
5088
5089 dinfo = device_get_ivars(child);
5090 rl = &dinfo->resources;
5091
5092 /*
5093 * Have to deallocate IRQs before releasing any MSI messages and
5094 * have to release MSI messages before deallocating any memory
5095 * BARs.
5096 */
5097 if (resource_list_release_active(rl, dev, child, SYS_RES_IRQ) != 0)
5098 pci_printf(&dinfo->cfg, "Device leaked IRQ resources\n");
5099 if (dinfo->cfg.msi.msi_alloc != 0 || dinfo->cfg.msix.msix_alloc != 0) {
5100 if (dinfo->cfg.msi.msi_alloc != 0)
5101 pci_printf(&dinfo->cfg, "Device leaked %d MSI "
5102 "vectors\n", dinfo->cfg.msi.msi_alloc);
5103 else
5104 pci_printf(&dinfo->cfg, "Device leaked %d MSI-X "
5105 "vectors\n", dinfo->cfg.msix.msix_alloc);
5106 (void)pci_release_msi(child);
5107 }
5108 if (resource_list_release_active(rl, dev, child, SYS_RES_MEMORY) != 0)
5109 pci_printf(&dinfo->cfg, "Device leaked memory resources\n");
5110 if (resource_list_release_active(rl, dev, child, SYS_RES_IOPORT) != 0)
5111 pci_printf(&dinfo->cfg, "Device leaked I/O resources\n");
5112#ifdef PCI_RES_BUS
5113 if (resource_list_release_active(rl, dev, child, PCI_RES_BUS) != 0)
5114 pci_printf(&dinfo->cfg, "Device leaked PCI bus numbers\n");
5115#endif
5116
5117 pci_cfg_save(child, dinfo, 1);
5118}
5119
5120/*
5121 * Parse the PCI device database, if loaded, and return a pointer to a
5122 * description of the device.
5123 *
5124 * The database is flat text formatted as follows:
5125 *
5126 * Any line not in a valid format is ignored.
5127 * Lines are terminated with newline '\n' characters.
5128 *
5129 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
5130 * the vendor name.
5131 *
5132 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
5133 * - devices cannot be listed without a corresponding VENDOR line.
5134 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
5135 * another TAB, then the device name.
5136 */
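/*
 * For illustration only (these lines are an example, not an excerpt of
 * the installed database), a fragment in this format could look like:
 *
 *	8086	Intel Corporation
 *		100e	82540EM Gigabit Ethernet Controller
 *	10de	NVIDIA Corporation
 */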
5137
5138/*
5139 * Assuming (ptr) points to the beginning of a line in the database,
5140 * return the vendor or device and description of the next entry.
5141 * The value of (vendor) or (device) inappropriate for the entry type
5142 * is set to -1. Returns nonzero at the end of the database.
5143 *
5144 * Note that this is not fully robust in the face of corrupt data;
5145 * we attempt to safeguard against this by appending a newline to
5146 * the end of the database when we initialise.
5147 */
5148static int
5149pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
5150{
5151 char *cp = *ptr;
5152 int left;
5153
5154 *device = -1;
5155 *vendor = -1;
5156 **desc = '\0';
5157 for (;;) {
5158 left = pci_vendordata_size - (cp - pci_vendordata);
5159 if (left <= 0) {
5160 *ptr = cp;
5161 return(1);
5162 }
5163
5164 /* vendor entry? */
5165 if (*cp != '\t' &&
5166 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
5167 break;
5168 /* device entry? */
5169 if (*cp == '\t' &&
5170 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
5171 break;
5172
5173 /* skip to next line */
5174 while (*cp != '\n' && left > 0) {
5175 cp++;
5176 left--;
5177 }
5178 if (*cp == '\n') {
5179 cp++;
5180 left--;
5181 }
5182 }
5183 /* skip to next line */
5184 while (*cp != '\n' && left > 0) {
5185 cp++;
5186 left--;
5187 }
5188 if (*cp == '\n' && left > 0)
5189 cp++;
5190 *ptr = cp;
5191 return(0);
5192}
5193
5194static char *
5196{
5197 int vendor, device;
5198 char *desc, *vp, *dp, *line;
5199
5200 desc = vp = dp = NULL;
5201
5202 /*
5203 * If we have no vendor data, we can't do anything.
5204 */
5205 if (pci_vendordata == NULL)
5206 goto out;
5207
5208 /*
5209 * Scan the vendor data looking for this device
5210 */
5211 line = pci_vendordata;
5212 if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
5213 goto out;
5214 for (;;) {
5215 if (pci_describe_parse_line(&line, &vendor, &device, &vp))
5216 goto out;
5217 if (vendor == pci_get_vendor(dev))
5218 break;
5219 }
5220 if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
5221 goto out;
5222 for (;;) {
5223 if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
5224 *dp = 0;
5225 break;
5226 }
5227 if (vendor != -1) {
5228 *dp = 0;
5229 break;
5230 }
5231 if (device == pci_get_device(dev))
5232 break;
5233 }
5234 if (dp[0] == '\0')
5235 snprintf(dp, 80, "0x%x", pci_get_device(dev));
5236 if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
5237 NULL)
5238 sprintf(desc, "%s, %s", vp, dp);
5239out:
5240 if (vp != NULL)
5241 free(vp, M_DEVBUF);
5242 if (dp != NULL)
5243 free(dp, M_DEVBUF);
5244 return(desc);
5245}
5246
5247int
5248pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
5249{
5250 struct pci_devinfo *dinfo;
5251 pcicfgregs *cfg;
5252
5253 dinfo = device_get_ivars(child);
5254 cfg = &dinfo->cfg;
5255
5256 switch (which) {
5257 case PCI_IVAR_ETHADDR:
5258 /*
5259 * The generic accessor doesn't deal with failure, so
5260 * we set the return value, then return an error.
5261 */
5262 *((uint8_t **) result) = NULL;
5263 return (EINVAL);
5264 case PCI_IVAR_SUBVENDOR:
5265 *result = cfg->subvendor;
5266 break;
5267 case PCI_IVAR_SUBDEVICE:
5268 *result = cfg->subdevice;
5269 break;
5270 case PCI_IVAR_VENDOR:
5271 *result = cfg->vendor;
5272 break;
5273 case PCI_IVAR_DEVICE:
5274 *result = cfg->device;
5275 break;
5276 case PCI_IVAR_DEVID:
5277 *result = (cfg->device << 16) | cfg->vendor;
5278 break;
5279 case PCI_IVAR_CLASS:
5280 *result = cfg->baseclass;
5281 break;
5282 case PCI_IVAR_SUBCLASS:
5283 *result = cfg->subclass;
5284 break;
5285 case PCI_IVAR_PROGIF:
5286 *result = cfg->progif;
5287 break;
5288 case PCI_IVAR_REVID:
5289 *result = cfg->revid;
5290 break;
5291 case PCI_IVAR_INTPIN:
5292 *result = cfg->intpin;
5293 break;
5294 case PCI_IVAR_IRQ:
5295 *result = cfg->intline;
5296 break;
5297 case PCI_IVAR_DOMAIN:
5298 *result = cfg->domain;
5299 break;
5300 case PCI_IVAR_BUS:
5301 *result = cfg->bus;
5302 break;
5303 case PCI_IVAR_SLOT:
5304 *result = cfg->slot;
5305 break;
5306 case PCI_IVAR_FUNCTION:
5307 *result = cfg->func;
5308 break;
5309 case PCI_IVAR_CMDREG:
5310 *result = cfg->cmdreg;
5311 break;
5312 case PCI_IVAR_CACHELNSZ:
5313 *result = cfg->cachelnsz;
5314 break;
5315 case PCI_IVAR_MINGNT:
5316 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
5317 *result = -1;
5318 return (EINVAL);
5319 }
5320 *result = cfg->mingnt;
5321 break;
5322 case PCI_IVAR_MAXLAT:
5323 if (cfg->hdrtype != PCIM_HDRTYPE_NORMAL) {
5324 *result = -1;
5325 return (EINVAL);
5326 }
5327 *result = cfg->maxlat;
5328 break;
5329 case PCI_IVAR_LATTIMER:
5330 *result = cfg->lattimer;
5331 break;
5332 default:
5333 return (ENOENT);
5334 }
5335 return (0);
5336}
5337
5338int
5339pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
5340{
5341 struct pci_devinfo *dinfo;
5342
5343 dinfo = device_get_ivars(child);
5344
5345 switch (which) {
5346 case PCI_IVAR_INTPIN:
5347 dinfo->cfg.intpin = value;
5348 return (0);
5349 case PCI_IVAR_ETHADDR:
5350 case PCI_IVAR_SUBVENDOR:
5351 case PCI_IVAR_SUBDEVICE:
5352 case PCI_IVAR_VENDOR:
5353 case PCI_IVAR_DEVICE:
5354 case PCI_IVAR_DEVID:
5355 case PCI_IVAR_CLASS:
5356 case PCI_IVAR_SUBCLASS:
5357 case PCI_IVAR_PROGIF:
5358 case PCI_IVAR_REVID:
5359 case PCI_IVAR_IRQ:
5360 case PCI_IVAR_DOMAIN:
5361 case PCI_IVAR_BUS:
5362 case PCI_IVAR_SLOT:
5363 case PCI_IVAR_FUNCTION:
5364 return (EINVAL); /* disallow for now */
5365
5366 default:
5367 return (ENOENT);
5368 }
5369}
5370
5371#include "opt_ddb.h"
5372#ifdef DDB
5373#include <ddb/ddb.h>
5374#include <sys/cons.h>
5375
5376/*
5377 * List resources based on PCI map registers, for use from within ddb.
5378 */
5379
5380DB_SHOW_COMMAND(pciregs, db_pci_dump)
5381{
5382 struct pci_devinfo *dinfo;
5383 struct devlist *devlist_head;
5384 struct pci_conf *p;
5385 const char *name;
5386 int i, error, none_count;
5387
5388 none_count = 0;
5389 /* get the head of the device queue */
5390 devlist_head = &pci_devq;
5391
5392 /*
5393 * Go through the list of devices and print out devices
5394 */
5395 for (error = 0, i = 0,
5396 dinfo = STAILQ_FIRST(devlist_head);
5397 (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
5398 dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
5399 /* Populate pd_name and pd_unit */
5400 name = NULL;
5401 if (dinfo->cfg.dev)
5402 name = device_get_name(dinfo->cfg.dev);
5403
5404 p = &dinfo->conf;
5405 db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
5406 "chip=0x%08x rev=0x%02x hdr=0x%02x\n",
5407 (name && *name) ? name : "none",
5408 (name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
5409 none_count++,
5410 p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
5411 p->pc_sel.pc_func, (p->pc_class << 16) |
5412 (p->pc_subclass << 8) | p->pc_progif,
5413 (p->pc_subdevice << 16) | p->pc_subvendor,
5414 (p->pc_device << 16) | p->pc_vendor,
5415 p->pc_revid, p->pc_hdr);
5416 }
5417}
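/*
 * With the db_printf() format above, a line of "show pciregs" output
 * looks roughly like the following (illustrative values):
 *
 *   em0@pci0:0:25:0:	class=0x020000 card=0x00008086 chip=0x10d38086 rev=0x00 hdr=0x00
 */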
5418#endif /* DDB */
5419
5420struct resource *
5421pci_reserve_map(device_t dev, device_t child, int type, int *rid,
5422 rman_res_t start, rman_res_t end, rman_res_t count, u_int num,
5423 u_int flags)
5424{
5425 struct pci_devinfo *dinfo = device_get_ivars(child);
5426 struct resource_list *rl = &dinfo->resources;
5427 struct resource *res;
5428 struct pci_map *pm;
5429 uint16_t cmd;
5430 pci_addr_t map, testval;
5431 int mapsize;
5432
5433 res = NULL;
5434
5435 /* If rid is managed by EA, ignore it */
5436 if (pci_ea_is_enabled(child, *rid))
5437 goto out;
5438
5439 pm = pci_find_bar(child, *rid);
5440 if (pm != NULL) {
5441 /* This is a BAR that we failed to allocate earlier. */
5442 mapsize = pm->pm_size;
5443 map = pm->pm_value;
5444 } else {
5445 /*
5446 * Weed out the bogons, and figure out how large the
5447 * BAR/map is. BARs that read back 0 here are bogus
5448 * and unimplemented. Note: atapci devices in legacy mode
5449 * are special and handled elsewhere in the code. If you
5450 * have an atapci device in legacy mode and it fails
5451 * here, that other code is broken.
5452 */
5453 pci_read_bar(child, *rid, &map, &testval, NULL);
5454
5455 /*
5456 * Determine the size of the BAR and ignore BARs with a size
5457 * of 0. Device ROM BARs use a different mask value.
5458 */
5459 if (PCIR_IS_BIOS(&dinfo->cfg, *rid))
5460 mapsize = pci_romsize(testval);
5461 else
5462 mapsize = pci_mapsize(testval);
5463 if (mapsize == 0)
5464 goto out;
5465 pm = pci_add_bar(child, *rid, map, mapsize);
5466 }
5467
5468 if (PCI_BAR_MEM(map) || PCIR_IS_BIOS(&dinfo->cfg, *rid)) {
5469 if (type != SYS_RES_MEMORY) {
5470 if (bootverbose)
5471 device_printf(dev,
5472 "child %s requested type %d for rid %#x,"
5473 " but the BAR says it is a memio\n",
5474 device_get_nameunit(child), type, *rid);
5475 goto out;
5476 }
5477 } else {
5478 if (type != SYS_RES_IOPORT) {
5479 if (bootverbose)
5480 device_printf(dev,
5481 "child %s requested type %d for rid %#x,"
5482 " but the BAR says it is an ioport\n",
5483 device_get_nameunit(child), type, *rid);
5484 goto out;
5485 }
5486 }
5487
5488 /*
5489 * For real BARs, we need to override the size that
5490 * the driver requests, because that's what the BAR
5491 * actually uses and we would otherwise have a
5492 * situation where we might allocate the excess to
5493 * another driver, which won't work.
5494 */
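 /*
  * An illustrative example, using the code just below: a BAR that
  * probes with mapsize == 16 decodes 1 << 16 == 64KB, so with
  * num == 1 the count becomes 64KB and the alignment is raised to
  * RF_ALIGNMENT_LOG2(16), regardless of the smaller size the driver
  * may have requested.
  */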
5495 count = ((pci_addr_t)1 << mapsize) * num;
5496 if (RF_ALIGNMENT(flags) < mapsize)
5497 flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
5498 if (PCI_BAR_MEM(map) && (map & PCIM_BAR_MEM_PREFETCH))
5499 flags |= RF_PREFETCHABLE;
5500
5501 /*
5502 * Allocate enough resource, and then write back the
5503 * appropriate BAR for that resource.
5504 */
5505 resource_list_add(rl, type, *rid, start, end, count);
5506 res = resource_list_reserve(rl, dev, child, type, rid, start, end,
5507 count, flags & ~RF_ACTIVE);
5508 if (res == NULL) {
5509 resource_list_delete(rl, type, *rid);
5510 device_printf(child,
5511 "%#jx bytes of rid %#x res %d failed (%#jx, %#jx).\n",
5512 count, *rid, type, start, end);
5513 goto out;
5514 }
5515 if (bootverbose)
5516 device_printf(child,
5517 "Lazy allocation of %#jx bytes rid %#x type %d at %#jx\n",
5518 count, *rid, type, rman_get_start(res));
5519
5520 /* Disable decoding via the CMD register before updating the BAR */
5521 cmd = pci_read_config(child, PCIR_COMMAND, 2);
5522 pci_write_config(child, PCIR_COMMAND,
5523 cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);
5524
5525 map = rman_get_start(res);
5526 pci_write_bar(child, pm, map);
5527
5528 /* Restore the original value of the CMD register */
5529 pci_write_config(child, PCIR_COMMAND, cmd, 2);
5530out:
5531 return (res);
5532}
5533
5534struct resource *
5535pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid,
5536 rman_res_t start, rman_res_t end, rman_res_t count, u_long num,
5537 u_int flags)
5538{
5539 struct pci_devinfo *dinfo;
5540 struct resource_list *rl;
5541 struct resource_list_entry *rle;
5542 struct resource *res;
5543 pcicfgregs *cfg;
5544
5545 /*
5546 * Perform lazy resource allocation
5547 */
5548 dinfo = device_get_ivars(child);
5549 rl = &dinfo->resources;
5550 cfg = &dinfo->cfg;
5551 switch (type) {
5552#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
5553 case PCI_RES_BUS:
5554 return (pci_alloc_secbus(dev, child, rid, start, end, count,
5555 flags));
5556#endif
5557 case SYS_RES_IRQ:
5558 /*
5559 * Can't alloc legacy interrupt once MSI messages have
5560 * been allocated.
5561 */
5562 if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
5563 cfg->msix.msix_alloc > 0))
5564 return (NULL);
5565
5566 /*
5567 * If the child device doesn't have an interrupt
5568 * routed and is deserving of an interrupt, try to
5569 * assign it one.
5570 */
5571 if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
5572 (cfg->intpin != 0))
5573 pci_assign_interrupt(dev, child, 0);
5574 break;
5575 case SYS_RES_IOPORT:
5576 case SYS_RES_MEMORY:
5577#ifdef NEW_PCIB
5578 /*
5579 * PCI-PCI bridge I/O window resources are not BARs.
5580 * For those allocations just pass the request up the
5581 * tree.
5582 */
5583 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE) {
5584 switch (*rid) {
5585 case PCIR_IOBASEL_1:
5586 case PCIR_MEMBASE_1:
5587 case PCIR_PMBASEL_1:
5588 /*
5589 * XXX: Should we bother creating a resource
5590 * list entry?
5591 */
5592 return (bus_generic_alloc_resource(dev, child,
5593 type, rid, start, end, count, flags));
5594 }
5595 }
5596#endif
5597 /* Reserve resources for this BAR if needed. */
5598 rle = resource_list_find(rl, type, *rid);
5599 if (rle == NULL) {
5600 res = pci_reserve_map(dev, child, type, rid, start, end,
5601 count, num, flags);
5602 if (res == NULL)
5603 return (NULL);
5604 }
5605 }
5606 return (resource_list_alloc(rl, dev, child, type, rid,
5607 start, end, count, flags));
5608}
5609
5610struct resource *
5611pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
5612 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
5613{
5614#ifdef PCI_IOV
5615 struct pci_devinfo *dinfo;
5616#endif
5617
5618 if (device_get_parent(child) != dev)
5619 return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
5620 type, rid, start, end, count, flags));
5621
5622#ifdef PCI_IOV
5623 dinfo = device_get_ivars(child);
5624 if (dinfo->cfg.flags & PCICFG_VF) {
5625 switch (type) {
5626 /* VFs can't have I/O BARs. */
5627 case SYS_RES_IOPORT:
5628 return (NULL);
5629 case SYS_RES_MEMORY:
5630 return (pci_vf_alloc_mem_resource(dev, child, rid,
5631 start, end, count, flags));
5632 }
5633
5634 /* Fall through for other types of resource allocations. */
5635 }
5636#endif
5637
5638 return (pci_alloc_multi_resource(dev, child, type, rid, start, end,
5639 count, 1, flags));
5640}
5641
5642int
5643pci_release_resource(device_t dev, device_t child, int type, int rid,
5644 struct resource *r)
5645{
5646 struct pci_devinfo *dinfo;
5647 struct resource_list *rl;
5648 pcicfgregs *cfg;
5649
5650 if (device_get_parent(child) != dev)
5651 return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child,
5652 type, rid, r));
5653
5654 dinfo = device_get_ivars(child);
5655 cfg = &dinfo->cfg;
5656
5657#ifdef PCI_IOV
5658 if (dinfo->cfg.flags & PCICFG_VF) {
5659 switch (type) {
5660 /* VFs can't have I/O BARs. */
5661 case SYS_RES_IOPORT:
5662 return (EDOOFUS);
5663 case SYS_RES_MEMORY:
5664 return (pci_vf_release_mem_resource(dev, child, rid,
5665 r));
5666 }
5667
5668 /* Fall through for other types of resource allocations. */
5669 }
5670#endif
5671
5672#ifdef NEW_PCIB
5673 /*
5674 * PCI-PCI bridge I/O window resources are not BARs. For
5675 * those allocations just pass the request up the tree.
5676 */
5677 if (cfg->hdrtype == PCIM_HDRTYPE_BRIDGE &&
5678 (type == SYS_RES_IOPORT || type == SYS_RES_MEMORY)) {
5679 switch (rid) {
5680 case PCIR_IOBASEL_1:
5681 case PCIR_MEMBASE_1:
5682 case PCIR_PMBASEL_1:
5683 return (bus_generic_release_resource(dev, child, type,
5684 rid, r));
5685 }
5686 }
5687#endif
5688
5689 rl = &dinfo->resources;
5690 return (resource_list_release(rl, dev, child, type, rid, r));
5691}
5692
5693int
5694pci_activate_resource(device_t dev, device_t child, int type, int rid,
5695 struct resource *r)
5696{
5697 struct pci_devinfo *dinfo;
5698 int error;
5699
5700 error = bus_generic_activate_resource(dev, child, type, rid, r);
5701 if (error)
5702 return (error);
5703
5704 /* Enable decoding in the command register when activating BARs. */
5705 if (device_get_parent(child) == dev) {
5706 /* Device ROMs need their decoding explicitly enabled. */
5707 dinfo = device_get_ivars(child);
5708 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
5709 pci_write_bar(child, pci_find_bar(child, rid),
5710 rman_get_start(r) | PCIM_BIOS_ENABLE);
5711 switch (type) {
5712 case SYS_RES_IOPORT:
5713 case SYS_RES_MEMORY:
5714 error = PCI_ENABLE_IO(dev, child, type);
5715 break;
5716 }
5717 }
5718 return (error);
5719}
5720
5721int
5722pci_deactivate_resource(device_t dev, device_t child, int type,
5723 int rid, struct resource *r)
5724{
5725 struct pci_devinfo *dinfo;
5726 int error;
5727
5728 error = bus_generic_deactivate_resource(dev, child, type, rid, r);
5729 if (error)
5730 return (error);
5731
5732 /* Disable decoding for device ROMs. */
5733 if (device_get_parent(child) == dev) {
5734 dinfo = device_get_ivars(child);
5735 if (type == SYS_RES_MEMORY && PCIR_IS_BIOS(&dinfo->cfg, rid))
5736 pci_write_bar(child, pci_find_bar(child, rid),
5737 rman_get_start(r));
5738 }
5739 return (0);
5740}
5741
5742void
5743pci_child_deleted(device_t dev, device_t child)
5744{
5745 struct resource_list_entry *rle;
5746 struct resource_list *rl;
5747 struct pci_devinfo *dinfo;
5748
5749 dinfo = device_get_ivars(child);
5750 rl = &dinfo->resources;
5751
5752 EVENTHANDLER_INVOKE(pci_delete_device, child);
5753
5754 /* Turn off access to resources we're about to free */
5755 if (bus_child_present(child) != 0) {
5756 pci_write_config(child, PCIR_COMMAND, pci_read_config(child,
5757 PCIR_COMMAND, 2) & ~(PCIM_CMD_MEMEN | PCIM_CMD_PORTEN), 2);
5758
5759 pci_disable_busmaster(child);
5760 }
5761
5762 /* Free all allocated resources */
5763 STAILQ_FOREACH(rle, rl, link) {
5764 if (rle->res) {
5765 if (rman_get_flags(rle->res) & RF_ACTIVE ||
5766 resource_list_busy(rl, rle->type, rle->rid)) {
5767 pci_printf(&dinfo->cfg,
5768 "Resource still owned, oops. "
5769 "(type=%d, rid=%d, addr=%lx)\n",
5770 rle->type, rle->rid,
5771 rman_get_start(rle->res));
5772 bus_release_resource(child, rle->type, rle->rid,
5773 rle->res);
5774 }
5775 resource_list_unreserve(rl, dev, child, rle->type,
5776 rle->rid);
5777 }
5778 }
5779 resource_list_free(rl);
5780
5781 pci_freecfg(dinfo);
5782}
5783
5784void
5785pci_delete_resource(device_t dev, device_t child, int type, int rid)
5786{
5787 struct pci_devinfo *dinfo;
5788 struct resource_list *rl;
5789 struct resource_list_entry *rle;
5790
5791 if (device_get_parent(child) != dev)
5792 return;
5793
5794 dinfo = device_get_ivars(child);
5795 rl = &dinfo->resources;
5796 rle = resource_list_find(rl, type, rid);
5797 if (rle == NULL)
5798 return;
5799
5800 if (rle->res) {
5801 if (rman_get_flags(rle->res) & RF_ACTIVE ||
5802 resource_list_busy(rl, type, rid)) {
5803 device_printf(dev, "delete_resource: "
5804 "Resource still owned by child, oops. "
5805 "(type=%d, rid=%d, addr=%jx)\n",
5806 type, rid, rman_get_start(rle->res));
5807 return;
5808 }
5809 resource_list_unreserve(rl, dev, child, type, rid);
5810 }
5811 resource_list_delete(rl, type, rid);
5812}
5813
5814struct resource_list *
5815pci_get_resource_list (device_t dev, device_t child)
5816{
5817 struct pci_devinfo *dinfo = device_get_ivars(child);
5818
5819 return (&dinfo->resources);
5820}
5821
5822#ifdef IOMMU
5823bus_dma_tag_t
5824pci_get_dma_tag(device_t bus, device_t dev)
5825{
5826 bus_dma_tag_t tag;
5827 struct pci_softc *sc;
5828
5829 if (device_get_parent(dev) == bus) {
5830 /* try iommu and return if it works */
5831 tag = iommu_get_dma_tag(bus, dev);
5832 } else
5833 tag = NULL;
5834 if (tag == NULL) {
5835 sc = device_get_softc(bus);
5836 tag = sc->sc_dma_tag;
5837 }
5838 return (tag);
5839}
5840#else
5841bus_dma_tag_t
5842pci_get_dma_tag(device_t bus, device_t dev)
5843{
5844 struct pci_softc *sc = device_get_softc(bus);
5845
5846 return (sc->sc_dma_tag);
5847}
5848#endif
5849
5850uint32_t
5851pci_read_config_method(device_t dev, device_t child, int reg, int width)
5852{
5853 struct pci_devinfo *dinfo = device_get_ivars(child);
5854 pcicfgregs *cfg = &dinfo->cfg;
5855
5856#ifdef PCI_IOV
5857 /*
5858 * SR-IOV VFs don't implement the VID or DID registers, so we have to
5859 * emulate them here.
5860 */
5861 if (cfg->flags & PCICFG_VF) {
5862 if (reg == PCIR_VENDOR) {
5863 switch (width) {
5864 case 4:
5865 return (cfg->device << 16 | cfg->vendor);
5866 case 2:
5867 return (cfg->vendor);
5868 case 1:
5869 return (cfg->vendor & 0xff);
5870 default:
5871 return (0xffffffff);
5872 }
5873 } else if (reg == PCIR_DEVICE) {
5874 switch (width) {
5875 /* Note that an unaligned 4-byte read is an error. */
5876 case 2:
5877 return (cfg->device);
5878 case 1:
5879 return (cfg->device & 0xff);
5880 default:
5881 return (0xffffffff);
5882 }
5883 }
5884 }
5885#endif
5886
5887 return (PCIB_READ_CONFIG(device_get_parent(dev),
5888 cfg->bus, cfg->slot, cfg->func, reg, width));
5889}
5890
5891void
5892pci_write_config_method(device_t dev, device_t child, int reg,
5893 uint32_t val, int width)
5894{
5895 struct pci_devinfo *dinfo = device_get_ivars(child);
5896 pcicfgregs *cfg = &dinfo->cfg;
5897
5898 PCIB_WRITE_CONFIG(device_get_parent(dev),
5899 cfg->bus, cfg->slot, cfg->func, reg, val, width);
5900}
5901
5902int
5903pci_child_location_method(device_t dev, device_t child, struct sbuf *sb)
5904{
5905
5906 sbuf_printf(sb, "slot=%d function=%d dbsf=pci%d:%d:%d:%d",
5907 pci_get_slot(child), pci_get_function(child), pci_get_domain(child),
5908 pci_get_bus(child), pci_get_slot(child), pci_get_function(child));
5909 return (0);
5910}
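/*
 * For example, function 0 of slot 2 on bus 3 in domain 0 produces the
 * following location string (illustrative):
 *
 *   slot=2 function=0 dbsf=pci0:3:2:0
 */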
5911
5912int
5913pci_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb)
5914{
5915 struct pci_devinfo *dinfo;
5916 pcicfgregs *cfg;
5917
5918 dinfo = device_get_ivars(child);
5919 cfg = &dinfo->cfg;
5920 sbuf_printf(sb, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
5921 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
5922 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
5923 cfg->progif);
5924 return (0);
5925}
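/*
 * The pnpinfo string generated above looks like the following for a
 * hypothetical Ethernet function (values illustrative):
 *
 *   vendor=0x8086 device=0x100e subvendor=0x8086 subdevice=0x001e class=0x020000
 */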
5926
5927int
5928pci_get_device_path_method(device_t bus, device_t child, const char *locator,
5929 struct sbuf *sb)
5930{
5931 device_t parent = device_get_parent(bus);
5932 int rv;
5933
5934 if (strcmp(locator, BUS_LOCATOR_UEFI) == 0) {
5935 rv = bus_generic_get_device_path(parent, bus, locator, sb);
5936 if (rv == 0) {
5937 sbuf_printf(sb, "/Pci(0x%x,0x%x)", pci_get_slot(child),
5938 pci_get_function(child));
5939 }
5940 return (0);
5941 }
5942 return (bus_generic_get_device_path(bus, child, locator, sb));
5943}
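/*
 * For the BUS_LOCATOR_UEFI case above, the child appends a node of the
 * form "/Pci(slot,function)" to its parent's path; e.g. a device at
 * slot 0x1c, function 2 adds the (illustrative) component:
 *
 *   /Pci(0x1c,0x2)
 */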
5944
5945int
5946pci_assign_interrupt_method(device_t dev, device_t child)
5947{
5948 struct pci_devinfo *dinfo = device_get_ivars(child);
5949 pcicfgregs *cfg = &dinfo->cfg;
5950
5951 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
5952 cfg->intpin));
5953}
5954
5955static void
5956pci_lookup(void *arg, const char *name, device_t *dev)
5957{
5958 long val;
5959 char *end;
5960 int domain, bus, slot, func;
5961
5962 if (*dev != NULL)
5963 return;
5964
5965 /*
5966 * Accept pciconf-style selectors of either pciD:B:S:F or
5967 * pciB:S:F. In the latter case, the domain is assumed to
5968 * be zero.
5969 */
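 /*
  * For example, "pci1:0:31:3" selects domain 1, bus 0, slot 31,
  * function 3, while the short form "pci0:31:3" selects domain 0,
  * bus 0, slot 31, function 3.
  */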
5970 if (strncmp(name, "pci", 3) != 0)
5971 return;
5972 val = strtol(name + 3, &end, 10);
5973 if (val < 0 || val > INT_MAX || *end != ':')
5974 return;
5975 domain = val;
5976 val = strtol(end + 1, &end, 10);
5977 if (val < 0 || val > INT_MAX || *end != ':')
5978 return;
5979 bus = val;
5980 val = strtol(end + 1, &end, 10);
5981 if (val < 0 || val > INT_MAX)
5982 return;
5983 slot = val;
5984 if (*end == ':') {
5985 val = strtol(end + 1, &end, 10);
5986 if (val < 0 || val > INT_MAX || *end != '\0')
5987 return;
5988 func = val;
5989 } else if (*end == '\0') {
5990 func = slot;
5991 slot = bus;
5992 bus = domain;
5993 domain = 0;
5994 } else
5995 return;
5996
5997 if (domain > PCI_DOMAINMAX || bus > PCI_BUSMAX || slot > PCI_SLOTMAX ||
5998 func > PCIE_ARI_FUNCMAX || (slot != 0 && func > PCI_FUNCMAX))
5999 return;
6000
6001 *dev = pci_find_dbsf(domain, bus, slot, func);
6002}
6003
6004static int
6005pci_modevent(module_t mod, int what, void *arg)
6006{
6007 static struct cdev *pci_cdev;
6008 static eventhandler_tag tag;
6009
6010 switch (what) {
6011 case MOD_LOAD:
6012 STAILQ_INIT(&pci_devq);
6013 pci_generation = 0;
6014 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
6015 "pci");
6016 pci_load_vendor_data();
6017 tag = EVENTHANDLER_REGISTER(dev_lookup, pci_lookup, NULL,
6018 1000);
6019 break;
6020
6021 case MOD_UNLOAD:
6022 if (tag != NULL)
6023 EVENTHANDLER_DEREGISTER(dev_lookup, tag);
6024 destroy_dev(pci_cdev);
6025 break;
6026 }
6027
6028 return (0);
6029}
6030
6031static void
6032pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo)
6033{
6034#define WREG(n, v) pci_write_config(dev, pos + (n), (v), 2)
6035 struct pcicfg_pcie *cfg;
6036 int version, pos;
6037
6038 cfg = &dinfo->cfg.pcie;
6039 pos = cfg->pcie_location;
6040
6041 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
6042
6044
6045 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
6049
6050 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
6052 (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
6054
6055 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
6058
6059 if (version > 1) {
6063 }
6064#undef WREG
6065}
6066
6067static void
6068pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo)
6069{
6070 pci_write_config(dev, dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND,
6071 dinfo->cfg.pcix.pcix_command, 2);
6072}
6073
6074void
6075pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
6076{
6077
6078 /*
6079 * Restore the device to full power mode. We must do this
6080 * before we restore the registers because moving from D3 to
6081 * D0 will cause the chip's BARs and some other registers to
6082 * be reset to some unknown power on reset values. Cut down
6083 * the noise on boot by doing nothing if we are already in
6084 * state D0.
6085 */
6086 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0)
6087 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
6088 pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
6089 pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
6090 pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
6091 pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
6092 pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
6093 pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);
6094 switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) {
6095 case PCIM_HDRTYPE_NORMAL:
6096 pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
6097 pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
6098 break;
6099 case PCIM_HDRTYPE_BRIDGE:
6100 pci_write_config(dev, PCIR_SECLAT_1,
6101 dinfo->cfg.bridge.br_seclat, 1);
6102 pci_write_config(dev, PCIR_SUBBUS_1,
6103 dinfo->cfg.bridge.br_subbus, 1);
6104 pci_write_config(dev, PCIR_SECBUS_1,
6105 dinfo->cfg.bridge.br_secbus, 1);
6106 pci_write_config(dev, PCIR_PRIBUS_1,
6107 dinfo->cfg.bridge.br_pribus, 1);
6108 pci_write_config(dev, PCIR_BRIDGECTL_1,
6109 dinfo->cfg.bridge.br_control, 2);
6110 break;
6111 case PCIM_HDRTYPE_CARDBUS:
6112 pci_write_config(dev, PCIR_SECLAT_2,
6113 dinfo->cfg.bridge.br_seclat, 1);
6114 pci_write_config(dev, PCIR_SUBBUS_2,
6115 dinfo->cfg.bridge.br_subbus, 1);
6116 pci_write_config(dev, PCIR_SECBUS_2,
6117 dinfo->cfg.bridge.br_secbus, 1);
6118 pci_write_config(dev, PCIR_PRIBUS_2,
6119 dinfo->cfg.bridge.br_pribus, 1);
6120 pci_write_config(dev, PCIR_BRIDGECTL_2,
6121 dinfo->cfg.bridge.br_control, 2);
6122 break;
6123 }
6124 pci_restore_bars(dev);
6125
6126 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_BRIDGE)
6127 pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
6128
6129 /*
6130 * Restore extended capabilities for PCI-Express and PCI-X
6131 */
6132 if (dinfo->cfg.pcie.pcie_location != 0)
6133 pci_cfg_restore_pcie(dev, dinfo);
6134 if (dinfo->cfg.pcix.pcix_location != 0)
6135 pci_cfg_restore_pcix(dev, dinfo);
6136
6137 /* Restore MSI and MSI-X configurations if they are present. */
6138 if (dinfo->cfg.msi.msi_location != 0)
6139 pci_resume_msi(dev);
6140 if (dinfo->cfg.msix.msix_location != 0)
6141 pci_resume_msix(dev);
6142
6143#ifdef PCI_IOV
6144 if (dinfo->cfg.iov != NULL)
6145 pci_iov_cfg_restore(dev, dinfo);
6146#endif
6147}
6148
6149static void
6150pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo)
6151{
6152#define RREG(n) pci_read_config(dev, pos + (n), 2)
6153 struct pcicfg_pcie *cfg;
6154 int version, pos;
6155
6156 cfg = &dinfo->cfg.pcie;
6157 pos = cfg->pcie_location;
6158
6159 cfg->pcie_flags = RREG(PCIER_FLAGS);
6160
6161 version = cfg->pcie_flags & PCIEM_FLAGS_VERSION;
6162
6163 cfg->pcie_device_ctl = RREG(PCIER_DEVICE_CTL);
6164
6165 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
6166 cfg->pcie_type == PCIEM_TYPE_ENDPOINT ||
6167 cfg->pcie_type == PCIEM_TYPE_LEGACY_ENDPOINT)
6168 cfg->pcie_link_ctl = RREG(PCIER_LINK_CTL);
6169
6170 if (version > 1 || (cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
6171 (cfg->pcie_type == PCIEM_TYPE_DOWNSTREAM_PORT &&
6172 (cfg->pcie_flags & PCIEM_FLAGS_SLOT))))
6173 cfg->pcie_slot_ctl = RREG(PCIER_SLOT_CTL);
6174
6175 if (version > 1 || cfg->pcie_type == PCIEM_TYPE_ROOT_PORT ||
6176 cfg->pcie_type == PCIEM_TYPE_ROOT_EC)
6177 cfg->pcie_root_ctl = RREG(PCIER_ROOT_CTL);
6178
6179 if (version > 1) {
6180 cfg->pcie_device_ctl2 = RREG(PCIER_DEVICE_CTL2);
6181 cfg->pcie_link_ctl2 = RREG(PCIER_LINK_CTL2);
6182 cfg->pcie_slot_ctl2 = RREG(PCIER_SLOT_CTL2);
6183 }
6184#undef RREG
6185}
6186
6187static void
6188pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo)
6189{
6190 dinfo->cfg.pcix.pcix_command = pci_read_config(dev,
6191 dinfo->cfg.pcix.pcix_location + PCIXR_COMMAND, 2);
6192}
6193
6194void
6195pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
6196{
6197 uint32_t cls;
6198 int ps;
6199
6200 /*
6201 * Some drivers apparently write to these registers w/o updating our
6202 * cached copy. No harm happens if we update the copy, so do so here
6203 * so we can restore them. The COMMAND register is modified by the
6204 * bus w/o updating the cache. This should represent the normally
6205 * writable portion of the 'defined' part of type 0/1/2 headers.
6206 */
6207 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
6208 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
6209 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
6210 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
6211 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
6212 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
6213 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
6214 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
6215 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
6216 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
6217 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
6218 switch (dinfo->cfg.hdrtype & PCIM_HDRTYPE) {
6219 case PCIM_HDRTYPE_NORMAL:
6220 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
6221 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
6222 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
6223 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
6224 break;
6225 case PCIM_HDRTYPE_BRIDGE:
6226 dinfo->cfg.bridge.br_seclat = pci_read_config(dev,
6227 PCIR_SECLAT_1, 1);
6228 dinfo->cfg.bridge.br_subbus = pci_read_config(dev,
6229 PCIR_SUBBUS_1, 1);
6230 dinfo->cfg.bridge.br_secbus = pci_read_config(dev,
6231 PCIR_SECBUS_1, 1);
6232 dinfo->cfg.bridge.br_pribus = pci_read_config(dev,
6233 PCIR_PRIBUS_1, 1);
6234 dinfo->cfg.bridge.br_control = pci_read_config(dev,
6235 PCIR_BRIDGECTL_1, 2);
6236 break;
6237 case PCIM_HDRTYPE_CARDBUS:
6238 dinfo->cfg.bridge.br_seclat = pci_read_config(dev,
6239 PCIR_SECLAT_2, 1);
6240 dinfo->cfg.bridge.br_subbus = pci_read_config(dev,
6241 PCIR_SUBBUS_2, 1);
6242 dinfo->cfg.bridge.br_secbus = pci_read_config(dev,
6243 PCIR_SECBUS_2, 1);
6244 dinfo->cfg.bridge.br_pribus = pci_read_config(dev,
6245 PCIR_PRIBUS_2, 1);
6246 dinfo->cfg.bridge.br_control = pci_read_config(dev,
6247 PCIR_BRIDGECTL_2, 2);
6248 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_2, 2);
6249 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_2, 2);
6250 break;
6251 }
6252
6253 if (dinfo->cfg.pcie.pcie_location != 0)
6254 pci_cfg_save_pcie(dev, dinfo);
6255
6256 if (dinfo->cfg.pcix.pcix_location != 0)
6257 pci_cfg_save_pcix(dev, dinfo);
6258
6259#ifdef PCI_IOV
6260 if (dinfo->cfg.iov != NULL)
6261 pci_iov_cfg_save(dev, dinfo);
6262#endif
6263
6264 /*
6265 * don't set the state for display devices, base peripherals and
6266 * memory devices since bad things happen when they are powered down.
6267 * We should (a) have drivers that can easily detach and (b) use
6268 * generic drivers for these devices so that some device actually
6269 * attaches. We need to make sure that when we implement (a) we don't
6270 * power the device down on a reattach.
6271 */
6272 cls = pci_get_class(dev);
6273 if (!setstate)
6274 return;
6275 switch (pci_do_power_nodriver)
6276 {
6277 case 0: /* NO powerdown at all */
6278 return;
6279 case 1: /* Conservative about what to power down */
6280 if (cls == PCIC_STORAGE)
6281 return;
6282 /*FALLTHROUGH*/
6283 case 2: /* Aggressive about what to power down */
6284 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
6285 cls == PCIC_BASEPERIPH)
6286 return;
6287 /*FALLTHROUGH*/
6288 case 3: /* Power down everything */
6289 break;
6290 }
6291 /*
6292 * PCI spec says we can only go into D3 state from D0 state.
6293 * Transition from D[12] into D0 before going to D3 state.
6294 */
6295 ps = pci_get_powerstate(dev);
6296 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
6297 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
6298 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
6299 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
6300}
6301
6302/* Wrapper APIs suitable for device driver use. */
6303void
6304pci_save_state(device_t dev)
6305{
6306 struct pci_devinfo *dinfo;
6307
6308 dinfo = device_get_ivars(dev);
6309 pci_cfg_save(dev, dinfo, 0);
6310}
6311
6312void
6313pci_restore_state(device_t dev)
6314{
6315 struct pci_devinfo *dinfo;
6316
6317 dinfo = device_get_ivars(dev);
6318 pci_cfg_restore(dev, dinfo);
6319}
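
Editor's note: a minimal sketch (not part of pci.c) of how a driver might pair the wrapper APIs above in its own suspend/resume methods; the mydrv_* names are hypothetical.

    static int
    mydrv_suspend(device_t dev)
    {
            pci_save_state(dev);            /* snapshot config registers before power-down */
            return (0);
    }

    static int
    mydrv_resume(device_t dev)
    {
            pci_restore_state(dev);         /* reprogram standard, PCIe/PCI-X and MSI/MSI-X state */
            return (0);
    }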
6320
6321static int
6322pci_get_id_method(device_t dev, device_t child, enum pci_id_type type,
6323 uintptr_t *id)
6324{
6325
6326 return (PCIB_GET_ID(device_get_parent(dev), child, type, id));
6327}
6328
6329/* Find the upstream port of a given PCI device in a root complex. */
6330device_t
6331pci_find_pcie_root_port(device_t dev)
6332{
6333 struct pci_devinfo *dinfo;
6334 devclass_t pci_class;
6335 device_t pcib, bus;
6336
6337 pci_class = devclass_find("pci");
6338 KASSERT(device_get_devclass(device_get_parent(dev)) == pci_class,
6339 ("%s: non-pci device %s", __func__, device_get_nameunit(dev)));
6340
6341 /*
6342 * Walk the bridge hierarchy until we find a PCI-e root
6343 * port or a non-PCI device.
6344 */
6345 for (;;) {
6346 bus = device_get_parent(dev);
6347 KASSERT(bus != NULL, ("%s: null parent of %s", __func__,
6348 device_get_nameunit(dev)));
6349
6350 pcib = device_get_parent(bus);
6351 KASSERT(pcib != NULL, ("%s: null bridge of %s", __func__,
6352 device_get_nameunit(bus)));
6353
6354 /*
6355 * pcib's parent must be a PCI bus for this to be a
6356 * PCI-PCI bridge.
6357 */
6358 if (device_get_devclass(device_get_parent(pcib)) != pci_class)
6359 return (NULL);
6360
6361 dinfo = device_get_ivars(pcib);
6362 if (dinfo->cfg.pcie.pcie_location != 0 &&
6363 dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT)
6364 return (pcib);
6365
6366 dev = pcib;
6367 }
6368}
6369
6370/*
6371 * Wait for pending transactions to complete on a PCI-express function.
6372 *
6373 * The maximum delay is specified in milliseconds in max_delay. Note
6374 * that this function may sleep.
6375 *
6376 * Returns true if the function is idle and false if the timeout is
6377 * exceeded. If dev is not a PCI-express function, this returns true.
6378 */
6379bool
6380pcie_wait_for_pending_transactions(device_t dev, u_int max_delay)
6381{
6382 struct pci_devinfo *dinfo = device_get_ivars(dev);
6383 uint16_t sta;
6384 int cap;
6385
6386 cap = dinfo->cfg.pcie.pcie_location;
6387 if (cap == 0)
6388 return (true);
6389
6390 sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2);
6391 while (sta & PCIEM_STA_TRANSACTION_PND) {
6392 if (max_delay == 0)
6393 return (false);
6394
6395 /* Poll once every 100 milliseconds up to the timeout. */
6396 if (max_delay > 100) {
6397 pause_sbt("pcietp", 100 * SBT_1MS, 0, C_HARDCLOCK);
6398 max_delay -= 100;
6399 } else {
6400 pause_sbt("pcietp", max_delay * SBT_1MS, 0,
6401 C_HARDCLOCK);
6402 max_delay = 0;
6403 }
6404 sta = pci_read_config(dev, cap + PCIER_DEVICE_STA, 2);
6405 }
6406
6407 return (true);
6408}
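
Editor's note: a hypothetical caller of the helper above, giving the function up to one second to drain outstanding transactions before continuing with a reset.

    if (!pcie_wait_for_pending_transactions(dev, 1000))
            device_printf(dev, "transactions still pending after 1000 ms\n");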
6409
6410/*
6411 * Determine the maximum Completion Timeout in microseconds.
6412 *
6413 * For non-PCI-express functions this returns 0.
6414 */
6415int
6416pcie_get_max_completion_timeout(device_t dev)
6417{
6418 struct pci_devinfo *dinfo = device_get_ivars(dev);
6419 int cap;
6420
6421 cap = dinfo->cfg.pcie.pcie_location;
6422 if (cap == 0)
6423 return (0);
6424
6425 /*
6426 * Functions using the 1.x spec use the default timeout range of
6427 * 50 microseconds to 50 milliseconds. Functions that do not
6428 * support programmable timeouts also use this range.
6429 */
6430 if ((dinfo->cfg.pcie.pcie_flags & PCIEM_FLAGS_VERSION) < 2 ||
6431 (pci_read_config(dev, cap + PCIER_DEVICE_CAP2, 4) &
6432 PCIEM_CAP2_COMP_TIMO_RANGES) == 0)
6433 return (50 * 1000);
6434
6435 switch (pci_read_config(dev, cap + PCIER_DEVICE_CTL2, 2) &
6436 PCIEM_CTL2_COMP_TIMO_VAL) {
6437 case PCIEM_CTL2_COMP_TIMO_100US:
6438 return (100);
6439 case PCIEM_CTL2_COMP_TIMO_10MS:
6440 return (10 * 1000);
6441 case PCIEM_CTL2_COMP_TIMO_55MS:
6442 return (55 * 1000);
6443 case PCIEM_CTL2_COMP_TIMO_210MS:
6444 return (210 * 1000);
6445 case PCIEM_CTL2_COMP_TIMO_900MS:
6446 return (900 * 1000);
6447 case PCIEM_CTL2_COMP_TIMO_3500MS:
6448 return (3500 * 1000);
6449 case PCIEM_CTL2_COMP_TIMO_13S:
6450 return (13 * 1000 * 1000);
6451 case PCIEM_CTL2_COMP_TIMO_64S:
6452 return (64 * 1000 * 1000);
6453 default:
6454 return (50 * 1000);
6455 }
6456}
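
Editor's note: an illustrative fragment (not in pci.c) mirroring how pcie_flr() below converts this value, which is returned in microseconds, into a post-reset settle delay in milliseconds with a 10 ms floor.

    int settle_ms;

    settle_ms = pcie_get_max_completion_timeout(dev) / 1000;
    if (settle_ms < 10)
            settle_ms = 10;         /* never wait less than 10 ms */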
6457
6458void
6459pcie_apei_error(device_t dev, int sev, uint8_t *aerp)
6460{
6461 struct pci_devinfo *dinfo = device_get_ivars(dev);
6462 const char *s;
6463 int aer;
6464 uint32_t r, r1;
6465 uint16_t rs;
6466
6467 if (sev == PCIEM_STA_CORRECTABLE_ERROR)
6468 s = "Correctable";
6469 else if (sev == PCIEM_STA_NON_FATAL_ERROR)
6470 s = "Uncorrectable (Non-Fatal)";
6471 else
6472 s = "Uncorrectable (Fatal)";
6473 device_printf(dev, "%s PCIe error reported by APEI\n", s);
6474 if (aerp) {
6475 if (sev == PCIEM_STA_CORRECTABLE_ERROR) {
6476 r = le32dec(aerp + PCIR_AER_COR_STATUS);
6477 r1 = le32dec(aerp + PCIR_AER_COR_MASK);
6478 } else {
6479 r = le32dec(aerp + PCIR_AER_UC_STATUS);
6480 r1 = le32dec(aerp + PCIR_AER_UC_MASK);
6481 }
6482 device_printf(dev, "status 0x%08x mask 0x%08x", r, r1);
6483 if (sev != PCIEM_STA_CORRECTABLE_ERROR) {
6484 r = le32dec(aerp + PCIR_AER_UC_SEVERITY);
6485 rs = le16dec(aerp + PCIR_AER_CAP_CONTROL);
6486 printf(" severity 0x%08x first %d\n",
6487 r, rs & 0x1f);
6488 } else
6489 printf("\n");
6490 }
6491
6492 /* As kind of recovery just report and clear the error statuses. */
6493 if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) {
6494 r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4);
6495 if (r != 0) {
6496 pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4);
6497 device_printf(dev, "Clearing UC AER errors 0x%08x\n", r);
6498 }
6499
6500 r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4);
6501 if (r != 0) {
6502 pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4);
6503 device_printf(dev, "Clearing COR AER errors 0x%08x\n", r);
6504 }
6505 }
6506 if (dinfo->cfg.pcie.pcie_location != 0) {
6507 rs = pci_read_config(dev, dinfo->cfg.pcie.pcie_location +
6508 PCIER_DEVICE_STA, 2);
6509 if ((rs & (PCIEM_STA_CORRECTABLE_ERROR |
6510 PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR |
6511 PCIEM_STA_UNSUPPORTED_REQ)) != 0) {
6512 pci_write_config(dev, dinfo->cfg.pcie.pcie_location +
6513 PCIER_DEVICE_STA, rs, 2);
6514 device_printf(dev, "Clearing PCIe errors 0x%04x\n", rs);
6515 }
6516 }
6517}
6518
6519/*
6520 * Perform a Function Level Reset (FLR) on a device.
6521 *
6522 * This function first waits for any pending transactions to complete
6523 * within the timeout specified by max_delay. If transactions are
6524 * still pending, the function will return false without attempting a
6525 * reset.
6526 *
6527 * If dev is not a PCI-express function or does not support FLR, this
6528 * function returns false.
6529 *
6530 * Note that no registers are saved or restored. The caller is
6531 * responsible for saving and restoring any registers including
6532 * PCI-standard registers via pci_save_state() and
6533 * pci_restore_state().
6534 */
6535bool
6536pcie_flr(device_t dev, u_int max_delay, bool force)
6537{
6538 struct pci_devinfo *dinfo = device_get_ivars(dev);
6539 uint16_t cmd, ctl;
6540 int compl_delay;
6541 int cap;
6542
6543 cap = dinfo->cfg.pcie.pcie_location;
6544 if (cap == 0)
6545 return (false);
6546
6547 if (!(pci_read_config(dev, cap + PCIER_DEVICE_CAP, 4) & PCIEM_CAP_FLR))
6548 return (false);
6549
6550 /*
6551 * Disable busmastering to prevent generation of new
6552 * transactions while waiting for the device to go idle. If
6553 * the idle timeout fails, the command register is restored
6554 * which will re-enable busmastering.
6555 */
6556 cmd = pci_read_config(dev, PCIR_COMMAND, 2);
6557 pci_write_config(dev, PCIR_COMMAND, cmd & ~(PCIM_CMD_BUSMASTEREN), 2);
6558 if (!pcie_wait_for_pending_transactions(dev, max_delay)) {
6559 if (!force) {
6560 pci_write_config(dev, PCIR_COMMAND, cmd, 2);
6561 return (false);
6562 }
6563 pci_printf(&dinfo->cfg,
6564 "Resetting with transactions pending after %d ms\n",
6565 max_delay);
6566
6567 /*
6568 * Extend the post-FLR delay to cover the maximum
6569 * Completion Timeout delay of anything in flight
6570 * during the FLR delay. Enforce a minimum delay of
6571 * at least 10ms.
6572 */
6573 compl_delay = pcie_get_max_completion_timeout(dev) / 1000;
6574 if (compl_delay < 10)
6575 compl_delay = 10;
6576 } else
6577 compl_delay = 0;
6578
6579 /* Initiate the reset. */
6580 ctl = pci_read_config(dev, cap + PCIER_DEVICE_CTL, 2);
6581 pci_write_config(dev, cap + PCIER_DEVICE_CTL, ctl |
6582 PCIEM_CTL_INITIATE_FLR, 2);
6583
6584 /* Wait for 100ms. */
6585 pause_sbt("pcieflr", (100 + compl_delay) * SBT_1MS, 0, C_HARDCLOCK);
6586
6587 if (pci_read_config(dev, cap + PCIER_DEVICE_STA, 2) &
6588 PCIEM_STA_TRANSACTION_PND)
6589 pci_printf(&dinfo->cfg, "Transactions pending after FLR!\n");
6590 return (true);
6591}
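
Editor's note: a hedged sketch (not part of pci.c) of a driver-initiated function reset that saves and restores config state around the FLR and falls back to a power-management reset, roughly following what pci_reset_child() later in this file does; mydrv_reset is a hypothetical helper.

    static int
    mydrv_reset(device_t dev)
    {
            pci_save_state(dev);
            if (!pcie_flr(dev, 1000, false))
                    (void)pci_power_reset(dev);     /* fall back to a D3->D0 cycle */
            pci_restore_state(dev);
            return (0);
    }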
6592
6593/*
6594 * Attempt a power-management reset by cycling the device in/out of D3
6595 * state. PCI spec says we can only go into D3 state from D0 state.
6596 * Transition from D[12] into D0 before going to D3 state.
6597 */
6598int
6599pci_power_reset(device_t dev)
6600{
6601 int ps;
6602
6603 ps = pci_get_powerstate(dev);
6604 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
6605 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
6606 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
6607 pci_set_powerstate(dev, ps);
6608 return (0);
6609}
6610
6611/*
6612 * Try link drop and retrain of the downstream port of upstream
6613 * switch, for PCIe. According to the PCIe 3.0 spec 6.6.1, this must
6614 * cause Conventional Hot reset of the device in the slot.
6615 * An alternative, for PCIe, could be the secondary bus reset initiated
6616 * on the upstream switch PCIR_BRIDGECTL_1, bit 6.
6617 */
6618int
6619pcie_link_reset(device_t port, int pcie_location)
6620{
6621 uint16_t v;
6622
6623 v = pci_read_config(port, pcie_location + PCIER_LINK_CTL, 2);
6624 v |= PCIEM_LINK_CTL_LINK_DIS;
6625 pci_write_config(port, pcie_location + PCIER_LINK_CTL, v, 2);
6626 pause_sbt("pcier1", mstosbt(20), 0, 0);
6627 v &= ~PCIEM_LINK_CTL_LINK_DIS;
6628 v |= PCIEM_LINK_CTL_RETRAIN_LINK;
6629 pci_write_config(port, pcie_location + PCIER_LINK_CTL, v, 2);
6630 pause_sbt("pcier2", mstosbt(100), 0, 0); /* 100 ms */
6631 v = pci_read_config(port, pcie_location + PCIER_LINK_STA, 2);
6632 return ((v & PCIEM_LINK_STA_TRAINING) != 0 ? ETIMEDOUT : 0);
6633}
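
Editor's note: an illustrative fragment (not in pci.c) that locates the PCIe root port above a device and retrains its link, which hot-resets everything below that port; it assumes 'dev' hangs directly off a root port.

    device_t rp;
    struct pci_devinfo *rpinfo;
    int error = ENXIO;

    rp = pci_find_pcie_root_port(dev);
    if (rp != NULL) {
            rpinfo = device_get_ivars(rp);
            error = pcie_link_reset(rp, rpinfo->cfg.pcie.pcie_location);
    }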
6634
6635static int
6636pci_reset_post(device_t dev, device_t child)
6637{
6638
6639 if (dev == device_get_parent(child))
6640 pci_restore_state(child);
6641 return (0);
6642}
6643
6644static int
6645pci_reset_prepare(device_t dev, device_t child)
6646{
6647
6648 if (dev == device_get_parent(child))
6649 pci_save_state(child);
6650 return (0);
6651}
6652
6653static int
6654pci_reset_child(device_t dev, device_t child, int flags)
6655{
6656 int error;
6657
6658 if (dev == NULL || device_get_parent(child) != dev)
6659 return (0);
6660 if ((flags & DEVF_RESET_DETACH) != 0) {
6661 error = device_get_state(child) == DS_ATTACHED ?
6662 device_detach(child) : 0;
6663 } else {
6664 error = BUS_SUSPEND_CHILD(dev, child);
6665 }
6666 if (error == 0) {
6667 if (!pcie_flr(child, 1000, false)) {
6668 error = BUS_RESET_PREPARE(dev, child);
6669 if (error == 0)
6670 pci_power_reset(child);
6671 BUS_RESET_POST(dev, child);
6672 }
6673 if ((flags & DEVF_RESET_DETACH) != 0)
6674 device_probe_and_attach(child);
6675 else
6676 BUS_RESUME_CHILD(dev, child);
6677 }
6678 return (error);
6679}
6680
6681const struct pci_device_table *
6682pci_match_device(device_t child, const struct pci_device_table *id, size_t nelt)
6683{
6684 bool match;
6685 uint16_t vendor, device, subvendor, subdevice, class, subclass, revid;
6686
6687 vendor = pci_get_vendor(child);
6688 device = pci_get_device(child);
6689 subvendor = pci_get_subvendor(child);
6690 subdevice = pci_get_subdevice(child);
6691 class = pci_get_class(child);
6692 subclass = pci_get_subclass(child);
6693 revid = pci_get_revid(child);
6694 while (nelt-- > 0) {
6695 match = true;
6696 if (id->match_flag_vendor)
6697 match &= vendor == id->vendor;
6698 if (id->match_flag_device)
6699 match &= device == id->device;
6700 if (id->match_flag_subvendor)
6701 match &= subvendor == id->subvendor;
6702 if (id->match_flag_subdevice)
6703 match &= subdevice == id->subdevice;
6704 if (id->match_flag_class)
6705 match &= class == id->class_id;
6706 if (id->match_flag_subclass)
6707 match &= subclass == id->subclass;
6708 if (id->match_flag_revid)
6709 match &= revid == id->revid;
6710 if (match)
6711 return (id);
6712 id++;
6713 }
6714 return (NULL);
6715}
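
Editor's note: a hedged sketch (not part of pci.c) of a driver match table consumed by pci_match_device(); the vendor/device IDs and mydrv_* names are made up, and PCI_DEV()/PCI_DESCR() are assumed to be the table-entry helpers from <dev/pci/pcivar.h>.

    static const struct pci_device_table mydrv_devs[] = {
            { PCI_DEV(0x1234, 0x0001), PCI_DESCR("Example adapter, rev A") },
            { PCI_DEV(0x1234, 0x0002), PCI_DESCR("Example adapter, rev B") },
    };

    static int
    mydrv_probe(device_t dev)
    {
            const struct pci_device_table *id;

            id = pci_match_device(dev, mydrv_devs, nitems(mydrv_devs));
            if (id == NULL)
                    return (ENXIO);
            device_set_desc(dev, "Example adapter");
            return (BUS_PROBE_DEFAULT);
    }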
6716
6717static void
6718pci_print_faulted_dev_name(const struct pci_devinfo *dinfo)
6719{
6720 const char *dev_name;
6721 device_t dev;
6722
6723 dev = dinfo->cfg.dev;
6724 printf("pci%d:%d:%d:%d", dinfo->cfg.domain, dinfo->cfg.bus,
6725 dinfo->cfg.slot, dinfo->cfg.func);
6726 dev_name = device_get_name(dev);
6727 if (dev_name != NULL)
6728 printf(" (%s%d)", dev_name, device_get_unit(dev));
6729}
6730
6731void
6732pci_print_faulted_dev(void)
6733{
6734 struct pci_devinfo *dinfo;
6735 device_t dev;
6736 int aer, i;
6737 uint32_t r1, r2;
6738 uint16_t status;
6739
6740 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
6741 dev = dinfo->cfg.dev;
6742 status = pci_read_config(dev, PCIR_STATUS, 2);
6743 status &= PCIM_STATUS_MDPERR | PCIM_STATUS_STABORT |
6744 PCIM_STATUS_RTABORT | PCIM_STATUS_RMABORT |
6745 PCIM_STATUS_SERR | PCIM_STATUS_PERR;
6746 if (status != 0) {
6747 pci_print_faulted_dev_name(dinfo);
6748 printf(" error 0x%04x\n", status);
6749 }
6750 if (dinfo->cfg.pcie.pcie_location != 0) {
6751 status = pci_read_config(dev,
6752 dinfo->cfg.pcie.pcie_location +
6753 PCIER_DEVICE_STA, 2);
6754 if ((status & (PCIEM_STA_CORRECTABLE_ERROR |
6755 PCIEM_STA_NON_FATAL_ERROR | PCIEM_STA_FATAL_ERROR |
6756 PCIEM_STA_UNSUPPORTED_REQ)) != 0) {
6757 pci_print_faulted_dev_name(dinfo);
6758 printf(" PCIe DEVCTL 0x%04x DEVSTA 0x%04x\n",
6759 pci_read_config(dev,
6760 dinfo->cfg.pcie.pcie_location +
6761 PCIER_DEVICE_CTL, 2),
6762 status);
6763 }
6764 }
6765 if (pci_find_extcap(dev, PCIZ_AER, &aer) == 0) {
6766 r1 = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4);
6767 r2 = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4);
6768 if (r1 != 0 || r2 != 0) {
6769 pci_print_faulted_dev_name(dinfo);
6770 printf(" AER UC 0x%08x Mask 0x%08x Svr 0x%08x\n"
6771 " COR 0x%08x Mask 0x%08x Ctl 0x%08x\n",
6772 r1, pci_read_config(dev, aer +
6773 PCIR_AER_UC_MASK, 4),
6774 pci_read_config(dev, aer +
6775 PCIR_AER_UC_SEVERITY, 4),
6776 r2, pci_read_config(dev, aer +
6777 PCIR_AER_COR_MASK, 4),
6778 pci_read_config(dev, aer +
6779 PCIR_AER_CAP_CONTROL, 4));
6780 for (i = 0; i < 4; i++) {
6781 r1 = pci_read_config(dev, aer +
6782 PCIR_AER_HEADER_LOG + i * 4, 4);
6783 printf(" HL%d: 0x%08x\n", i, r1);
6784 }
6785 }
6786 }
6787 }
6788}
6789
6790#ifdef DDB
6791DB_SHOW_COMMAND(pcierr, pci_print_faulted_dev_db)
6792{
6793
6794 pci_print_faulted_dev();
6795}
6796
6797static void
6798db_clear_pcie_errors(const struct pci_devinfo *dinfo)
6799{
6800 device_t dev;
6801 int aer;
6802 uint32_t r;
6803
6804 dev = dinfo->cfg.dev;
6805 r = pci_read_config(dev, dinfo->cfg.pcie.pcie_location +
6806 PCIER_DEVICE_STA, 2);
6807 pci_write_config(dev, dinfo->cfg.pcie.pcie_location +
6808 PCIER_DEVICE_STA, r, 2);
6809
6810 if (pci_find_extcap(dev, PCIZ_AER, &aer) != 0)
6811 return;
6812 r = pci_read_config(dev, aer + PCIR_AER_UC_STATUS, 4);
6813 if (r != 0)
6814 pci_write_config(dev, aer + PCIR_AER_UC_STATUS, r, 4);
6815 r = pci_read_config(dev, aer + PCIR_AER_COR_STATUS, 4);
6816 if (r != 0)
6817 pci_write_config(dev, aer + PCIR_AER_COR_STATUS, r, 4);
6818}
6819
6820DB_COMMAND(pci_clearerr, db_pci_clearerr)
6821{
6822 struct pci_devinfo *dinfo;
6823 device_t dev;
6824 uint16_t status, status1;
6825
6826 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
6827 dev = dinfo->cfg.dev;
6828 status1 = status = pci_read_config(dev, PCIR_STATUS, 2);
6829 status1 &= PCIM_STATUS_MDPERR | PCIM_STATUS_STABORT |
6830 PCIM_STATUS_RTABORT | PCIM_STATUS_RMABORT |
6831 PCIM_STATUS_SERR | PCIM_STATUS_PERR;
6832 if (status1 != 0) {
6833 status &= ~status1;
6834 pci_write_config(dev, PCIR_STATUS, status, 2);
6835 }
6836 if (dinfo->cfg.pcie.pcie_location != 0)
6837 db_clear_pcie_errors(dinfo);
6838 }
6839}
6840#endif
struct resource * pci_reserve_map(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int num, u_int flags)
Definition: pci.c:5421
device_t pci_find_pcie_root_port(device_t dev)
Definition: pci.c:6331
static int pci_msi_blacklisted(void)
Definition: pci.c:2514
int pci_remap_msix_method(device_t dev, device_t child, int count, const u_int *vectors)
Definition: pci.c:1982
void pci_child_deleted(device_t dev, device_t child)
Definition: pci.c:5743
DEFINE_CLASS_0(pci, pci_driver, pci_methods, sizeof(struct pci_softc))
static void pci_cfg_save_pcix(device_t dev, struct pci_devinfo *dinfo)
Definition: pci.c:6188
static int pci_reset_post(device_t dev, device_t child)
Definition: pci.c:6636
static devclass_t pci_devclass
Definition: pci.c:229
void pci_probe_nomatch(device_t dev, device_t child)
Definition: pci.c:5037
uint32_t pci_numdevs
Definition: pci.c:336
int pci_detach(device_t dev)
Definition: pci.c:4517
static int pci_ea_bei_to_rid(device_t dev, int bei)
Definition: pci.c:3868
int pci_msix_count_method(device_t dev, device_t child)
Definition: pci.c:2157
static struct pci_devinfo * pci_fill_devinfo(device_t pcib, device_t bus, int d, int b, int s, int f, uint16_t vid, uint16_t did)
Definition: pci.c:703
int pci_teardown_intr(device_t dev, device_t child, struct resource *irq, void *cookie)
Definition: pci.c:4853
void pci_enable_msix_method(device_t dev, device_t child, u_int index, uint64_t address, uint32_t data)
Definition: pci.c:1690
static int pci_porten(device_t dev)
Definition: pci.c:3052
static int pci_get_id_method(device_t dev, device_t child, enum pci_id_type type, uintptr_t *rid)
Definition: pci.c:6322
static int pci_usb_takeover
Definition: pci.c:394
int pcie_link_reset(device_t port, int pcie_location)
Definition: pci.c:6619
struct pci_devinfo * pci_alloc_devinfo_method(device_t dev)
Definition: pci.c:695
static void pci_assign_interrupt(device_t bus, device_t dev, int force_route)
Definition: pci.c:3483
int pci_ea_is_enabled(device_t dev, int rid)
Definition: pci.c:3904
struct pci_map * pci_find_bar(device_t dev, int reg)
Definition: pci.c:3162
static int pci_msix_blacklisted(void)
Definition: pci.c:2566
#define WREG(n, v, w)
int pci_activate_resource(device_t dev, device_t child, int type, int rid, struct resource *r)
Definition: pci.c:5694
void pci_enable_msi_method(device_t dev, device_t child, uint64_t address, uint16_t data)
Definition: pci.c:2347
static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data)
Definition: pci.c:1014
int pci_do_power_suspend
Definition: pci.c:369
void pcie_apei_error(device_t dev, int sev, uint8_t *aerp)
Definition: pci.c:6459
void pci_print_verbose(struct pci_devinfo *dinfo)
Definition: pci.c:2995
static void pci_hint_device_unit(device_t acdev, device_t child, const char *name, int *unitp)
Definition: pci.c:4537
int pci_do_power_resume
Definition: pci.c:364
MODULE_VERSION(pci, 1)
#define PCIR_IS_BIOS(cfg, reg)
Definition: pci.c:89
int pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
Definition: pci.c:5339
static device_method_t pci_methods[]
Definition: pci.c:144
static void uhci_early_takeover(device_t self)
Definition: pci.c:3569
device_t pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func)
Definition: pci.c:449
uint32_t pcie_read_config(device_t dev, int reg, int width)
Definition: pci.c:2287
static int pci_reset_prepare(device_t dev, device_t child)
Definition: pci.c:6645
device_t pci_find_class(uint8_t class, uint8_t subclass)
Definition: pci.c:483
static pci_addr_t pci_mapbase(uint64_t mapreg)
Definition: pci.c:536
static size_t pci_vendordata_size
Definition: pci.c:235
int pci_release_resource(device_t dev, device_t child, int type, int rid, struct resource *r)
Definition: pci.c:5643
#define PCI_QUIRK_MSI_INTX_BUG
Definition: pci.c:245
int pci_find_cap_method(device_t dev, device_t child, int capability, int *capreg)
Definition: pci.c:1526
void pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
Definition: pci.c:6195
int pci_release_msi_method(device_t dev, device_t child)
Definition: pci.c:2706
int pci_find_next_extcap_method(device_t dev, device_t child, int capability, int start, int *capreg)
Definition: pci.c:1643
static int pci_has_quirk(uint32_t devid, int quirk)
Definition: pci.c:426
static void pci_mask_msix(device_t dev, u_int index)
Definition: pci.c:1718
static void pci_lookup(void *arg, const char *name, device_t *dev)
Definition: pci.c:5956
#define REG(n, w)
int pci_msi_device_blacklisted(device_t dev)
Definition: pci.c:2497
#define PCI_QUIRK_DISABLE_MSI
Definition: pci.c:241
int pci_get_max_read_req(device_t dev)
Definition: pci.c:2249
static void pci_ea_fill_info(device_t pcib, pcicfgregs *cfg)
Definition: pci.c:769
static char * pci_describe_device(device_t dev)
Definition: pci.c:5195
int pci_alloc_msix_method(device_t dev, device_t child, int *count)
Definition: pci.c:1808
int pci_resume(device_t dev)
Definition: pci.c:4657
static int pci_clear_aer_on_attach
Definition: pci.c:420
device_t pci_find_class_from(uint8_t class, uint8_t subclass, device_t from)
Definition: pci.c:498
int pci_attach(device_t dev)
Definition: pci.c:4496
void pci_child_added_method(device_t dev, device_t child)
Definition: pci.c:4452
static void pci_cfg_restore_pcie(device_t dev, struct pci_devinfo *dinfo)
Definition: pci.c:6032
static void pcie_setup_mps(device_t dev)
Definition: pci.c:4320
static __inline void pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
Definition: pci.c:2928
void pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
Definition: pci.c:4038
uint32_t pci_read_config_method(device_t dev, device_t child, int reg, int width)
Definition: pci.c:5851
static int pci_do_msi
Definition: pci.c:374
static int pci_do_msix
Definition: pci.c:378
static void pci_write_msix_entry(device_t dev, u_int index, uint64_t address, uint32_t data)
Definition: pci.c:1676
int pci_enable_busmaster_method(device_t dev, device_t child)
Definition: pci.c:2938
static void pci_cfg_save_pcie(device_t dev, struct pci_devinfo *dinfo)
Definition: pci.c:6150
void pci_print_faulted_dev(void)
Definition: pci.c:6732
static void ehci_early_takeover(device_t self)
Definition: pci.c:3593
struct resource_list * pci_get_resource_list(device_t dev, device_t child)
Definition: pci.c:5815
void pci_delete_resource(device_t dev, device_t child, int type, int rid)
Definition: pci.c:5785
static __inline void pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
Definition: pci.c:2918
int pci_assign_interrupt_method(device_t dev, device_t child)
Definition: pci.c:5946
static const struct pci_quirk pci_quirks[]
Definition: pci.c:251
int pci_set_max_read_req(device_t dev, int size)
Definition: pci.c:2265
bool pcie_wait_for_pending_transactions(device_t dev, u_int max_delay)
Definition: pci.c:6380
struct devlist pci_devq
Definition: pci.c:334
const char * desc
Definition: pci.c:4943
int pci_mapsize(uint64_t testval)
Definition: pci.c:561
int pci_get_device_path_method(device_t bus, device_t child, const char *locator, struct sbuf *sb)
Definition: pci.c:5928
struct resource * pci_alloc_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
Definition: pci.c:5611
int pci_pending_msix(device_t dev, u_int index)
Definition: pci.c:1756
static int vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data)
Definition: pci.c:1064
int pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags, driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
Definition: pci.c:4745
struct resource * pci_alloc_multi_resource(device_t dev, device_t child, int type, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_long num, u_int flags)
Definition: pci.c:5535
static struct pci_devinfo * pci_identify_function(device_t pcib, device_t dev, int domain, int busno, int slot, int func)
Definition: pci.c:4131
#define RREG(n)
int pci_print_child(device_t dev, device_t child)
Definition: pci.c:4912
static void pci_write_bar(device_t dev, struct pci_map *pm, pci_addr_t base)
Definition: pci.c:3141
int pci_bar_enabled(device_t dev, struct pci_map *pm)
Definition: pci.c:3176
int pci_msi_count_method(device_t dev, device_t child)
Definition: pci.c:2760
int pci_enable_io_method(device_t dev, device_t child, int space)
Definition: pci.c:2952
int pci_enable_aspm
Definition: pci.c:416
static void pci_read_vpd(device_t pcib, pcicfgregs *cfg)
Definition: pci.c:1088
int pci_find_next_htcap_method(device_t dev, device_t child, int capability, int start, int *capreg)
Definition: pci.c:1485
static void pci_resume_msix(device_t dev)
Definition: pci.c:1774
int subclass
Definition: pci.c:4941
void pci_child_detached(device_t dev, device_t child)
Definition: pci.c:5084
void pci_disable_msi_method(device_t dev, device_t child)
Definition: pci.c:2375
__FBSDID("$FreeBSD$")
device_t pci_find_device(uint16_t vendor, uint16_t device)
Definition: pci.c:468
SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD|CTLFLAG_MPSAFE, 0, "PCI bus tuning parameters")
static void pci_cfg_restore_pcix(device_t dev, struct pci_devinfo *dinfo)
Definition: pci.c:6068
int pci_power_reset(device_t dev)
Definition: pci.c:6599
int pci_get_powerstate_method(device_t dev, device_t child)
Definition: pci.c:2880
void pci_add_children(device_t dev, int domain, int busno)
Definition: pci.c:4144
int pci_get_max_payload(device_t dev)
Definition: pci.c:2233
int pci_deactivate_resource(device_t dev, device_t child, int type, int rid, struct resource *r)
Definition: pci.c:5722
#define PCI_QUIRK_DISABLE_MSIX
Definition: pci.c:244
int pci_msix_table_bar_method(device_t dev, device_t child)
Definition: pci.c:2179
static int pci_reset_child(device_t dev, device_t child, int flags)
Definition: pci.c:6654
static void pci_read_cap(device_t pcib, pcicfgregs *cfg)
Definition: pci.c:843
static int pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
Definition: pci.c:5149
#define PCI_QUIRK_UNMAP_REG
Definition: pci.c:243
static int pci_enable_ari
Definition: pci.c:412
int pci_msix_pba_bar_method(device_t dev, device_t child)
Definition: pci.c:2168
int pci_freecfg(struct pci_devinfo *dinfo)
Definition: pci.c:2773
static pci_addr_t pci_rombase(uint64_t mapreg)
Definition: pci.c:580
static void pci_fixancient(pcicfgregs *cfg)
Definition: pci.c:632
int pci_find_extcap_method(device_t dev, device_t child, int capability, int *capreg)
Definition: pci.c:1606
void pcie_write_config(device_t dev, int reg, uint32_t value, int width)
Definition: pci.c:2303
static void pci_resume_msi(device_t dev)
Definition: pci.c:2395
int pci_resume_child(device_t dev, device_t child)
Definition: pci.c:4628
int pci_disable_busmaster_method(device_t dev, device_t child)
Definition: pci.c:2945
void pci_add_child(device_t bus, struct pci_devinfo *dinfo)
Definition: pci.c:4431
int pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr)
Definition: pci.c:1390
int pci_get_relaxed_ordering_enabled(device_t dev)
Definition: pci.c:2218
static int pci_memen(device_t dev)
Definition: pci.c:3058
int pci_child_pnpinfo_method(device_t dev, device_t child, struct sbuf *sb)
Definition: pci.c:5913
EARLY_DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, NULL, BUS_PASS_BUS)
static void pci_set_power_child(device_t dev, device_t child, int state)
Definition: pci.c:4569
static void xhci_early_takeover(device_t self)
Definition: pci.c:3649
int pci_rescan_method(device_t dev)
Definition: pci.c:4191
static int pci_printf(pcicfgregs *cfg, const char *fmt,...)
Definition: pci.c:520
bus_dma_tag_t pci_get_dma_tag(device_t bus, device_t dev)
Definition: pci.c:5842
bool pcie_flr(device_t dev, u_int max_delay, bool force)
Definition: pci.c:6536
static int pci_modevent(module_t mod, int what, void *arg)
Definition: pci.c:6005
void pci_add_resources_ea(device_t bus, device_t dev, int alloc_iov)
Definition: pci.c:3920
int pci_attach_common(device_t dev)
Definition: pci.c:4468
uint32_t pcie_adjust_config(device_t dev, int reg, uint32_t mask, uint32_t value, int width)
Definition: pci.c:2322
static int pci_enable_io_modes
Definition: pci.c:343
static int pci_honor_msi_blacklist
Definition: pci.c:387
static int pci_romsize(uint64_t testval)
Definition: pci.c:589
static int pci_maprange(uint64_t mapreg)
Definition: pci.c:608
static void pci_add_child_clear_aer(device_t dev, struct pci_devinfo *dinfo)
Definition: pci.c:4356
struct pci_map * pci_add_bar(device_t dev, int reg, pci_addr_t value, pci_addr_t size)
Definition: pci.c:3203
uint32_t pci_generation
Definition: pci.c:335
void pci_restore_state(device_t dev)
Definition: pci.c:6313
static int pci_do_power_nodriver
Definition: pci.c:356
#define PCI_VPD_TIMEOUT
Definition: pci.c:1011
void pci_write_config_method(device_t dev, device_t child, int reg, uint32_t val, int width)
Definition: pci.c:5892
static int pcie_chipset
Definition: pci.c:337
static char * pci_vendordata
Definition: pci.c:234
static void pci_restore_bars(device_t dev)
Definition: pci.c:3228
void pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
Definition: pci.c:6075
struct pcicfg_vpd * pci_fetch_vpd_list(device_t dev)
Definition: pci.c:1429
int pci_msix_device_blacklisted(device_t dev)
Definition: pci.c:2548
static int pci_remap_intr_method(device_t bus, device_t dev, u_int irq)
Definition: pci.c:2421
struct pci_devinfo * pci_read_device(device_t pcib, device_t bus, int d, int b, int s, int f)
Definition: pci.c:680
device_t pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
Definition: pci.c:440
#define PCI_QUIRK_MAP_REG
Definition: pci.c:240
static int pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl, int force, int prefetch)
Definition: pci.c:3252
#define PCI_QUIRK_ENABLE_MSI_VM
Definition: pci.c:242
#define PCI_QUIRK_REALLOC_BAR
Definition: pci.c:246
static const struct @0 pci_nomatch_tab[]
static int pcix_chipset
Definition: pci.c:337
int pci_child_location_method(device_t dev, device_t child, struct sbuf *sb)
Definition: pci.c:5903
static void pci_load_vendor_data(void)
Definition: pci.c:4696
static int pci_release_msix(device_t dev, device_t child)
Definition: pci.c:2104
int pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw, const char **vptr)
Definition: pci.c:1407
int pci_alloc_msi_method(device_t dev, device_t child, int *count)
Definition: pci.c:2587
int pci_disable_io_method(device_t dev, device_t child, int space)
Definition: pci.c:2971
static void ohci_early_takeover(device_t self)
Definition: pci.c:3532
void pci_driver_added(device_t dev, driver_t *driver)
Definition: pci.c:4716
const struct pci_device_table * pci_match_device(device_t child, const struct pci_device_table *id, size_t nelt)
Definition: pci.c:6682
int pci_find_next_cap_method(device_t dev, device_t child, int capability, int start, int *capreg)
Definition: pci.c:1579
int pci_find_htcap_method(device_t dev, device_t child, int capability, int *capreg)
Definition: pci.c:1445
void pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp, int *bar64)
Definition: pci.c:3064
void pci_ht_map_msi(device_t dev, uint64_t addr)
Definition: pci.c:2193
static int pci_msix_rewrite_table
Definition: pci.c:382
int pcie_get_max_completion_timeout(device_t dev)
Definition: pci.c:6416
static const char * pci_maptype(uint64_t mapreg)
Definition: pci.c:548
static void pci_unmask_msix(device_t dev, u_int index)
Definition: pci.c:1737
static int pci_probe(device_t dev)
Definition: pci.c:4458
int pci_suspend_child(device_t dev, device_t child)
Definition: pci.c:4589
static int pci_do_realloc_bars
Definition: pci.c:350
static void pci_print_faulted_dev_name(const struct pci_devinfo *dinfo)
Definition: pci.c:6718
void pci_save_state(device_t dev)
Definition: pci.c:6304
int report
Definition: pci.c:4942
SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RWTUN, &pci_enable_io_modes, 1, "Enable I/O and memory bits in the config register. Some BIOSes do not" " enable these bits correctly. We'd like to do this all the time, but" " there are some peripherals that this causes problems with.")
int pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
Definition: pci.c:5248
int pci_set_powerstate_method(device_t dev, device_t child, int state)
Definition: pci.c:2808
static void pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force, uint32_t prefetchmask)
Definition: pci.c:3429
static int pci_clear_bars
Definition: pci.c:402
static void pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
Definition: pci.c:645
u_int reg
Definition: pci_dw_if.m:42
int width
Definition: pci_dw_if.m:43
bool start
Definition: pci_dw_if.m:64
bool * status
Definition: pci_dw_if.m:72
uint32_t value
Definition: pci_dw_if.m:55
uint16_t data
Definition: pci_if.m:198
u_int index
Definition: pci_if.m:204
METHOD void enable_msix
Definition: pci_if.m:201
device_t child
Definition: pci_if.m:73
const char ** identptr
Definition: pci_if.m:100
INTERFACE pci
Definition: pci_if.m:32
const u_int * vectors
Definition: pci_if.m:218
const char * kw
Definition: pci_if.m:106
device_t pf
Definition: pci_if.m:277
enum pci_id_type type
Definition: pci_if.m:249
uint16_t rid
Definition: pci_if.m:278
int state
Definition: pci_if.m:94
const char * name
Definition: pci_if.m:267
u_int32_t val
Definition: pci_if.m:82
uintptr_t * id
Definition: pci_if.m:250
uint16_t vid
Definition: pci_if.m:279
int space
Definition: pci_if.m:123
uint64_t address
Definition: pci_if.m:197
const char ** vptr
Definition: pci_if.m:107
uint16_t did
Definition: pci_if.m:280
int * count
Definition: pci_if.m:185
int * capreg
Definition: pci_if.m:141
METHOD void enable_msi
Definition: pci_if.m:194
pci_id_type
Definition: pci_if.m:59
int capability
Definition: pci_if.m:140
int pci_iov_attach_method(device_t bus, device_t dev, nvlist_t *pf_schema, nvlist_t *vf_schema, const char *name)
Definition: pci_iov.c:117
int pci_iov_detach_method(device_t bus, device_t dev)
Definition: pci_iov.c:190
struct resource * pci_vf_alloc_mem_resource(device_t dev, device_t child, int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
Definition: pci_iov.c:998
void pci_iov_cfg_save(device_t dev, struct pci_devinfo *dinfo)
Definition: pci_iov.c:808
int pci_vf_release_mem_resource(device_t dev, device_t child, int rid, struct resource *r)
Definition: pci_iov.c:1061
void pci_iov_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
Definition: pci_iov.c:796
static __inline int pci_iov_detach(device_t dev)
Definition: pci_iov.h:47
int static __inline int pci_iov_attach(device_t dev, struct nvlist *pf_schema, struct nvlist *vf_schema)
Definition: pci_iov.h:40
device_t pci_create_iov_child_method(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did)
device_t pci_add_iov_child(device_t bus, device_t pf, uint16_t rid, uint16_t vid, uint16_t did)
struct cdevsw pcicdev
Definition: pci_user.c:119
u_int func
Definition: pcib_if.m:81
int * irqs
Definition: pcib_if.m:125
u_int bus
Definition: pcib_if.m:79
device_t dev
Definition: pcib_if.m:109
uint64_t * addr
Definition: pcib_if.m:165
METHOD int maxslots
Definition: pcib_if.m:58
INTERFACE pcib
Definition: pcib_if.m:34
u_int slot
Definition: pcib_if.m:80
int * irq
Definition: pcib_if.m:145
#define PCIM_AER_COR_REPLAY_TIMEOUT
Definition: pcireg.h:1013
#define PCIR_MSI_DATA_64BIT
Definition: pcireg.h:621
#define PCIM_CMD_BUSMASTEREN
Definition: pcireg.h:88
#define PCIS_SATCOM_AUDIO
Definition: pcireg.h:495
#define PCIM_AER_UC_POISONED_TLP
Definition: pcireg.h:992
#define PCIR_SUBVEND_2
Definition: pcireg.h:318
#define PCIS_MULTIMEDIA_TELE
Definition: pcireg.h:377
#define PCIM_PSTAT_D3
Definition: pcireg.h:568
#define PCIR_BAR(x)
Definition: pcireg.h:216
#define PCI_DOMAINMAX
Definition: pcireg.h:44
#define PCIR_BRIDGECTL_1
Definition: pcireg.h:286
#define PCIS_SERIALBUS_SSA
Definition: pcireg.h:460
#define PCIC_OLD
Definition: pcireg.h:330
#define PCIS_INPUTDEV_GAMEPORT
Definition: pcireg.h:441
#define PCIM_EA_FIELD_MASK
Definition: pcireg.h:666
#define PCIM_AER_UC_RECEIVER_OVERFLOW
Definition: pcireg.h:997
#define PCIS_WIRELESS_IR
Definition: pcireg.h:480
#define PCICAP_NEXTPTR
Definition: pcireg.h:130
#define PCIR_LATTIMER
Definition: pcireg.h:115
#define PCIER_FLAGS
Definition: pcireg.h:787
#define PCIM_EA_ENABLE
Definition: pcireg.h:661
#define PCIEM_CAP2_COMP_TIMO_RANGES
Definition: pcireg.h:923
#define PCIM_AER_UC_UNEXPECTED_COMPLETION
Definition: pcireg.h:996
#define PCIM_BAR_MEM_PREFETCH
Definition: pcireg.h:228
#define PCIS_BRIDGE_RACEWAY
Definition: pcireg.h:397
#define PCIER_ROOT_CTL
Definition: pcireg.h:910
#define PCIEM_CTL2_COMP_TIMO_55MS
Definition: pcireg.h:935
#define PCIEM_CTL2_COMP_TIMO_3500MS
Definition: pcireg.h:938
#define PCIM_PCAP_D2SUPP
Definition: pcireg.h:557
#define PCIS_MULTIMEDIA_HDA
Definition: pcireg.h:378
#define PCIM_AER_UC_ATOMIC_EGRESS_BLK
Definition: pcireg.h:1004
#define PCIM_AER_COR_REPLAY_ROLLOVER
Definition: pcireg.h:1012
#define PCIM_EA_P_MEM
Definition: pcireg.h:648
#define PCIM_HTCMD_CAP_MASK
Definition: pcireg.h:746
#define PCIS_BRIDGE_CARDBUS
Definition: pcireg.h:396
#define PCIS_BASEPERIPH_TIMER
Definition: pcireg.h:428
#define PCIEM_CTL_NFER_ENABLE
Definition: pcireg.h:813
#define PCIS_CRYPTO_NETCOMP
Definition: pcireg.h:500
#define PCI_BAR_MEM(x)
Definition: pcireg.h:220
#define PCIR_MSI_ADDR_HIGH
Definition: pcireg.h:619
#define PCIEM_TYPE_DOWNSTREAM_PORT
Definition: pcireg.h:794
#define PCIER_LINK_CTL
Definition: pcireg.h:844
#define PCIS_BRIDGE_PCMCIA
Definition: pcireg.h:394
#define PCIS_SIMPLECOMM_UART
Definition: pcireg.h:405
#define PCIXR_COMMAND
Definition: pcireg.h:674
#define PCIM_EA_BEI
Definition: pcireg.h:631
#define PCIM_EA_BEI_VF_BAR_0
Definition: pcireg.h:641
#define PCIY_VPD
Definition: pcireg.h:136
#define PCIC_INPUTDEV
Definition: pcireg.h:436
#define PCIM_BAR_IO_BASE
Definition: pcireg.h:231
#define PCIR_PRIBUS_2
Definition: pcireg.h:299
#define PCIM_CMD_MEMEN
Definition: pcireg.h:87
#define PCIER_DEVICE_CAP
Definition: pcireg.h:801
#define PCIR_VENDOR
Definition: pcireg.h:83
#define PCIEM_CTL_RELAXED_ORD_ENABLE
Definition: pcireg.h:816
#define PCIS_BRIDGE_ISA
Definition: pcireg.h:389
#define PCIS_BRIDGE_EISA
Definition: pcireg.h:390
#define PCIEM_CTL2_COMP_TIMO_900MS
Definition: pcireg.h:937
#define PCIER_DEVICE_CTL2
Definition: pcireg.h:930
#define PCIER_LINK_CTL2
Definition: pcireg.h:956
#define PCIEM_CAP_FLR
Definition: pcireg.h:810
#define PCIY_EA
Definition: pcireg.h:153
#define PCIS_SATCOM_VOICE
Definition: pcireg.h:496
#define PCIS_BRIDGE_MCA
Definition: pcireg.h:391
#define PCIS_BASEPERIPH_IOMMU
Definition: pcireg.h:432
#define PCIR_MAXLAT
Definition: pcireg.h:254
#define PCIM_BIOS_ENABLE
Definition: pcireg.h:248
#define PCI_BAR_IO(x)
Definition: pcireg.h:219
#define PCIR_POWER_DATA
Definition: pcireg.h:593
#define PCIS_DISPLAY_XGA
Definition: pcireg.h:370
#define PCIS_STORAGE_IPI
Definition: pcireg.h:343
#define PCIM_EA_BEI_BAR_5
Definition: pcireg.h:635
#define PCIM_AER_COR_INTERNAL_ERROR
Definition: pcireg.h:1015
#define PCIR_AER_COR_MASK
Definition: pcireg.h:1017
#define PCIM_AER_COR_HEADER_LOG_OVFLOW
Definition: pcireg.h:1016
#define PCIS_STORAGE_RAID
Definition: pcireg.h:344
#define PCIY_PMG
Definition: pcireg.h:134
#define PCI_EXTCAP_NEXTPTR(ecap)
Definition: pcireg.h:164
#define PCIR_COMMAND
Definition: pcireg.h:85
#define PCIM_STATUS_MDPERR
Definition: pcireg.h:100
#define PCIC_DOCKING
Definition: pcireg.h:444
#define PCIS_MEMORY_FLASH
Definition: pcireg.h:384
#define PCIEM_CAP_MAX_PAYLOAD
Definition: pcireg.h:802
#define PCIS_BASEPERIPH_PIC
Definition: pcireg.h:421
#define PCIR_BRIDGECTL_2
Definition: pcireg.h:316
#define PCIM_PCAP_D1SUPP
Definition: pcireg.h:556
#define PCIM_AER_UC_TLP_PREFIX_BLOCKED
Definition: pcireg.h:1005
#define PCIR_SUBVEND_0
Definition: pcireg.h:245
#define PCIM_AER_UC_ACS_VIOLATION
Definition: pcireg.h:1001
#define PCIR_MSI_DATA
Definition: pcireg.h:620
#define PCIM_BAR_IO_RESERVED
Definition: pcireg.h:230
#define PCIEM_FLAGS_SLOT
Definition: pcireg.h:799
#define PCIM_MSICTRL_MSI_ENABLE
Definition: pcireg.h:617
#define PCIM_AER_COR_BAD_TLP
Definition: pcireg.h:1010
#define PCIS_INPUTDEV_MOUSE
Definition: pcireg.h:439
#define PCIR_STATUS
Definition: pcireg.h:95
#define PCIM_EA_IS_64
Definition: pcireg.h:665
#define PCIEM_ROOT_CTL_SERR_CORR
Definition: pcireg.h:911
#define PCIM_HTCMD_MSI_ENABLE
Definition: pcireg.h:766
#define PCIM_AER_UC_ECRC_ERROR
Definition: pcireg.h:999
#define PCIEM_CTL_COR_ENABLE
Definition: pcireg.h:812
#define PCIS_NETWORK_ETHERNET
Definition: pcireg.h:357
#define PCIEM_CTL2_COMP_TIMO_100US
Definition: pcireg.h:933
#define PCIER_SLOT_CTL
Definition: pcireg.h:880
#define PCIR_INTLINE
Definition: pcireg.h:251
#define PCIM_AER_COR_BAD_DLLP
Definition: pcireg.h:1011
#define PCIZ_AER
Definition: pcireg.h:168
#define PCIR_PROGIF
Definition: pcireg.h:111
#define PCIP_SERIALBUS_USB_XHCI
Definition: pcireg.h:465
#define PCIS_SATCOM_TV
Definition: pcireg.h:494
#define PCIR_AER_UC_SEVERITY
Definition: pcireg.h:1007
#define PCIR_POWER_CAP
Definition: pcireg.h:543
#define PCIC_PROCESSOR
Definition: pcireg.h:448
#define PCIR_DEVICE
Definition: pcireg.h:84
#define PCIS_INTELLIIO_I2O
Definition: pcireg.h:491
#define PCIM_EA_BEI_ROM
Definition: pcireg.h:639
#define PCIEM_TYPE_LEGACY_ENDPOINT
Definition: pcireg.h:791
#define PCIR_CLASS
Definition: pcireg.h:113
#define PCIS_OLD_VGA
Definition: pcireg.h:332
#define PCIM_CMD_PORTEN
Definition: pcireg.h:86
#define PCIEM_STA_FATAL_ERROR
Definition: pcireg.h:828
#define PCIM_PSTAT_DMASK
Definition: pcireg.h:569
#define PCIR_EA_NUM_ENT
Definition: pcireg.h:626
#define PCIC_BASEPERIPH
Definition: pcireg.h:420
#define PCIY_EXPRESS
Definition: pcireg.h:149
#define PCIR_INTPIN
Definition: pcireg.h:252
#define PCIM_BAR_MEM_64
Definition: pcireg.h:227
#define PCIEM_CTL2_COMP_TIMO_210MS
Definition: pcireg.h:936
#define PCIS_NETWORK_TOKENRING
Definition: pcireg.h:358
#define PCIC_MULTIMEDIA
Definition: pcireg.h:374
#define PCIS_BRIDGE_NUBUS
Definition: pcireg.h:395
#define PCIEM_CTL_INITIATE_FLR
Definition: pcireg.h:824
#define PCIEM_CTL2_COMP_TIMO_13S
Definition: pcireg.h:939
#define PCICAP_ID
Definition: pcireg.h:129
#define PCIM_EA_PP
Definition: pcireg.h:644
#define PCIM_HTCAP_SLAVE
Definition: pcireg.h:747
#define PCIM_MSIX_BIR_MASK
Definition: pcireg.h:969
#define PCIM_EA_P_VF_MEM
Definition: pcireg.h:652
#define PCI_BUSMAX
Definition: pcireg.h:45
#define PCIR_SECLAT_1
Definition: pcireg.h:264
#define PCIR_SRIOV_BAR(x)
Definition: pcireg.h:1076
#define PCIR_MSIX_TABLE
Definition: pcireg.h:967
#define PCIM_AER_UC_MALFORMED_TLP
Definition: pcireg.h:998
#define PCIEM_LINK_CTL_LINK_DIS
Definition: pcireg.h:850
#define PCIM_HDRTYPE_NORMAL
Definition: pcireg.h:118
#define PCIR_SUBDEV_2
Definition: pcireg.h:319
#define PCIS_NETWORK_ATM
Definition: pcireg.h:360
#define PCIEM_FLAGS_VERSION
Definition: pcireg.h:788
#define PCIM_EA_BEI_OFFSET
Definition: pcireg.h:632
#define PCIM_PSTAT_D1
Definition: pcireg.h:566
#define PCIM_AER_UC_FC_PROTOCOL_ERROR
Definition: pcireg.h:993
#define PCIR_HDRTYPE
Definition: pcireg.h:116
#define PCIP_SERIALBUS_USB_EHCI
Definition: pcireg.h:464
#define PCIM_HDRTYPE
Definition: pcireg.h:117
#define PCIR_MINGNT
Definition: pcireg.h:253
#define PCIM_EA_BEI_VF_BAR_5
Definition: pcireg.h:642
#define PCIR_SECBUS_2
Definition: pcireg.h:300
#define PCIR_IOBASEL_1
Definition: pcireg.h:266
#define PCIR_EA_FIRST_ENT
Definition: pcireg.h:628
#define PCIS_DASP_DPIO
Definition: pcireg.h:505
#define PCIER_DEVICE_CTL
Definition: pcireg.h:811
#define PCIM_EA_BEI_BAR_0
Definition: pcireg.h:634
#define PCIR_AER_CAP_CONTROL
Definition: pcireg.h:1018
#define PCIM_STATUS_SERR
Definition: pcireg.h:108
#define PCIS_BASEPERIPH_RTC
Definition: pcireg.h:429
#define PCIM_BAR_MEM_BASE
Definition: pcireg.h:229
#define PCIM_STATUS_RTABORT
Definition: pcireg.h:106
#define PCIM_MSIXCTRL_MSIX_ENABLE
Definition: pcireg.h:964
#define PCIS_STORAGE_ATA_ADMA
Definition: pcireg.h:345
#define PCIER_DEVICE_STA
Definition: pcireg.h:825
#define PCIS_MULTIMEDIA_VIDEO
Definition: pcireg.h:375
#define PCIR_SECLAT_2
Definition: pcireg.h:302
#define PCIR_BIOS
Definition: pcireg.h:247
#define PCIM_BAR_MEM_1MB
Definition: pcireg.h:226
#define PCIS_STORAGE_IDE
Definition: pcireg.h:336
#define PCIER_DEVICE_CAP2
Definition: pcireg.h:922
#define PCIS_DISPLAY_VGA
Definition: pcireg.h:369
#define PCI_MAXHDRTYPE
Definition: pcireg.h:50
#define PCIM_HTCMD_MSI_FIXED
Definition: pcireg.h:767
#define PCIY_MSI
Definition: pcireg.h:138
#define PCIM_PCAP_SPEC
Definition: pcireg.h:544
#define PCIEM_TYPE_ROOT_EC
Definition: pcireg.h:798
#define PCIP_SERIALBUS_USB_OHCI
Definition: pcireg.h:463
#define PCIR_REVID
Definition: pcireg.h:110
#define PCIEM_ROOT_CTL_SERR_FATAL
Definition: pcireg.h:913
#define PCIC_NETWORK
Definition: pcireg.h:356
#define PCIC_SATCOM
Definition: pcireg.h:493
#define PCIM_MSICTRL_64BIT
Definition: pcireg.h:602
#define PCIM_BAR_MEM_32
Definition: pcireg.h:225
#define PCIEM_CTL_FER_ENABLE
Definition: pcireg.h:814
#define PCIR_PMBASEL_1
Definition: pcireg.h:277
#define PCIS_SIMPLECOMM_PAR
Definition: pcireg.h:413
#define PCIS_INPUTDEV_KEYBOARD
Definition: pcireg.h:437
#define PCIM_CMD_INTxDIS
Definition: pcireg.h:94
#define PCIR_HTMSI_ADDRESS_HI
Definition: pcireg.h:769
#define PCIM_EA_ES
Definition: pcireg.h:630
#define PCIM_EA_P_VF_MEM_PREFETCH
Definition: pcireg.h:651
#define PCIS_SERIALBUS_SMBUS
Definition: pcireg.h:468
#define PCIS_INPUTDEV_DIGITIZER
Definition: pcireg.h:438
#define PCIR_SUBBUS_1
Definition: pcireg.h:263
#define PCIS_STORAGE_SATA
Definition: pcireg.h:346
#define PCIR_SRIOV_CTL
Definition: pcireg.h:1061
#define PCIEM_LINK_STA_TRAINING
Definition: pcireg.h:862
#define PCIM_HTCAP_HOST
Definition: pcireg.h:748
#define PCIM_STATUS_STABORT
Definition: pcireg.h:105
#define PCIM_STATUS_RMABORT
Definition: pcireg.h:107
#define PCIM_AER_COR_ADVISORY_NF_ERROR
Definition: pcireg.h:1014
#define PCIS_WIRELESS_RF
Definition: pcireg.h:481
#define PCIC_WIRELESS
Definition: pcireg.h:478
#define PCIS_BASEPERIPH_SDHC
Definition: pcireg.h:431
#define PCIM_EA_NUM_ENT_MASK
Definition: pcireg.h:627
#define PCIR_CACHELNSZ
Definition: pcireg.h:114
#define PCIM_EA_P_MEM_PREFETCH
Definition: pcireg.h:649
#define PCIM_BAR_MEM_TYPE
Definition: pcireg.h:224
#define PCIS_BRIDGE_HOST
Definition: pcireg.h:388
#define PCIC_CRYPTO
Definition: pcireg.h:499
#define PCIS_SATCOM_DATA
Definition: pcireg.h:497
#define PCIC_DISPLAY
Definition: pcireg.h:368
#define PCIM_PSTAT_D0
Definition: pcireg.h:565
#define PCIS_DASP_PERFCNTRS
Definition: pcireg.h:506
#define PCIM_AER_UC_INTERNAL_ERROR
Definition: pcireg.h:1002
#define PCIR_HTMSI_ADDRESS_LO
Definition: pcireg.h:768
pcireg.h constants referenced by pci.c (defining line in pcireg.h):
  #define PCIEM_TYPE_ROOT_PORT - pcireg.h:792
  #define PCIS_SERIALBUS_ACCESS - pcireg.h:459
  #define PCIM_MFDEV - pcireg.h:121
  #define PCIS_BRIDGE_PCI - pcireg.h:392
  #define PCIEM_CTL2_COMP_TIMO_64S - pcireg.h:940
  #define PCIM_AER_UC_COMPLETION_TIMEOUT - pcireg.h:994
  #define PCIM_HDRTYPE_BRIDGE - pcireg.h:119
  #define PCIS_INPUTDEV_SCANNER - pcireg.h:440
  #define PCIM_EA_P_IO - pcireg.h:650
  #define PCIR_VPD_ADDR - pcireg.h:596
  #define PCIP_STORAGE_IDE_MASTERDEV - pcireg.h:341
  #define PCIM_AER_UC_UNSUPPORTED_REQUEST - pcireg.h:1000
  #define PCIR_AER_COR_STATUS - pcireg.h:1008
  #define PCIS_MEMORY_RAM - pcireg.h:383
  #define PCIR_CAP_PTR_2 - pcireg.h:296
  #define PCIM_MSICTRL_VECTOR - pcireg.h:601
  #define PCIR_MSIX_CTRL - pcireg.h:963
  #define PCIM_MSIX_VCTRL_MASK - pcireg.h:976
  #define PCIS_BASEPERIPH_DMA - pcireg.h:427
  #define PCIS_SERIALBUS_USB - pcireg.h:461
  #define PCIEM_STA_CORRECTABLE_ERROR - pcireg.h:826
  #define PCIE_ARI_FUNCMAX - pcireg.h:53
  #define PCIEM_CTL2_COMP_TIMO_VAL - pcireg.h:931
  #define PCIR_SECBUS_1 - pcireg.h:262
  #define PCIS_BASEPERIPH_PCIHOT - pcireg.h:430
  #define PCI_SLOTMAX - pcireg.h:46
  #define PCIV_INVALID - pcireg.h:125
  #define PCIER_SLOT_CTL2 - pcireg.h:959
  #define PCIC_BRIDGE - pcireg.h:387
  #define PCIM_AER_UC_TRAINING_ERROR - pcireg.h:989
  #define PCIR_AER_UC_MASK - pcireg.h:1006
  #define PCIEM_STA_TRANSACTION_PND - pcireg.h:831
  #define PCIM_HDRTYPE_CARDBUS - pcireg.h:120
  #define PCIM_AER_UC_DL_PROTOCOL_ERROR - pcireg.h:990
  #define PCIEM_CTL2_COMP_TIMO_10MS - pcireg.h:934
  #define PCIR_AER_UC_STATUS - pcireg.h:988
  #define PCIS_STORAGE_SAS - pcireg.h:348
  #define PCIEM_CTL_URR_ENABLE - pcireg.h:815
  #define PCIR_SUBBUS_2 - pcireg.h:301
  #define PCIEM_ROOT_CTL_SERR_NONFATAL - pcireg.h:912
  #define PCIM_AER_UC_COMPLETER_ABORT - pcireg.h:995
  #define PCIY_PCIX - pcireg.h:140
  #define PCIM_STATUS_PERR - pcireg.h:109
  #define PCIEM_STA_UNSUPPORTED_REQ - pcireg.h:829
  #define PCIEM_STA_NON_FATAL_ERROR - pcireg.h:827
  #define PCIEM_LINK_CTL_RETRAIN_LINK - pcireg.h:851
  #define PCIEM_CTL_MAX_PAYLOAD - pcireg.h:817
  #define PCIM_MSIXCTRL_TABLE_SIZE - pcireg.h:966
  #define PCIS_NETWORK_FDDI - pcireg.h:359
  #define PCIR_SUBDEV_0 - pcireg.h:246
  #define PCIS_DISPLAY_3D - pcireg.h:371
  #define PCIR_EXTCAP - pcireg.h:158
  #define PCIC_STORAGE - pcireg.h:334
  #define PCIR_MSIX_PBA - pcireg.h:968
  #define PCIM_EA_PP_OFFSET - pcireg.h:645
  #define PCIS_SIMPLECOMM_MULSER - pcireg.h:414
  #define PCIR_SUBCLASS - pcireg.h:112
  #define PCIP_STORAGE_IDE_MODESEC - pcireg.h:339
  #define PCIS_STORAGE_FLOPPY - pcireg.h:342
  #define PCIY_SUBVENDOR - pcireg.h:146
  #define PCIS_WIRELESS_IRDA - pcireg.h:479
  #define PCIC_INTELLIIO - pcireg.h:490
  #define PCIR_POWER_STATUS - pcireg.h:564
  #define PCIY_MSIX - pcireg.h:150
  #define PCIS_OLD_NONVGA - pcireg.h:331
  #define PCIM_MSICTRL_MMC_MASK - pcireg.h:610
  #define PCIER_LINK_STA - pcireg.h:858
  #define PCIM_AER_UC_MC_BLOCKED_TLP - pcireg.h:1003
  #define PCIR_AER_HEADER_LOG - pcireg.h:1027
  #define PCIEM_FLAGS_TYPE - pcireg.h:789
  #define PCIR_MSI_CTRL - pcireg.h:600
  #define PCIY_HT - pcireg.h:141
  #define PCIC_SIMPLECOMM - pcireg.h:404
  #define PCIS_DASP_COMM_SYNC - pcireg.h:507
  #define PCIS_SERIALBUS_FC - pcireg.h:467
  #define PCIR_PRIBUS_1 - pcireg.h:261
  #define PCIS_NETWORK_ISDN - pcireg.h:361
  #define PCIS_STORAGE_NVM - pcireg.h:349
  #define PCIS_MULTIMEDIA_AUDIO - pcireg.h:376
  #define PCIR_VPD_DATA - pcireg.h:597
  #define PCIM_BIOS_ADDR_MASK - pcireg.h:249
  #define PCIR_MEMBASE_1 - pcireg.h:274
  #define PCIM_HTCAP_MSI_MAPPING - pcireg.h:755
  #define PCIM_AER_COR_RECEIVER_ERROR - pcireg.h:1009
  #define PCIEM_TYPE_ENDPOINT - pcireg.h:790
  #define PCIS_CRYPTO_ENTERTAIN - pcireg.h:501
  #define PCIS_STORAGE_SCSI - pcireg.h:335
  #define PCIR_SUBVENDCAP_ID - pcireg.h:784
  #define PCIP_SERIALBUS_USB_UHCI - pcireg.h:462
  #define PCIR_HT_COMMAND - pcireg.h:745
  #define PCIS_DASP_MGMT_CARD - pcireg.h:508
  #define PCIM_AER_UC_SURPRISE_LINK_DOWN - pcireg.h:991
  #define PCIM_PSTAT_D2 - pcireg.h:567
  #define PCIM_SRIOV_VF_MSE - pcireg.h:1063
  #define PCI_FUNCMAX - pcireg.h:47
  #define PCIS_SERIALBUS_FW - pcireg.h:458
  #define PCIEM_CTL_MAX_READ_REQUEST - pcireg.h:822
  #define PCIC_SERIALBUS - pcireg.h:457
  #define PCIC_MEMORY - pcireg.h:382
  #define PCI_EXTCAP_ID(ecap) - pcireg.h:162
  #define PCIM_STATUS_CAPPRESENT - pcireg.h:97
  #define PCIR_MSI_ADDR - pcireg.h:618
  #define PCIR_CAP_PTR - pcireg.h:250
  #define PCIS_SIMPLECOMM_MODEM - pcireg.h:415
  #define PCIC_DASP - pcireg.h:504
  #define PCIP_STORAGE_IDE_MODEPRIM - pcireg.h:337
  #define PCIR_POWER_BSE - pcireg.h:588
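Most of the constants above describe fixed configuration-space layout. As a point of reference, here is a hedged sketch (not code taken from pci.c) of how a driver walks the classic capability list with pci_read_config(9). PCIR_STATUS, PCICAP_ID and PCICAP_NEXTPTR are additional pcireg.h names assumed here; cardbus functions would start from PCIR_CAP_PTR_2 instead of PCIR_CAP_PTR.

/*
 * Hedged sketch: walk a device's classic capability list and print
 * every capability ID found.  Error handling is minimal.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

static void
example_list_caps(device_t dev)
{
	uint16_t status;
	uint8_t ptr, capid;

	/* Capabilities exist only when the status register says so. */
	status = pci_read_config(dev, PCIR_STATUS, 2);
	if ((status & PCIM_STATUS_CAPPRESENT) == 0)
		return;

	/* The list head lives at PCIR_CAP_PTR for type-0/1 headers. */
	ptr = pci_read_config(dev, PCIR_CAP_PTR, 1);
	while (ptr != 0) {
		capid = pci_read_config(dev, ptr + PCICAP_ID, 1);
		device_printf(dev, "capability 0x%02x at offset 0x%02x\n",
		    capid, ptr);
		ptr = pci_read_config(dev, ptr + PCICAP_NEXTPTR, 1);
	}
}

In practice a driver would use pci_find_cap(9) with one of the PCIY_* IDs listed above rather than walking the list by hand.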
pcivar.h constants and types:
  #define PCI_MAXMAPS_0 - pcivar.h:38
  #define PCI_MAXMAPS_1 - pcivar.h:39
  #define PCI_MAXMAPS_2 - pcivar.h:40
  uint64_t pci_addr_t - pcivar.h:42
  #define PCICFG_VF - pcivar.h:175
struct msix_table_entry - pcivar.h:109
  u_int mte_vector - pcivar.h:110
  u_int mte_handlers - pcivar.h:111
struct msix_vector members:
  uint64_t mv_address - pcivar.h:104
  uint32_t mv_data - pcivar.h:105
  int mv_irq - pcivar.h:106
struct pci_device_table members:
  uint16_t vendor - pcivar.h:291
  uint16_t device - pcivar.h:292
  uint16_t subvendor - pcivar.h:293
  uint16_t subdevice - pcivar.h:294
  uint16_t revid - pcivar.h:297
struct pci_ea_entry - pcivar.h:161
  int eae_bei - pcivar.h:162
  uint32_t eae_flags - pcivar.h:163
  uint64_t eae_base - pcivar.h:164
  uint64_t eae_max_offset - pcivar.h:165
  uint32_t eae_cfg_offset - pcivar.h:166
struct pci_map - pcivar.h:61
  pci_addr_t pm_value - pcivar.h:62
  pci_addr_t pm_size - pcivar.h:63
  uint16_t pm_reg - pcivar.h:64
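struct pci_map records one decoded BAR: its raw register value, its probed size and the config register it came from. A hedged sketch of inspecting those fields through the pci_find_bar() helper declared in pcivar.h (PCIR_BAR() is a pcireg.h macro not shown in the index):

/*
 * Hedged sketch: look up the struct pci_map for BAR 0 and report its
 * decoded value and size.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

static void
example_show_bar0(device_t dev)
{
	struct pci_map *pm;

	pm = pci_find_bar(dev, PCIR_BAR(0));
	if (pm == NULL) {
		device_printf(dev, "BAR 0 not implemented\n");
		return;
	}
	device_printf(dev, "BAR reg 0x%x value 0x%jx size 0x%jx\n",
	    pm->pm_reg, (uintmax_t)pm->pm_value, (uintmax_t)pm->pm_size);
}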
struct pci_quirk - pci.c:237
  uint32_t devid - pci.c:238
  int type - pci.c:239
  int arg1 - pci.c:247
  int arg2 - pci.c:248
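struct pci_quirk ties a 32-bit vendor/device ID to a quirk type and two arguments, and pci.c keeps a static table of such entries plus helpers that search it. The following is a hedged, self-contained sketch of that pattern; the names example_quirk and EXAMPLE_QUIRK_MAP_REG and the device ID are illustrative placeholders, not values from the real table.

#include <sys/param.h>

/* Placeholder mirror of the quirk-entry layout described above. */
struct example_quirk {
	uint32_t devid;	/* device ID in the high 16 bits, vendor in the low */
	int	type;	/* which workaround to apply */
	int	arg1;	/* quirk-specific arguments */
	int	arg2;
};

#define	EXAMPLE_QUIRK_MAP_REG	1	/* placeholder quirk type */

static const struct example_quirk example_quirks[] = {
	{ 0x12348086, EXAMPLE_QUIRK_MAP_REG, 0x90, 0 },	/* fictitious entry */
	{ 0, 0, 0, 0 },
};

/* Return 1 when devid has a quirk of the given type. */
static int
example_has_quirk(uint32_t devid, int type)
{
	const struct example_quirk *q;

	for (q = example_quirks; q->devid != 0; q++)
		if (q->devid == devid && q->type == type)
			return (1);
	return (0);
}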
bus_dma_tag_t sc_dma_tag - pci_private.h:44
struct pcicfg_bridge members:
  uint8_t br_seclat - pcivar.h:46
  uint8_t br_subbus - pcivar.h:47
  uint8_t br_secbus - pcivar.h:48
  uint8_t br_pribus - pcivar.h:49
  uint16_t br_control - pcivar.h:50
int ea_location (struct pcicfg_ea) - pcivar.h:171
struct pcicfg_ht members:
  uint8_t ht_slave - pcivar.h:132
  uint8_t ht_msimap - pcivar.h:133
  uint16_t ht_msictrl - pcivar.h:134
  uint64_t ht_msiaddr - pcivar.h:135
device_t iov_pf (struct pcicfg_iov)
struct pcicfg_msi members:
  uint16_t msi_ctrl - pcivar.h:93
  uint8_t msi_location - pcivar.h:94
  uint8_t msi_msgnum - pcivar.h:95
  int msi_alloc - pcivar.h:96
  uint64_t msi_addr - pcivar.h:97
  uint16_t msi_data - pcivar.h:98
  u_int msi_handlers - pcivar.h:99
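The msi_* fields cache a device's MSI capability state (location, control word, address/data pair and allocation bookkeeping). Drivers reach that state through the MSI API rather than the structure; a minimal sketch of the usual probe-and-allocate pattern, assuming the standard pci_msi_count(9) and pci_alloc_msi(9) interfaces:

#include <sys/param.h>
#include <sys/bus.h>
#include <dev/pci/pcivar.h>

/*
 * Hedged sketch: request a single MSI vector and fall back to the
 * legacy INTx line when MSI is unavailable.  Returns the number of
 * message interrupts actually allocated (0 means use INTx).
 */
static int
example_setup_msi(device_t dev)
{
	int count;

	count = pci_msi_count(dev);
	if (count == 0)
		return (0);

	count = 1;		/* one handler is enough for this sketch */
	if (pci_alloc_msi(dev, &count) != 0)
		return (0);

	return (count);
}

On detach, pci_release_msi(9) returns the vectors to the bus.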
struct pcicfg_msix members:
  uint16_t msix_ctrl - pcivar.h:115
  uint16_t msix_msgnum - pcivar.h:116
  uint8_t msix_location - pcivar.h:117
  uint8_t msix_table_bar - pcivar.h:118
  uint8_t msix_pba_bar - pcivar.h:119
  uint32_t msix_table_offset - pcivar.h:120
  uint32_t msix_pba_offset - pcivar.h:121
  int msix_alloc - pcivar.h:122
  int msix_table_len - pcivar.h:123
  struct msix_table_entry * msix_table - pcivar.h:124
  struct msix_vector * msix_vectors - pcivar.h:125
  struct resource * msix_table_res - pcivar.h:126
  struct resource * msix_pba_res - pcivar.h:127
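The msix_* fields cache the MSI-X control word, the table and PBA BAR indices and offsets, and the allocation bookkeeping. The advertised message count is the table-size field of PCIR_MSIX_CTRL plus one; a hedged sketch of decoding it directly after locating the capability with pci_find_cap(9) (pci_msix_count(9) reports the same value):

#include <sys/param.h>
#include <sys/bus.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

/*
 * Hedged sketch: report how many MSI-X messages a device advertises.
 */
static int
example_msix_messages(device_t dev)
{
	uint16_t ctrl;
	int off;

	if (pci_find_cap(dev, PCIY_MSIX, &off) != 0)
		return (0);		/* no MSI-X capability */

	ctrl = pci_read_config(dev, off + PCIR_MSIX_CTRL, 2);
	return ((ctrl & PCIM_MSIXCTRL_TABLE_SIZE) + 1);
}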
struct pcicfg_pcie members:
  uint8_t pcie_location - pcivar.h:140
  uint8_t pcie_type - pcivar.h:141
  uint16_t pcie_flags - pcivar.h:142
  uint16_t pcie_device_ctl - pcivar.h:143
  uint16_t pcie_link_ctl - pcivar.h:144
  uint16_t pcie_slot_ctl - pcivar.h:145
  uint16_t pcie_root_ctl - pcivar.h:146
  uint16_t pcie_device_ctl2 - pcivar.h:147
  uint16_t pcie_link_ctl2 - pcivar.h:148
  uint16_t pcie_slot_ctl2 - pcivar.h:149
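The pcie_* fields snapshot the PCI Express capability control registers so the bus code can restore them after suspend/resume or a bus reset. A hedged sketch of reading the live link status register with the PCIER_LINK_STA offset listed above; PCIY_EXPRESS is the pcireg.h capability ID for PCI Express and is assumed here rather than taken from the index:

#include <sys/param.h>
#include <sys/bus.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

/*
 * Hedged sketch: return the PCI Express link status register, or 0
 * when the device has no express capability.
 */
static uint16_t
example_pcie_link_status(device_t dev)
{
	int off;

	if (pci_find_cap(dev, PCIY_EXPRESS, &off) != 0)
		return (0);	/* not a PCI Express device */
	return (pci_read_config(dev, off + PCIER_LINK_STA, 2));
}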
uint8_t pcix_location (struct pcicfg_pcix) - pcivar.h:154
struct pcicfg_pp members:
  uint16_t pp_cap - pcivar.h:55
  uint8_t pp_status - pcivar.h:56
  uint8_t pp_bse - pcivar.h:57
  uint8_t pp_data - pcivar.h:58
struct pcicfg_vpd members:
  uint8_t vpd_reg - pcivar.h:82
  char vpd_cached - pcivar.h:83
  char * vpd_ident - pcivar.h:84
  int vpd_rocnt - pcivar.h:85
  struct vpd_readonly * vpd_ros - pcivar.h:86
  int vpd_wcnt - pcivar.h:87
  struct vpd_write * vpd_w - pcivar.h:88
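The vpd_* fields cache parsed Vital Product Data so the hardware is only read once. Drivers consume the cache through accessors rather than touching these fields; a hedged sketch using pci_get_vpd_ident(9) and pci_get_vpd_readonly(9):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <dev/pci/pcivar.h>

/*
 * Hedged sketch: print the VPD identifier string and the serial
 * number ("SN") read-only keyword, when the device provides them.
 */
static void
example_print_vpd(device_t dev)
{
	const char *ident, *sn;

	if (pci_get_vpd_ident(dev, &ident) == 0)
		device_printf(dev, "VPD ident: %s\n", ident);
	if (pci_get_vpd_readonly(dev, "SN", &sn) == 0)
		device_printf(dev, "VPD serial number: %s\n", sn);
}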
struct pcicfg (pcicfgregs) - pcivar.h:178
  device_t dev - pcivar.h:179
  uint16_t subvendor - pcivar.h:183
  uint16_t subdevice - pcivar.h:184
  uint16_t vendor - pcivar.h:185
  uint16_t device - pcivar.h:186
  uint16_t cmdreg - pcivar.h:188
  uint16_t statreg - pcivar.h:189
  uint8_t baseclass - pcivar.h:191
  uint8_t subclass - pcivar.h:192
  uint8_t progif - pcivar.h:193
  uint8_t revid - pcivar.h:194
  uint8_t hdrtype - pcivar.h:196
  uint8_t cachelnsz - pcivar.h:197
  uint8_t intpin - pcivar.h:198
  uint8_t intline - pcivar.h:199
  uint8_t mingnt - pcivar.h:201
  uint8_t maxlat - pcivar.h:202
  uint8_t lattimer - pcivar.h:203
  uint8_t mfdev - pcivar.h:205
  uint8_t nummaps - pcivar.h:206
  uint32_t domain - pcivar.h:208
  uint8_t bus - pcivar.h:209
  uint8_t slot - pcivar.h:210
  uint8_t func - pcivar.h:211
  uint32_t flags - pcivar.h:213
  struct pcicfg_bridge bridge - pcivar.h:215
  struct pcicfg_pp pp - pcivar.h:216
  struct pcicfg_vpd vpd - pcivar.h:217
  struct pcicfg_msi msi - pcivar.h:218
  struct pcicfg_msix msix - pcivar.h:219
  struct pcicfg_ht ht - pcivar.h:220
  struct pcicfg_pcie pcie - pcivar.h:221
  struct pcicfg_pcix pcix - pcivar.h:222
  struct pcicfg_iov * iov - pcivar.h:223
  struct pcicfg_ea ea - pcivar.h:225
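pcicfgregs is the per-function snapshot of configuration-header fields that the bus keeps in each child's ivars; drivers normally read it through the pci_get_*() accessors rather than dereferencing the structure. A minimal sketch, with the class and subclass values taken from the index above:

#include <sys/param.h>
#include <sys/bus.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

/*
 * Hedged sketch: match a child device by the header fields cached in
 * its pcicfg, using the standard ivar accessors.
 */
static int
example_is_wanted_device(device_t dev, uint16_t vendor, uint16_t product)
{
	return (pci_get_vendor(dev) == vendor &&
	    pci_get_device(dev) == product &&
	    pci_get_class(dev) == PCIC_SERIALBUS &&
	    pci_get_subclass(dev) == PCIS_SERIALBUS_USB);
}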
struct vpd_readonly members:
  char keyword[2] - pcivar.h:69
  char * value - pcivar.h:70
  int len - pcivar.h:71
struct vpd_readstate members (pci.c):
  device_t pcib - pci.c:1055
  pcicfgregs * cfg - pci.c:1056
  uint32_t val - pci.c:1057
  int bytesinval - pci.c:1058
  int off - pci.c:1059
  uint8_t cksum - pci.c:1060
struct vpd_write members:
  char keyword[2] - pcivar.h:75
  char * value - pcivar.h:76
  int start - pcivar.h:77
  int len - pcivar.h:78