1 | #include <freebsd/machine/rtems-bsd-config.h> |
---|
2 | |
---|
3 | /*- |
---|
4 | * Copyright (c) 1997, Stefan Esser <se@freebsd.org> |
---|
5 | * Copyright (c) 2000, Michael Smith <msmith@freebsd.org> |
---|
6 | * Copyright (c) 2000, BSDi |
---|
7 | * All rights reserved. |
---|
8 | * |
---|
9 | * Redistribution and use in source and binary forms, with or without |
---|
10 | * modification, are permitted provided that the following conditions |
---|
11 | * are met: |
---|
12 | * 1. Redistributions of source code must retain the above copyright |
---|
13 | * notice unmodified, this list of conditions, and the following |
---|
14 | * disclaimer. |
---|
15 | * 2. Redistributions in binary form must reproduce the above copyright |
---|
16 | * notice, this list of conditions and the following disclaimer in the |
---|
17 | * documentation and/or other materials provided with the distribution. |
---|
18 | * |
---|
19 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
---|
20 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
---|
21 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
---|
22 | * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, |
---|
23 | * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT |
---|
24 | * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
---|
25 | * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
---|
26 | * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
---|
27 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
---|
28 | * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
---|
29 | */ |
---|
30 | |
---|
31 | #include <freebsd/sys/cdefs.h> |
---|
32 | __FBSDID("$FreeBSD$"); |
---|
33 | |
---|
34 | #include <freebsd/local/opt_bus.h> |
---|
35 | |
---|
36 | #include <freebsd/sys/param.h> |
---|
37 | #include <freebsd/sys/systm.h> |
---|
38 | #include <freebsd/sys/malloc.h> |
---|
39 | #include <freebsd/sys/module.h> |
---|
40 | #include <freebsd/sys/linker.h> |
---|
41 | #include <freebsd/sys/fcntl.h> |
---|
42 | #include <freebsd/sys/conf.h> |
---|
43 | #include <freebsd/sys/kernel.h> |
---|
44 | #include <freebsd/sys/queue.h> |
---|
45 | #include <freebsd/sys/sysctl.h> |
---|
46 | #include <freebsd/sys/endian.h> |
---|
47 | |
---|
48 | #include <freebsd/vm/vm.h> |
---|
49 | #include <freebsd/vm/pmap.h> |
---|
50 | #ifndef __rtems__ |
---|
51 | #include <freebsd/vm/vm_extern.h> |
---|
52 | #endif /* __rtems__ */ |
---|
53 | |
---|
54 | #include <freebsd/sys/bus.h> |
---|
55 | #include <freebsd/machine/bus.h> |
---|
56 | #include <freebsd/sys/rman.h> |
---|
57 | #include <freebsd/machine/resource.h> |
---|
58 | #include <freebsd/machine/stdarg.h> |
---|
59 | |
---|
60 | #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) |
---|
61 | #include <freebsd/machine/intr_machdep.h> |
---|
62 | #endif |
---|
63 | |
---|
64 | #include <freebsd/sys/pciio.h> |
---|
65 | #include <freebsd/dev/pci/pcireg.h> |
---|
66 | #include <freebsd/dev/pci/pcivar.h> |
---|
67 | #include <freebsd/dev/pci/pci_private.h> |
---|
68 | |
---|
69 | #include <freebsd/dev/usb/controller/ehcireg.h> |
---|
70 | #include <freebsd/dev/usb/controller/ohcireg.h> |
---|
71 | #ifndef __rtems__ |
---|
72 | #include <freebsd/dev/usb/controller/uhcireg.h> |
---|
73 | #endif /* __rtems__ */ |
---|
74 | |
---|
75 | #include <freebsd/local/pcib_if.h> |
---|
76 | #include <freebsd/local/pci_if.h> |
---|
77 | |
---|
78 | #ifdef __HAVE_ACPI |
---|
79 | #include <freebsd/contrib/dev/acpica/include/acpi.h> |
---|
80 | #include <freebsd/local/acpi_if.h> |
---|
81 | #else |
---|
82 | #define ACPI_PWR_FOR_SLEEP(x, y, z) |
---|
83 | #endif |
---|
84 | |
---|
85 | static pci_addr_t pci_mapbase(uint64_t mapreg); |
---|
86 | static const char *pci_maptype(uint64_t mapreg); |
---|
87 | static int pci_mapsize(uint64_t testval); |
---|
88 | static int pci_maprange(uint64_t mapreg); |
---|
89 | static void pci_fixancient(pcicfgregs *cfg); |
---|
90 | static int pci_printf(pcicfgregs *cfg, const char *fmt, ...); |
---|
91 | |
---|
92 | static int pci_porten(device_t dev); |
---|
93 | static int pci_memen(device_t dev); |
---|
94 | static void pci_assign_interrupt(device_t bus, device_t dev, |
---|
95 | int force_route); |
---|
96 | static int pci_add_map(device_t bus, device_t dev, int reg, |
---|
97 | struct resource_list *rl, int force, int prefetch); |
---|
98 | static int pci_probe(device_t dev); |
---|
99 | static int pci_attach(device_t dev); |
---|
100 | static void pci_load_vendor_data(void); |
---|
101 | static int pci_describe_parse_line(char **ptr, int *vendor, |
---|
102 | int *device, char **desc); |
---|
103 | static char *pci_describe_device(device_t dev); |
---|
104 | static int pci_modevent(module_t mod, int what, void *arg); |
---|
105 | static void pci_hdrtypedata(device_t pcib, int b, int s, int f, |
---|
106 | pcicfgregs *cfg); |
---|
107 | static void pci_read_extcap(device_t pcib, pcicfgregs *cfg); |
---|
108 | static int pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, |
---|
109 | int reg, uint32_t *data); |
---|
110 | #if 0 |
---|
111 | static int pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, |
---|
112 | int reg, uint32_t data); |
---|
113 | #endif |
---|
114 | static void pci_read_vpd(device_t pcib, pcicfgregs *cfg); |
---|
115 | static void pci_disable_msi(device_t dev); |
---|
116 | static void pci_enable_msi(device_t dev, uint64_t address, |
---|
117 | uint16_t data); |
---|
118 | static void pci_enable_msix(device_t dev, u_int index, |
---|
119 | uint64_t address, uint32_t data); |
---|
120 | static void pci_mask_msix(device_t dev, u_int index); |
---|
121 | static void pci_unmask_msix(device_t dev, u_int index); |
---|
122 | static int pci_msi_blacklisted(void); |
---|
123 | static void pci_resume_msi(device_t dev); |
---|
124 | static void pci_resume_msix(device_t dev); |
---|
125 | static int pci_remap_intr_method(device_t bus, device_t dev, |
---|
126 | u_int irq); |
---|
127 | |
---|
128 | static device_method_t pci_methods[] = { |
---|
129 | /* Device interface */ |
---|
130 | DEVMETHOD(device_probe, pci_probe), |
---|
131 | DEVMETHOD(device_attach, pci_attach), |
---|
132 | DEVMETHOD(device_detach, bus_generic_detach), |
---|
133 | DEVMETHOD(device_shutdown, bus_generic_shutdown), |
---|
134 | DEVMETHOD(device_suspend, pci_suspend), |
---|
135 | DEVMETHOD(device_resume, pci_resume), |
---|
136 | |
---|
137 | /* Bus interface */ |
---|
138 | DEVMETHOD(bus_print_child, pci_print_child), |
---|
139 | DEVMETHOD(bus_probe_nomatch, pci_probe_nomatch), |
---|
140 | DEVMETHOD(bus_read_ivar, pci_read_ivar), |
---|
141 | DEVMETHOD(bus_write_ivar, pci_write_ivar), |
---|
142 | DEVMETHOD(bus_driver_added, pci_driver_added), |
---|
143 | DEVMETHOD(bus_setup_intr, pci_setup_intr), |
---|
144 | DEVMETHOD(bus_teardown_intr, pci_teardown_intr), |
---|
145 | |
---|
146 | DEVMETHOD(bus_get_resource_list,pci_get_resource_list), |
---|
147 | DEVMETHOD(bus_set_resource, bus_generic_rl_set_resource), |
---|
148 | DEVMETHOD(bus_get_resource, bus_generic_rl_get_resource), |
---|
149 | DEVMETHOD(bus_delete_resource, pci_delete_resource), |
---|
150 | DEVMETHOD(bus_alloc_resource, pci_alloc_resource), |
---|
151 | DEVMETHOD(bus_release_resource, bus_generic_rl_release_resource), |
---|
152 | DEVMETHOD(bus_activate_resource, pci_activate_resource), |
---|
153 | DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource), |
---|
154 | DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method), |
---|
155 | DEVMETHOD(bus_child_location_str, pci_child_location_str_method), |
---|
156 | DEVMETHOD(bus_remap_intr, pci_remap_intr_method), |
---|
157 | |
---|
158 | /* PCI interface */ |
---|
159 | DEVMETHOD(pci_read_config, pci_read_config_method), |
---|
160 | DEVMETHOD(pci_write_config, pci_write_config_method), |
---|
161 | DEVMETHOD(pci_enable_busmaster, pci_enable_busmaster_method), |
---|
162 | DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method), |
---|
163 | DEVMETHOD(pci_enable_io, pci_enable_io_method), |
---|
164 | DEVMETHOD(pci_disable_io, pci_disable_io_method), |
---|
165 | DEVMETHOD(pci_get_vpd_ident, pci_get_vpd_ident_method), |
---|
166 | DEVMETHOD(pci_get_vpd_readonly, pci_get_vpd_readonly_method), |
---|
167 | DEVMETHOD(pci_get_powerstate, pci_get_powerstate_method), |
---|
168 | DEVMETHOD(pci_set_powerstate, pci_set_powerstate_method), |
---|
169 | DEVMETHOD(pci_assign_interrupt, pci_assign_interrupt_method), |
---|
170 | DEVMETHOD(pci_find_extcap, pci_find_extcap_method), |
---|
171 | DEVMETHOD(pci_alloc_msi, pci_alloc_msi_method), |
---|
172 | DEVMETHOD(pci_alloc_msix, pci_alloc_msix_method), |
---|
173 | DEVMETHOD(pci_remap_msix, pci_remap_msix_method), |
---|
174 | DEVMETHOD(pci_release_msi, pci_release_msi_method), |
---|
175 | DEVMETHOD(pci_msi_count, pci_msi_count_method), |
---|
176 | DEVMETHOD(pci_msix_count, pci_msix_count_method), |
---|
177 | |
---|
178 | { 0, 0 } |
---|
179 | }; |
---|
180 | |
---|
181 | DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0); |
---|
182 | |
---|
183 | static devclass_t pci_devclass; |
---|
184 | DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, 0); |
---|
185 | MODULE_VERSION(pci, 1); |
---|
186 | |
---|
187 | static char *pci_vendordata; |
---|
188 | static size_t pci_vendordata_size; |
---|
189 | |
---|
190 | |
---|
191 | struct pci_quirk { |
---|
192 | uint32_t devid; /* Vendor/device of the card */ |
---|
193 | int type; |
---|
194 | #define PCI_QUIRK_MAP_REG 1 /* PCI map register in weird place */ |
---|
195 | #define PCI_QUIRK_DISABLE_MSI 2 /* MSI/MSI-X doesn't work */ |
---|
196 | #define PCI_QUIRK_ENABLE_MSI_VM 3 /* Older chipset in VM where MSI works */ |
---|
197 | int arg1; |
---|
198 | int arg2; |
---|
199 | }; |
---|
200 | |
---|
201 | struct pci_quirk pci_quirks[] = { |
---|
202 | /* The Intel 82371AB and 82443MX has a map register at offset 0x90. */ |
---|
203 | { 0x71138086, PCI_QUIRK_MAP_REG, 0x90, 0 }, |
---|
204 | { 0x719b8086, PCI_QUIRK_MAP_REG, 0x90, 0 }, |
---|
205 | /* As does the Serverworks OSB4 (the SMBus mapping register) */ |
---|
206 | { 0x02001166, PCI_QUIRK_MAP_REG, 0x90, 0 }, |
---|
207 | |
---|
208 | /* |
---|
209 | * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge |
---|
210 | * or the CMIC-SL (AKA ServerWorks GC_LE). |
---|
211 | */ |
---|
212 | { 0x00141166, PCI_QUIRK_DISABLE_MSI, 0, 0 }, |
---|
213 | { 0x00171166, PCI_QUIRK_DISABLE_MSI, 0, 0 }, |
---|
214 | |
---|
215 | /* |
---|
216 | * MSI doesn't work on earlier Intel chipsets including |
---|
217 | * E7500, E7501, E7505, 845, 865, 875/E7210, and 855. |
---|
218 | */ |
---|
219 | { 0x25408086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, |
---|
220 | { 0x254c8086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, |
---|
221 | { 0x25508086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, |
---|
222 | { 0x25608086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, |
---|
223 | { 0x25708086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, |
---|
224 | { 0x25788086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, |
---|
225 | { 0x35808086, PCI_QUIRK_DISABLE_MSI, 0, 0 }, |
---|
226 | |
---|
227 | /* |
---|
228 | * MSI doesn't work with devices behind the AMD 8131 HT-PCIX |
---|
229 | * bridge. |
---|
230 | */ |
---|
231 | { 0x74501022, PCI_QUIRK_DISABLE_MSI, 0, 0 }, |
---|
232 | |
---|
233 | /* |
---|
234 | * Some virtualization environments emulate an older chipset |
---|
235 | * but support MSI just fine. QEMU uses the Intel 82440. |
---|
236 | */ |
---|
237 | { 0x12378086, PCI_QUIRK_ENABLE_MSI_VM, 0, 0 }, |
---|
238 | |
---|
239 | { 0 } |
---|
240 | }; |
---|
241 | |
---|
242 | /* map register information */ |
---|
243 | #define PCI_MAPMEM 0x01 /* memory map */ |
---|
244 | #define PCI_MAPMEMP 0x02 /* prefetchable memory map */ |
---|
245 | #define PCI_MAPPORT 0x04 /* port map */ |
---|
246 | |
---|
247 | struct devlist pci_devq; |
---|
248 | uint32_t pci_generation; |
---|
249 | uint32_t pci_numdevs = 0; |
---|
250 | static int pcie_chipset, pcix_chipset; |
---|
251 | |
---|
252 | /* sysctl vars */ |
---|
253 | SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters"); |
---|
254 | |
---|
255 | static int pci_enable_io_modes = 1; |
---|
256 | TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes); |
---|
257 | SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW, |
---|
258 | &pci_enable_io_modes, 1, |
---|
259 | "Enable I/O and memory bits in the config register. Some BIOSes do not\n\ |
---|
260 | enable these bits correctly. We'd like to do this all the time, but there\n\ |
---|
261 | are some peripherals that this causes problems with."); |
---|
262 | |
---|
263 | static int pci_do_power_nodriver = 0; |
---|
264 | TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver); |
---|
265 | SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW, |
---|
266 | &pci_do_power_nodriver, 0, |
---|
267 | "Place a function into D3 state when no driver attaches to it. 0 means\n\ |
---|
268 | disable. 1 means conservatively place devices into D3 state. 2 means\n\ |
---|
269 | agressively place devices into D3 state. 3 means put absolutely everything\n\ |
---|
270 | in D3 state."); |
---|
271 | |
---|
272 | static int pci_do_power_resume = 1; |
---|
273 | TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume); |
---|
274 | SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW, |
---|
275 | &pci_do_power_resume, 1, |
---|
276 | "Transition from D3 -> D0 on resume."); |
---|
277 | |
---|
278 | static int pci_do_msi = 1; |
---|
279 | TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi); |
---|
280 | SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1, |
---|
281 | "Enable support for MSI interrupts"); |
---|
282 | |
---|
283 | static int pci_do_msix = 1; |
---|
284 | TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix); |
---|
285 | SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1, |
---|
286 | "Enable support for MSI-X interrupts"); |
---|
287 | |
---|
288 | static int pci_honor_msi_blacklist = 1; |
---|
289 | TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist); |
---|
290 | SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD, |
---|
291 | &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI"); |
---|
292 | |
---|
293 | #if defined(__i386__) || defined(__amd64__) |
---|
294 | static int pci_usb_takeover = 1; |
---|
295 | #else |
---|
296 | static int pci_usb_takeover = 0; |
---|
297 | #endif |
---|
298 | TUNABLE_INT("hw.pci.usb_early_takeover", &pci_usb_takeover); |
---|
299 | SYSCTL_INT(_hw_pci, OID_AUTO, usb_early_takeover, CTLFLAG_RD | CTLFLAG_TUN, |
---|
300 | &pci_usb_takeover, 1, "Enable early takeover of USB controllers.\n\ |
---|
301 | Disable this if you depend on BIOS emulation of USB devices, that is\n\ |
---|
302 | you use USB devices (like keyboard or mouse) but do not load USB drivers"); |
---|
303 | |
---|
304 | /* Find a device_t by bus/slot/function in domain 0 */ |
---|
305 | |
---|
306 | device_t |
---|
307 | pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func) |
---|
308 | { |
---|
309 | |
---|
310 | return (pci_find_dbsf(0, bus, slot, func)); |
---|
311 | } |
---|
312 | |
---|
313 | /* Find a device_t by domain/bus/slot/function */ |
---|
314 | |
---|
315 | device_t |
---|
316 | pci_find_dbsf(uint32_t domain, uint8_t bus, uint8_t slot, uint8_t func) |
---|
317 | { |
---|
318 | struct pci_devinfo *dinfo; |
---|
319 | |
---|
320 | STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { |
---|
321 | if ((dinfo->cfg.domain == domain) && |
---|
322 | (dinfo->cfg.bus == bus) && |
---|
323 | (dinfo->cfg.slot == slot) && |
---|
324 | (dinfo->cfg.func == func)) { |
---|
325 | return (dinfo->cfg.dev); |
---|
326 | } |
---|
327 | } |
---|
328 | |
---|
329 | return (NULL); |
---|
330 | } |
---|
331 | |
---|
332 | #ifndef __rtems__ |
---|
333 | /* Find a device_t by vendor/device ID */ |
---|
334 | |
---|
335 | device_t |
---|
336 | pci_find_device(uint16_t vendor, uint16_t device) |
---|
337 | { |
---|
338 | struct pci_devinfo *dinfo; |
---|
339 | |
---|
340 | STAILQ_FOREACH(dinfo, &pci_devq, pci_links) { |
---|
341 | if ((dinfo->cfg.vendor == vendor) && |
---|
342 | (dinfo->cfg.device == device)) { |
---|
343 | return (dinfo->cfg.dev); |
---|
344 | } |
---|
345 | } |
---|
346 | |
---|
347 | return (NULL); |
---|
348 | } |
---|
349 | #endif /* __rtems__ */ |
---|
350 | |
---|
351 | static int |
---|
352 | pci_printf(pcicfgregs *cfg, const char *fmt, ...) |
---|
353 | { |
---|
354 | va_list ap; |
---|
355 | int retval; |
---|
356 | |
---|
357 | retval = printf("pci%d:%d:%d:%d: ", cfg->domain, cfg->bus, cfg->slot, |
---|
358 | cfg->func); |
---|
359 | va_start(ap, fmt); |
---|
360 | retval += vprintf(fmt, ap); |
---|
361 | va_end(ap); |
---|
362 | return (retval); |
---|
363 | } |
---|
364 | |
---|
365 | /* return base address of memory or port map */ |
---|
366 | |
---|
367 | static pci_addr_t |
---|
368 | pci_mapbase(uint64_t mapreg) |
---|
369 | { |
---|
370 | |
---|
371 | if (PCI_BAR_MEM(mapreg)) |
---|
372 | return (mapreg & PCIM_BAR_MEM_BASE); |
---|
373 | else |
---|
374 | return (mapreg & PCIM_BAR_IO_BASE); |
---|
375 | } |
---|
376 | |
---|
377 | /* return map type of memory or port map */ |
---|
378 | |
---|
379 | static const char * |
---|
380 | pci_maptype(uint64_t mapreg) |
---|
381 | { |
---|
382 | |
---|
383 | if (PCI_BAR_IO(mapreg)) |
---|
384 | return ("I/O Port"); |
---|
385 | if (mapreg & PCIM_BAR_MEM_PREFETCH) |
---|
386 | return ("Prefetchable Memory"); |
---|
387 | return ("Memory"); |
---|
388 | } |
---|
389 | |
---|
390 | /* return log2 of map size decoded for memory or port map */ |
---|
391 | |
---|
392 | static int |
---|
393 | pci_mapsize(uint64_t testval) |
---|
394 | { |
---|
395 | int ln2size; |
---|
396 | |
---|
397 | testval = pci_mapbase(testval); |
---|
398 | ln2size = 0; |
---|
399 | if (testval != 0) { |
---|
400 | while ((testval & 1) == 0) |
---|
401 | { |
---|
402 | ln2size++; |
---|
403 | testval >>= 1; |
---|
404 | } |
---|
405 | } |
---|
406 | return (ln2size); |
---|
407 | } |
---|
408 | |
---|
409 | /* return log2 of address range supported by map register */ |
---|
410 | |
---|
411 | static int |
---|
412 | pci_maprange(uint64_t mapreg) |
---|
413 | { |
---|
414 | int ln2range = 0; |
---|
415 | |
---|
416 | if (PCI_BAR_IO(mapreg)) |
---|
417 | ln2range = 32; |
---|
418 | else |
---|
419 | switch (mapreg & PCIM_BAR_MEM_TYPE) { |
---|
420 | case PCIM_BAR_MEM_32: |
---|
421 | ln2range = 32; |
---|
422 | break; |
---|
423 | case PCIM_BAR_MEM_1MB: |
---|
424 | ln2range = 20; |
---|
425 | break; |
---|
426 | case PCIM_BAR_MEM_64: |
---|
427 | ln2range = 64; |
---|
428 | break; |
---|
429 | } |
---|
430 | return (ln2range); |
---|
431 | } |
---|
432 | |
---|
433 | /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */ |
---|
434 | |
---|
435 | static void |
---|
436 | pci_fixancient(pcicfgregs *cfg) |
---|
437 | { |
---|
438 | if (cfg->hdrtype != 0) |
---|
439 | return; |
---|
440 | |
---|
441 | /* PCI to PCI bridges use header type 1 */ |
---|
442 | if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI) |
---|
443 | cfg->hdrtype = 1; |
---|
444 | } |
---|
445 | |
---|
446 | /* extract header type specific config data */ |
---|
447 | |
---|
448 | static void |
---|
449 | pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg) |
---|
450 | { |
---|
451 | #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w) |
---|
452 | switch (cfg->hdrtype) { |
---|
453 | case 0: |
---|
454 | cfg->subvendor = REG(PCIR_SUBVEND_0, 2); |
---|
455 | cfg->subdevice = REG(PCIR_SUBDEV_0, 2); |
---|
456 | cfg->nummaps = PCI_MAXMAPS_0; |
---|
457 | break; |
---|
458 | case 1: |
---|
459 | cfg->nummaps = PCI_MAXMAPS_1; |
---|
460 | break; |
---|
461 | case 2: |
---|
462 | cfg->subvendor = REG(PCIR_SUBVEND_2, 2); |
---|
463 | cfg->subdevice = REG(PCIR_SUBDEV_2, 2); |
---|
464 | cfg->nummaps = PCI_MAXMAPS_2; |
---|
465 | break; |
---|
466 | } |
---|
467 | #undef REG |
---|
468 | } |
---|
469 | |
---|
470 | /* read configuration header into pcicfgregs structure */ |
---|
471 | struct pci_devinfo * |
---|
472 | pci_read_device(device_t pcib, int d, int b, int s, int f, size_t size) |
---|
473 | { |
---|
474 | #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w) |
---|
475 | pcicfgregs *cfg = NULL; |
---|
476 | struct pci_devinfo *devlist_entry; |
---|
477 | struct devlist *devlist_head; |
---|
478 | |
---|
479 | devlist_head = &pci_devq; |
---|
480 | |
---|
481 | devlist_entry = NULL; |
---|
482 | |
---|
483 | if (REG(PCIR_DEVVENDOR, 4) != 0xfffffffful) { |
---|
484 | devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO); |
---|
485 | if (devlist_entry == NULL) |
---|
486 | return (NULL); |
---|
487 | |
---|
488 | cfg = &devlist_entry->cfg; |
---|
489 | |
---|
490 | cfg->domain = d; |
---|
491 | cfg->bus = b; |
---|
492 | cfg->slot = s; |
---|
493 | cfg->func = f; |
---|
494 | cfg->vendor = REG(PCIR_VENDOR, 2); |
---|
495 | cfg->device = REG(PCIR_DEVICE, 2); |
---|
496 | cfg->cmdreg = REG(PCIR_COMMAND, 2); |
---|
497 | cfg->statreg = REG(PCIR_STATUS, 2); |
---|
498 | cfg->baseclass = REG(PCIR_CLASS, 1); |
---|
499 | cfg->subclass = REG(PCIR_SUBCLASS, 1); |
---|
500 | cfg->progif = REG(PCIR_PROGIF, 1); |
---|
501 | cfg->revid = REG(PCIR_REVID, 1); |
---|
502 | cfg->hdrtype = REG(PCIR_HDRTYPE, 1); |
---|
503 | cfg->cachelnsz = REG(PCIR_CACHELNSZ, 1); |
---|
504 | cfg->lattimer = REG(PCIR_LATTIMER, 1); |
---|
505 | cfg->intpin = REG(PCIR_INTPIN, 1); |
---|
506 | cfg->intline = REG(PCIR_INTLINE, 1); |
---|
507 | |
---|
508 | cfg->mingnt = REG(PCIR_MINGNT, 1); |
---|
509 | cfg->maxlat = REG(PCIR_MAXLAT, 1); |
---|
510 | |
---|
511 | cfg->mfdev = (cfg->hdrtype & PCIM_MFDEV) != 0; |
---|
512 | cfg->hdrtype &= ~PCIM_MFDEV; |
---|
513 | |
---|
514 | pci_fixancient(cfg); |
---|
515 | pci_hdrtypedata(pcib, b, s, f, cfg); |
---|
516 | |
---|
517 | if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT) |
---|
518 | pci_read_extcap(pcib, cfg); |
---|
519 | |
---|
520 | STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links); |
---|
521 | |
---|
522 | devlist_entry->conf.pc_sel.pc_domain = cfg->domain; |
---|
523 | devlist_entry->conf.pc_sel.pc_bus = cfg->bus; |
---|
524 | devlist_entry->conf.pc_sel.pc_dev = cfg->slot; |
---|
525 | devlist_entry->conf.pc_sel.pc_func = cfg->func; |
---|
526 | devlist_entry->conf.pc_hdr = cfg->hdrtype; |
---|
527 | |
---|
528 | devlist_entry->conf.pc_subvendor = cfg->subvendor; |
---|
529 | devlist_entry->conf.pc_subdevice = cfg->subdevice; |
---|
530 | devlist_entry->conf.pc_vendor = cfg->vendor; |
---|
531 | devlist_entry->conf.pc_device = cfg->device; |
---|
532 | |
---|
533 | devlist_entry->conf.pc_class = cfg->baseclass; |
---|
534 | devlist_entry->conf.pc_subclass = cfg->subclass; |
---|
535 | devlist_entry->conf.pc_progif = cfg->progif; |
---|
536 | devlist_entry->conf.pc_revid = cfg->revid; |
---|
537 | |
---|
538 | pci_numdevs++; |
---|
539 | pci_generation++; |
---|
540 | } |
---|
541 | return (devlist_entry); |
---|
542 | #undef REG |
---|
543 | } |
---|
544 | |
---|
545 | static void |
---|
546 | pci_read_extcap(device_t pcib, pcicfgregs *cfg) |
---|
547 | { |
---|
548 | #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w) |
---|
549 | #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w) |
---|
550 | #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) |
---|
551 | uint64_t addr; |
---|
552 | #endif |
---|
553 | uint32_t val; |
---|
554 | int ptr, nextptr, ptrptr; |
---|
555 | |
---|
556 | switch (cfg->hdrtype & PCIM_HDRTYPE) { |
---|
557 | case 0: |
---|
558 | case 1: |
---|
559 | ptrptr = PCIR_CAP_PTR; |
---|
560 | break; |
---|
561 | case 2: |
---|
562 | ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */ |
---|
563 | break; |
---|
564 | default: |
---|
565 | return; /* no extended capabilities support */ |
---|
566 | } |
---|
567 | nextptr = REG(ptrptr, 1); /* sanity check? */ |
---|
568 | |
---|
569 | /* |
---|
570 | * Read capability entries. |
---|
571 | */ |
---|
572 | while (nextptr != 0) { |
---|
573 | /* Sanity check */ |
---|
574 | if (nextptr > 255) { |
---|
575 | printf("illegal PCI extended capability offset %d\n", |
---|
576 | nextptr); |
---|
577 | return; |
---|
578 | } |
---|
579 | /* Find the next entry */ |
---|
580 | ptr = nextptr; |
---|
581 | nextptr = REG(ptr + PCICAP_NEXTPTR, 1); |
---|
582 | |
---|
583 | /* Process this entry */ |
---|
584 | switch (REG(ptr + PCICAP_ID, 1)) { |
---|
585 | case PCIY_PMG: /* PCI power management */ |
---|
586 | if (cfg->pp.pp_cap == 0) { |
---|
587 | cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2); |
---|
588 | cfg->pp.pp_status = ptr + PCIR_POWER_STATUS; |
---|
589 | cfg->pp.pp_pmcsr = ptr + PCIR_POWER_PMCSR; |
---|
590 | if ((nextptr - ptr) > PCIR_POWER_DATA) |
---|
591 | cfg->pp.pp_data = ptr + PCIR_POWER_DATA; |
---|
592 | } |
---|
593 | break; |
---|
594 | #if defined(__i386__) || defined(__amd64__) || defined(__powerpc__) |
---|
595 | case PCIY_HT: /* HyperTransport */ |
---|
596 | /* Determine HT-specific capability type. */ |
---|
597 | val = REG(ptr + PCIR_HT_COMMAND, 2); |
---|
598 | switch (val & PCIM_HTCMD_CAP_MASK) { |
---|
599 | case PCIM_HTCAP_MSI_MAPPING: |
---|
600 | if (!(val & PCIM_HTCMD_MSI_FIXED)) { |
---|
601 | /* Sanity check the mapping window. */ |
---|
602 | addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI, |
---|
603 | 4); |
---|
604 | addr <<= 32; |
---|
605 | addr |= REG(ptr + PCIR_HTMSI_ADDRESS_LO, |
---|
606 | 4); |
---|
607 | if (addr != MSI_INTEL_ADDR_BASE) |
---|
608 | device_printf(pcib, |
---|
609 | "HT Bridge at pci%d:%d:%d:%d has non-default MSI window 0x%llx\n", |
---|
610 | cfg->domain, cfg->bus, |
---|
611 | cfg->slot, cfg->func, |
---|
612 | (long long)addr); |
---|
613 | } else |
---|
614 | addr = MSI_INTEL_ADDR_BASE; |
---|
615 | |
---|
616 | cfg->ht.ht_msimap = ptr; |
---|
617 | cfg->ht.ht_msictrl = val; |
---|
618 | cfg->ht.ht_msiaddr = addr; |
---|
619 | break; |
---|
620 | } |
---|
621 | break; |
---|
622 | #endif |
---|
623 | case PCIY_MSI: /* PCI MSI */ |
---|
624 | cfg->msi.msi_location = ptr; |
---|
625 | cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2); |
---|
626 | cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl & |
---|
627 | PCIM_MSICTRL_MMC_MASK)>>1); |
---|
628 | break; |
---|
629 | case PCIY_MSIX: /* PCI MSI-X */ |
---|
630 | cfg->msix.msix_location = ptr; |
---|
631 | cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2); |
---|
632 | cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl & |
---|
633 | PCIM_MSIXCTRL_TABLE_SIZE) + 1; |
---|
634 | val = REG(ptr + PCIR_MSIX_TABLE, 4); |
---|
635 | cfg->msix.msix_table_bar = PCIR_BAR(val & |
---|
636 | PCIM_MSIX_BIR_MASK); |
---|
637 | cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK; |
---|
638 | val = REG(ptr + PCIR_MSIX_PBA, 4); |
---|
639 | cfg->msix.msix_pba_bar = PCIR_BAR(val & |
---|
640 | PCIM_MSIX_BIR_MASK); |
---|
641 | cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK; |
---|
642 | break; |
---|
643 | case PCIY_VPD: /* PCI Vital Product Data */ |
---|
644 | cfg->vpd.vpd_reg = ptr; |
---|
645 | break; |
---|
646 | case PCIY_SUBVENDOR: |
---|
647 | /* Should always be true. */ |
---|
648 | if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) { |
---|
649 | val = REG(ptr + PCIR_SUBVENDCAP_ID, 4); |
---|
650 | cfg->subvendor = val & 0xffff; |
---|
651 | cfg->subdevice = val >> 16; |
---|
652 | } |
---|
653 | break; |
---|
654 | case PCIY_PCIX: /* PCI-X */ |
---|
655 | /* |
---|
656 | * Assume we have a PCI-X chipset if we have |
---|
657 | * at least one PCI-PCI bridge with a PCI-X |
---|
658 | * capability. Note that some systems with |
---|
659 | * PCI-express or HT chipsets might match on |
---|
660 | * this check as well. |
---|
661 | */ |
---|
662 | if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) |
---|
663 | pcix_chipset = 1; |
---|
664 | break; |
---|
665 | case PCIY_EXPRESS: /* PCI-express */ |
---|
666 | /* |
---|
667 | * Assume we have a PCI-express chipset if we have |
---|
668 | * at least one PCI-express device. |
---|
669 | */ |
---|
670 | pcie_chipset = 1; |
---|
671 | break; |
---|
672 | default: |
---|
673 | break; |
---|
674 | } |
---|
675 | } |
---|
676 | /* REG and WREG use carry through to next functions */ |
---|
677 | } |
---|
678 | |
---|
679 | /* |
---|
680 | * PCI Vital Product Data |
---|
681 | */ |
---|
682 | |
---|
683 | #define PCI_VPD_TIMEOUT 1000000 |
---|
684 | |
---|
685 | static int |
---|
686 | pci_read_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t *data) |
---|
687 | { |
---|
688 | int count = PCI_VPD_TIMEOUT; |
---|
689 | |
---|
690 | KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned")); |
---|
691 | |
---|
692 | WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg, 2); |
---|
693 | |
---|
694 | while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) != 0x8000) { |
---|
695 | if (--count < 0) |
---|
696 | return (ENXIO); |
---|
697 | DELAY(1); /* limit looping */ |
---|
698 | } |
---|
699 | *data = (REG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, 4)); |
---|
700 | |
---|
701 | return (0); |
---|
702 | } |
---|
703 | |
---|
704 | #if 0 |
---|
705 | static int |
---|
706 | pci_write_vpd_reg(device_t pcib, pcicfgregs *cfg, int reg, uint32_t data) |
---|
707 | { |
---|
708 | int count = PCI_VPD_TIMEOUT; |
---|
709 | |
---|
710 | KASSERT((reg & 3) == 0, ("VPD register must by 4 byte aligned")); |
---|
711 | |
---|
712 | WREG(cfg->vpd.vpd_reg + PCIR_VPD_DATA, data, 4); |
---|
713 | WREG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, reg | 0x8000, 2); |
---|
714 | while ((REG(cfg->vpd.vpd_reg + PCIR_VPD_ADDR, 2) & 0x8000) == 0x8000) { |
---|
715 | if (--count < 0) |
---|
716 | return (ENXIO); |
---|
717 | DELAY(1); /* limit looping */ |
---|
718 | } |
---|
719 | |
---|
720 | return (0); |
---|
721 | } |
---|
722 | #endif |
---|
723 | |
---|
724 | #undef PCI_VPD_TIMEOUT |
---|
725 | |
---|
726 | struct vpd_readstate { |
---|
727 | device_t pcib; |
---|
728 | pcicfgregs *cfg; |
---|
729 | uint32_t val; |
---|
730 | int bytesinval; |
---|
731 | int off; |
---|
732 | uint8_t cksum; |
---|
733 | }; |
---|
734 | |
---|
735 | static int |
---|
736 | vpd_nextbyte(struct vpd_readstate *vrs, uint8_t *data) |
---|
737 | { |
---|
738 | uint32_t reg; |
---|
739 | uint8_t byte; |
---|
740 | |
---|
741 | if (vrs->bytesinval == 0) { |
---|
742 | if (pci_read_vpd_reg(vrs->pcib, vrs->cfg, vrs->off, ®)) |
---|
743 | return (ENXIO); |
---|
744 | vrs->val = le32toh(reg); |
---|
745 | vrs->off += 4; |
---|
746 | byte = vrs->val & 0xff; |
---|
747 | vrs->bytesinval = 3; |
---|
748 | } else { |
---|
749 | vrs->val = vrs->val >> 8; |
---|
750 | byte = vrs->val & 0xff; |
---|
751 | vrs->bytesinval--; |
---|
752 | } |
---|
753 | |
---|
754 | vrs->cksum += byte; |
---|
755 | *data = byte; |
---|
756 | return (0); |
---|
757 | } |
---|
758 | |
---|
/*
 * Read and parse a device's Vital Product Data into cfg->vpd.
 *
 * VPD is a small tag-encoded data area containing an identifier
 * string, a checksummed read-only keyword section (VPD-R, with the
 * checksum carried by the "RV" keyword) and a read/write keyword
 * section (VPD-W).  The tags are walked with a small state machine:
 *
 *	state 0  - expecting an item tag byte
 *	state 1  - copying the identifier string
 *	state 2  - expecting a VPD-R keyword header
 *	state 3  - copying a VPD-R keyword value
 *	state 4  - skipping the data of an unhandled item
 *	state 5  - expecting a VPD-W keyword header
 *	state 6  - copying a VPD-W keyword value
 *	state -1 - finished (end tag or invalid data)
 *	state -2 - I/O error while reading VPD
 *
 * On error the partially built allocations are released; in every
 * case cfg->vpd.vpd_cached is set so the read is attempted only once.
 */
static void
pci_read_vpd(device_t pcib, pcicfgregs *cfg)
{
	struct vpd_readstate vrs;
	int state;
	int name;
	int remain;
	int i;
	int alloc, off;		/* alloc/off for RO/W arrays */
	int cksumvalid;		/* -1 unknown, 0 bad, 1 good */
	int dflen;
	uint8_t byte;
	uint8_t byte2;

	/* init vpd reader */
	vrs.bytesinval = 0;
	vrs.off = 0;
	vrs.pcib = pcib;
	vrs.cfg = cfg;
	vrs.cksum = 0;

	state = 0;
	name = remain = i = 0;	/* shut up stupid gcc */
	alloc = off = 0;	/* shut up stupid gcc */
	dflen = 0;		/* shut up stupid gcc */
	cksumvalid = -1;
	while (state >= 0) {
		if (vpd_nextbyte(&vrs, &byte)) {
			state = -2;
			break;
		}
#if 0
		printf("vpd: val: %#x, off: %d, bytesinval: %d, byte: %#hhx, " \
		    "state: %d, remain: %d, name: %#x, i: %d\n", vrs.val,
		    vrs.off, vrs.bytesinval, byte, state, remain, name, i);
#endif
		switch (state) {
		case 0:		/* item name */
			if (byte & 0x80) {
				/* Large resource: 16-bit LE length follows. */
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain = byte2;
				if (vpd_nextbyte(&vrs, &byte2)) {
					state = -2;
					break;
				}
				remain |= byte2 << 8;
				/*
				 * VPD is at most 0x7f words of 4 bytes;
				 * a larger length means corrupt data.
				 */
				if (remain > (0x7f*4 - vrs.off)) {
					state = -1;
					printf(
			    "pci%d:%d:%d:%d: invalid VPD data, remain %#x\n",
					    cfg->domain, cfg->bus, cfg->slot,
					    cfg->func, remain);
				}
				name = byte & 0x7f;
			} else {
				/* Small resource: 3-bit length in the tag. */
				remain = byte & 0x7;
				name = (byte >> 3) & 0xf;
			}
			switch (name) {
			case 0x2:	/* String */
				cfg->vpd.vpd_ident = malloc(remain + 1,
				    M_DEVBUF, M_WAITOK);
				i = 0;
				state = 1;
				break;
			case 0xf:	/* End */
				state = -1;
				break;
			case 0x10:	/* VPD-R */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_ros = malloc(alloc *
				    sizeof(*cfg->vpd.vpd_ros), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 2;
				break;
			case 0x11:	/* VPD-W */
				alloc = 8;
				off = 0;
				cfg->vpd.vpd_w = malloc(alloc *
				    sizeof(*cfg->vpd.vpd_w), M_DEVBUF,
				    M_WAITOK | M_ZERO);
				state = 5;
				break;
			default:	/* Invalid data, abort */
				state = -1;
				break;
			}
			break;

		case 1:	/* Identifier String */
			cfg->vpd.vpd_ident[i++] = byte;
			remain--;
			if (remain == 0)  {
				cfg->vpd.vpd_ident[i] = '\0';
				state = 0;
			}
			break;

		case 2:	/* VPD-R Keyword Header */
			/* Grow the array geometrically when it fills. */
			if (off == alloc) {
				cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_ros[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_ros[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			dflen = byte2;
			if (dflen == 0 &&
			    strncmp(cfg->vpd.vpd_ros[off].keyword, "RV",
			    2) == 0) {
				/*
				 * if this happens, we can't trust the rest
				 * of the VPD.
				 */
				printf(
				    "pci%d:%d:%d:%d: bad keyword length: %d\n",
				    cfg->domain, cfg->bus, cfg->slot,
				    cfg->func, dflen);
				cksumvalid = 0;
				state = -1;
				break;
			} else if (dflen == 0) {
				/* Empty value still gets a NUL string. */
				cfg->vpd.vpd_ros[off].value = malloc(1 *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
				cfg->vpd.vpd_ros[off].value[0] = '\x00';
			} else
				cfg->vpd.vpd_ros[off].value = malloc(
				    (dflen + 1) *
				    sizeof(*cfg->vpd.vpd_ros[off].value),
				    M_DEVBUF, M_WAITOK);
			remain -= 3;	/* keyword (2) + length (1) consumed */
			i = 0;
			/* keep in sync w/ state 3's transitions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 2;
			else
				state = 3;
			break;

		case 3:	/* VPD-R Keyword Value */
			cfg->vpd.vpd_ros[off].value[i++] = byte;
			/*
			 * The "RV" keyword's first byte completes the
			 * checksum: the sum of all bytes through it must
			 * be 0 mod 256.
			 */
			if (strncmp(cfg->vpd.vpd_ros[off].keyword,
			    "RV", 2) == 0 && cksumvalid == -1) {
				if (vrs.cksum == 0)
					cksumvalid = 1;
				else {
					if (bootverbose)
						printf(
				"pci%d:%d:%d:%d: bad VPD cksum, remain %hhu\n",
						    cfg->domain, cfg->bus,
						    cfg->slot, cfg->func,
						    vrs.cksum);
					cksumvalid = 0;
					state = -1;
					break;
				}
			}
			dflen--;
			remain--;
			/* keep in sync w/ state 2's transitions */
			if (dflen == 0)
				cfg->vpd.vpd_ros[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				/* Trim the array to its final size. */
				cfg->vpd.vpd_rocnt = off;
				cfg->vpd.vpd_ros = reallocf(cfg->vpd.vpd_ros,
				    off * sizeof(*cfg->vpd.vpd_ros),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 2;
			break;

		case 4:	/* skip data of an item we don't handle */
			remain--;
			if (remain == 0)
				state = 0;
			break;

		case 5:	/* VPD-W Keyword Header */
			if (off == alloc) {
				cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
				    (alloc *= 2) * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
			}
			cfg->vpd.vpd_w[off].keyword[0] = byte;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].keyword[1] = byte2;
			if (vpd_nextbyte(&vrs, &byte2)) {
				state = -2;
				break;
			}
			cfg->vpd.vpd_w[off].len = dflen = byte2;
			/* Remember where this value lives for writeback. */
			cfg->vpd.vpd_w[off].start = vrs.off - vrs.bytesinval;
			cfg->vpd.vpd_w[off].value = malloc((dflen + 1) *
			    sizeof(*cfg->vpd.vpd_w[off].value),
			    M_DEVBUF, M_WAITOK);
			remain -= 3;
			i = 0;
			/* keep in sync w/ state 6's transitions */
			if (dflen == 0 && remain == 0)
				state = 0;
			else if (dflen == 0)
				state = 5;
			else
				state = 6;
			break;

		case 6:	/* VPD-W Keyword Value */
			cfg->vpd.vpd_w[off].value[i++] = byte;
			dflen--;
			remain--;
			/* keep in sync w/ state 5's transitions */
			if (dflen == 0)
				cfg->vpd.vpd_w[off++].value[i++] = '\0';
			if (dflen == 0 && remain == 0) {
				cfg->vpd.vpd_wcnt = off;
				cfg->vpd.vpd_w = reallocf(cfg->vpd.vpd_w,
				    off * sizeof(*cfg->vpd.vpd_w),
				    M_DEVBUF, M_WAITOK | M_ZERO);
				state = 0;
			} else if (dflen == 0)
				state = 5;
			break;

		default:
			printf("pci%d:%d:%d:%d: invalid state: %d\n",
			    cfg->domain, cfg->bus, cfg->slot, cfg->func,
			    state);
			state = -1;
			break;
		}
	}

	if (cksumvalid == 0 || state < -1) {
		/* read-only data bad, clean up */
		if (cfg->vpd.vpd_ros != NULL) {
			for (off = 0; cfg->vpd.vpd_ros[off].value; off++)
				free(cfg->vpd.vpd_ros[off].value, M_DEVBUF);
			free(cfg->vpd.vpd_ros, M_DEVBUF);
			cfg->vpd.vpd_ros = NULL;
		}
	}
	if (state < -1) {
		/* I/O error, clean up */
		printf("pci%d:%d:%d:%d: failed to read VPD data.\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		if (cfg->vpd.vpd_ident != NULL) {
			free(cfg->vpd.vpd_ident, M_DEVBUF);
			cfg->vpd.vpd_ident = NULL;
		}
		if (cfg->vpd.vpd_w != NULL) {
			for (off = 0; cfg->vpd.vpd_w[off].value; off++)
				free(cfg->vpd.vpd_w[off].value, M_DEVBUF);
			free(cfg->vpd.vpd_w, M_DEVBUF);
			cfg->vpd.vpd_w = NULL;
		}
	}
	/* Mark the cache valid even on failure so we only try once. */
	cfg->vpd.vpd_cached = 1;
#undef REG
#undef WREG
}
---|
1038 | |
---|
1039 | int |
---|
1040 | pci_get_vpd_ident_method(device_t dev, device_t child, const char **identptr) |
---|
1041 | { |
---|
1042 | struct pci_devinfo *dinfo = device_get_ivars(child); |
---|
1043 | pcicfgregs *cfg = &dinfo->cfg; |
---|
1044 | |
---|
1045 | if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) |
---|
1046 | pci_read_vpd(device_get_parent(dev), cfg); |
---|
1047 | |
---|
1048 | *identptr = cfg->vpd.vpd_ident; |
---|
1049 | |
---|
1050 | if (*identptr == NULL) |
---|
1051 | return (ENXIO); |
---|
1052 | |
---|
1053 | return (0); |
---|
1054 | } |
---|
1055 | |
---|
1056 | int |
---|
1057 | pci_get_vpd_readonly_method(device_t dev, device_t child, const char *kw, |
---|
1058 | const char **vptr) |
---|
1059 | { |
---|
1060 | struct pci_devinfo *dinfo = device_get_ivars(child); |
---|
1061 | pcicfgregs *cfg = &dinfo->cfg; |
---|
1062 | int i; |
---|
1063 | |
---|
1064 | if (!cfg->vpd.vpd_cached && cfg->vpd.vpd_reg != 0) |
---|
1065 | pci_read_vpd(device_get_parent(dev), cfg); |
---|
1066 | |
---|
1067 | for (i = 0; i < cfg->vpd.vpd_rocnt; i++) |
---|
1068 | if (memcmp(kw, cfg->vpd.vpd_ros[i].keyword, |
---|
1069 | sizeof(cfg->vpd.vpd_ros[i].keyword)) == 0) { |
---|
1070 | *vptr = cfg->vpd.vpd_ros[i].value; |
---|
1071 | } |
---|
1072 | |
---|
1073 | if (i != cfg->vpd.vpd_rocnt) |
---|
1074 | return (0); |
---|
1075 | |
---|
1076 | *vptr = NULL; |
---|
1077 | return (ENXIO); |
---|
1078 | } |
---|
1079 | |
---|
1080 | /* |
---|
1081 | * Find the requested extended capability and return the offset in |
---|
1082 | * configuration space via the pointer provided. The function returns |
---|
1083 | * 0 on success and error code otherwise. |
---|
1084 | */ |
---|
1085 | int |
---|
1086 | pci_find_extcap_method(device_t dev, device_t child, int capability, |
---|
1087 | int *capreg) |
---|
1088 | { |
---|
1089 | struct pci_devinfo *dinfo = device_get_ivars(child); |
---|
1090 | pcicfgregs *cfg = &dinfo->cfg; |
---|
1091 | u_int32_t status; |
---|
1092 | u_int8_t ptr; |
---|
1093 | |
---|
1094 | /* |
---|
1095 | * Check the CAP_LIST bit of the PCI status register first. |
---|
1096 | */ |
---|
1097 | status = pci_read_config(child, PCIR_STATUS, 2); |
---|
1098 | if (!(status & PCIM_STATUS_CAPPRESENT)) |
---|
1099 | return (ENXIO); |
---|
1100 | |
---|
1101 | /* |
---|
1102 | * Determine the start pointer of the capabilities list. |
---|
1103 | */ |
---|
1104 | switch (cfg->hdrtype & PCIM_HDRTYPE) { |
---|
1105 | case 0: |
---|
1106 | case 1: |
---|
1107 | ptr = PCIR_CAP_PTR; |
---|
1108 | break; |
---|
1109 | case 2: |
---|
1110 | ptr = PCIR_CAP_PTR_2; |
---|
1111 | break; |
---|
1112 | default: |
---|
1113 | /* XXX: panic? */ |
---|
1114 | return (ENXIO); /* no extended capabilities support */ |
---|
1115 | } |
---|
1116 | ptr = pci_read_config(child, ptr, 1); |
---|
1117 | |
---|
1118 | /* |
---|
1119 | * Traverse the capabilities list. |
---|
1120 | */ |
---|
1121 | while (ptr != 0) { |
---|
1122 | if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) { |
---|
1123 | if (capreg != NULL) |
---|
1124 | *capreg = ptr; |
---|
1125 | return (0); |
---|
1126 | } |
---|
1127 | ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1); |
---|
1128 | } |
---|
1129 | |
---|
1130 | return (ENOENT); |
---|
1131 | } |
---|
1132 | |
---|
1133 | /* |
---|
1134 | * Support for MSI-X message interrupts. |
---|
1135 | */ |
---|
1136 | void |
---|
1137 | pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data) |
---|
1138 | { |
---|
1139 | struct pci_devinfo *dinfo = device_get_ivars(dev); |
---|
1140 | struct pcicfg_msix *msix = &dinfo->cfg.msix; |
---|
1141 | uint32_t offset; |
---|
1142 | |
---|
1143 | KASSERT(msix->msix_table_len > index, ("bogus index")); |
---|
1144 | offset = msix->msix_table_offset + index * 16; |
---|
1145 | bus_write_4(msix->msix_table_res, offset, address & 0xffffffff); |
---|
1146 | bus_write_4(msix->msix_table_res, offset + 4, address >> 32); |
---|
1147 | bus_write_4(msix->msix_table_res, offset + 8, data); |
---|
1148 | |
---|
1149 | /* Enable MSI -> HT mapping. */ |
---|
1150 | pci_ht_map_msi(dev, address); |
---|
1151 | } |
---|
1152 | |
---|
1153 | void |
---|
1154 | pci_mask_msix(device_t dev, u_int index) |
---|
1155 | { |
---|
1156 | struct pci_devinfo *dinfo = device_get_ivars(dev); |
---|
1157 | struct pcicfg_msix *msix = &dinfo->cfg.msix; |
---|
1158 | uint32_t offset, val; |
---|
1159 | |
---|
1160 | KASSERT(msix->msix_msgnum > index, ("bogus index")); |
---|
1161 | offset = msix->msix_table_offset + index * 16 + 12; |
---|
1162 | val = bus_read_4(msix->msix_table_res, offset); |
---|
1163 | if (!(val & PCIM_MSIX_VCTRL_MASK)) { |
---|
1164 | val |= PCIM_MSIX_VCTRL_MASK; |
---|
1165 | bus_write_4(msix->msix_table_res, offset, val); |
---|
1166 | } |
---|
1167 | } |
---|
1168 | |
---|
1169 | void |
---|
1170 | pci_unmask_msix(device_t dev, u_int index) |
---|
1171 | { |
---|
1172 | struct pci_devinfo *dinfo = device_get_ivars(dev); |
---|
1173 | struct pcicfg_msix *msix = &dinfo->cfg.msix; |
---|
1174 | uint32_t offset, val; |
---|
1175 | |
---|
1176 | KASSERT(msix->msix_table_len > index, ("bogus index")); |
---|
1177 | offset = msix->msix_table_offset + index * 16 + 12; |
---|
1178 | val = bus_read_4(msix->msix_table_res, offset); |
---|
1179 | if (val & PCIM_MSIX_VCTRL_MASK) { |
---|
1180 | val &= ~PCIM_MSIX_VCTRL_MASK; |
---|
1181 | bus_write_4(msix->msix_table_res, offset, val); |
---|
1182 | } |
---|
1183 | } |
---|
1184 | |
---|
1185 | int |
---|
1186 | pci_pending_msix(device_t dev, u_int index) |
---|
1187 | { |
---|
1188 | struct pci_devinfo *dinfo = device_get_ivars(dev); |
---|
1189 | struct pcicfg_msix *msix = &dinfo->cfg.msix; |
---|
1190 | uint32_t offset, bit; |
---|
1191 | |
---|
1192 | KASSERT(msix->msix_table_len > index, ("bogus index")); |
---|
1193 | offset = msix->msix_pba_offset + (index / 32) * 4; |
---|
1194 | bit = 1 << index % 32; |
---|
1195 | return (bus_read_4(msix->msix_pba_res, offset) & bit); |
---|
1196 | } |
---|
1197 | |
---|
1198 | /* |
---|
1199 | * Restore MSI-X registers and table during resume. If MSI-X is |
---|
1200 | * enabled then walk the virtual table to restore the actual MSI-X |
---|
1201 | * table. |
---|
1202 | */ |
---|
static void
pci_resume_msix(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	int i;

	if (msix->msix_alloc > 0) {
		/*
		 * First, mask all vectors, so that no stale message
		 * fires while the table is being reprogrammed.
		 */
		for (i = 0; i < msix->msix_msgnum; i++)
			pci_mask_msix(dev, i);

		/* Second, program any messages with at least one handler. */
		for (i = 0; i < msix->msix_table_len; i++) {
			mte = &msix->msix_table[i];
			if (mte->mte_vector == 0 || mte->mte_handlers == 0)
				continue;
			/* mte_vector is 1-based into msix_vectors[]. */
			mv = &msix->msix_vectors[mte->mte_vector - 1];
			pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
			pci_unmask_msix(dev, i);
		}
	}
	/* Finally, restore the saved MSI-X control register. */
	pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);
}
---|
1230 | |
---|
1231 | /* |
---|
1232 | * Attempt to allocate *count MSI-X messages. The actual number allocated is |
---|
1233 | * returned in *count. After this function returns, each message will be |
---|
1234 | * available to the driver as SYS_RES_IRQ resources starting at rid 1. |
---|
1235 | */ |
---|
int
pci_alloc_msix_method(device_t dev, device_t child, int *count)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int actual, error, i, irq, max;

	/* Don't let count == 0 get us into trouble. */
	if (*count == 0)
		return (EINVAL);

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI-X capability present? */
	if (cfg->msix.msix_location == 0 || !pci_do_msix)
		return (ENODEV);

	/*
	 * Make sure the appropriate BARs are mapped.  The driver must
	 * have already activated the memory resources backing the
	 * MSI-X table and PBA (they may share one BAR).
	 */
	rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
	    cfg->msix.msix_table_bar);
	if (rle == NULL || rle->res == NULL ||
	    !(rman_get_flags(rle->res) & RF_ACTIVE))
		return (ENXIO);
	cfg->msix.msix_table_res = rle->res;
	if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
		    cfg->msix.msix_pba_bar);
		if (rle == NULL || rle->res == NULL ||
		    !(rman_get_flags(rle->res) & RF_ACTIVE))
			return (ENXIO);
	}
	cfg->msix.msix_pba_res = rle->res;

	if (bootverbose)
		device_printf(child,
		    "attempting to allocate %d MSI-X vectors (%d supported)\n",
		    *count, cfg->msix.msix_msgnum);
	/*
	 * Ask the parent bridge for up to 'max' messages, registering
	 * each as a SYS_RES_IRQ resource at rids 1..actual.
	 */
	max = min(*count, cfg->msix.msix_msgnum);
	for (i = 0; i < max; i++) {
		/* Allocate a message. */
		error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
		if (error)
			break;
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
		    irq, 1);
	}
	actual = i;

	/*
	 * NOTE(review): if the very first PCIB_ALLOC_MSIX() fails,
	 * 'actual' is 0, the rle lookup below returns NULL, and the
	 * bootverbose branch dereferences it -- confirm this path is
	 * unreachable, or add an early "actual == 0" bail-out as newer
	 * FreeBSD does.
	 */
	if (bootverbose) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
		if (actual == 1)
			device_printf(child, "using IRQ %lu for MSI-X\n",
			    rle->start);
		else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs of
			 * IRQ values as ranges.  'irq' is the previous IRQ.
			 * 'run' is true if we are in a range.
			 */
			device_printf(child, "using IRQs %lu", rle->start);
			irq = rle->start;
			run = 0;
			for (i = 1; i < actual; i++) {
				rle = resource_list_find(&dinfo->resources,
				    SYS_RES_IRQ, i + 1);

				/* Still in a run? */
				if (rle->start == irq + 1) {
					run = 1;
					irq++;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					printf("-%d", irq);
					run = 0;
				}

				/* Start new range. */
				printf(",%lu", rle->start);
				irq = rle->start;
			}

			/* Unfinished range? */
			if (run)
				printf("-%d", irq);
			printf(" for MSI-X\n");
		}
	}

	/* Mask all vectors. */
	for (i = 0; i < cfg->msix.msix_msgnum; i++)
		pci_mask_msix(child, i);

	/*
	 * Allocate and initialize vector data and virtual table.
	 * Initially vector i+1 (1-based) backs table entry i.
	 */
	cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < actual; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		cfg->msix.msix_vectors[i].mv_irq = rle->start;
		cfg->msix.msix_table[i].mte_vector = i + 1;
	}

	/* Update control register to enable MSI-X. */
	cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
	    cfg->msix.msix_ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msix.msix_alloc = actual;
	cfg->msix.msix_table_len = actual;
	*count = actual;
	return (0);
}
---|
1367 | |
---|
1368 | /* |
---|
1369 | * By default, pci_alloc_msix() will assign the allocated IRQ |
---|
1370 | * resources consecutively to the first N messages in the MSI-X table. |
---|
1371 | * However, device drivers may want to use different layouts if they |
---|
1372 | * either receive fewer messages than they asked for, or they wish to |
---|
1373 | * populate the MSI-X table sparsely. This method allows the driver |
---|
1374 | * to specify what layout it wants. It must be called after a |
---|
1375 | * successful pci_alloc_msix() but before any of the associated |
---|
1376 | * SYS_RES_IRQ resources are allocated via bus_alloc_resource(). |
---|
1377 | * |
---|
1378 | * The 'vectors' array contains 'count' message vectors. The array |
---|
1379 | * maps directly to the MSI-X table in that index 0 in the array |
---|
1380 | * specifies the vector for the first message in the MSI-X table, etc. |
---|
1381 | * The vector value in each array index can either be 0 to indicate |
---|
1382 | * that no vector should be assigned to a message slot, or it can be a |
---|
1383 | * number from 1 to N (where N is the count returned from a |
---|
1384 | * successful call to pci_alloc_msix()) to indicate which message |
---|
1385 | * vector (IRQ) to be used for the corresponding message. |
---|
1386 | * |
---|
1387 | * On successful return, each message with a non-zero vector will have |
---|
1388 | * an associated SYS_RES_IRQ whose rid is equal to the array index + |
---|
1389 | * 1. Additionally, if any of the IRQs allocated via the previous |
---|
1390 | * call to pci_alloc_msix() are not used in the mapping, those IRQs |
---|
1391 | * will be freed back to the system automatically. |
---|
1392 | * |
---|
1393 | * For example, suppose a driver has a MSI-X table with 6 messages and |
---|
1394 | * asks for 6 messages, but pci_alloc_msix() only returns a count of |
---|
1395 | * 3. Call the three vectors allocated by pci_alloc_msix() A, B, and |
---|
1396 | * C. After the call to pci_alloc_msix(), the device will be setup to |
---|
1397 | * have an MSI-X table of ABC--- (where - means no vector assigned). |
---|
1398 | * If the driver then passes a vector array of { 1, 0, 1, 2, 0, 2 }, |
---|
1399 | * then the MSI-X table will look like A-AB-B, and the 'C' vector will |
---|
1400 | * be freed back to the system. This device will also have valid |
---|
1401 | * SYS_RES_IRQ rids of 1, 3, 4, and 6. |
---|
1402 | * |
---|
1403 | * In any case, the SYS_RES_IRQ rid X will always map to the message |
---|
1404 | * at MSI-X table index X - 1 and will only be valid if a vector is |
---|
1405 | * assigned to that table entry. |
---|
1406 | */ |
---|
1407 | int |
---|
1408 | pci_remap_msix_method(device_t dev, device_t child, int count, |
---|
1409 | const u_int *vectors) |
---|
1410 | { |
---|
1411 | struct pci_devinfo *dinfo = device_get_ivars(child); |
---|
1412 | struct pcicfg_msix *msix = &dinfo->cfg.msix; |
---|
1413 | struct resource_list_entry *rle; |
---|
1414 | int i, irq, j, *used; |
---|
1415 | |
---|
1416 | /* |
---|
1417 | * Have to have at least one message in the table but the |
---|
1418 | * table can't be bigger than the actual MSI-X table in the |
---|
1419 | * device. |
---|
1420 | */ |
---|
1421 | if (count == 0 || count > msix->msix_msgnum) |
---|
1422 | return (EINVAL); |
---|
1423 | |
---|
1424 | /* Sanity check the vectors. */ |
---|
1425 | for (i = 0; i < count; i++) |
---|
1426 | if (vectors[i] > msix->msix_alloc) |
---|
1427 | return (EINVAL); |
---|
1428 | |
---|
1429 | /* |
---|
1430 | * Make sure there aren't any holes in the vectors to be used. |
---|
1431 | * It's a big pain to support it, and it doesn't really make |
---|
1432 | * sense anyway. Also, at least one vector must be used. |
---|
1433 | */ |
---|
1434 | used = malloc(sizeof(int) * msix->msix_alloc, M_DEVBUF, M_WAITOK | |
---|
1435 | M_ZERO); |
---|
1436 | for (i = 0; i < count; i++) |
---|
1437 | if (vectors[i] != 0) |
---|
1438 | used[vectors[i] - 1] = 1; |
---|
1439 | for (i = 0; i < msix->msix_alloc - 1; i++) |
---|
1440 | if (used[i] == 0 && used[i + 1] == 1) { |
---|
1441 | free(used, M_DEVBUF); |
---|
1442 | return (EINVAL); |
---|
1443 | } |
---|
1444 | if (used[0] != 1) { |
---|
1445 | free(used, M_DEVBUF); |
---|
1446 | return (EINVAL); |
---|
1447 | } |
---|
1448 | |
---|
1449 | /* Make sure none of the resources are allocated. */ |
---|
1450 | for (i = 0; i < msix->msix_table_len; i++) { |
---|
1451 | if (msix->msix_table[i].mte_vector == 0) |
---|
1452 | continue; |
---|
1453 | if (msix->msix_table[i].mte_handlers > 0) |
---|
1454 | return (EBUSY); |
---|
1455 | rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1); |
---|
1456 | KASSERT(rle != NULL, ("missing resource")); |
---|
1457 | if (rle->res != NULL) |
---|
1458 | return (EBUSY); |
---|
1459 | } |
---|
1460 | |
---|
1461 | /* Free the existing resource list entries. */ |
---|
1462 | for (i = 0; i < msix->msix_table_len; i++) { |
---|
1463 | if (msix->msix_table[i].mte_vector == 0) |
---|
1464 | continue; |
---|
1465 | resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1); |
---|
1466 | } |
---|
1467 | |
---|
1468 | /* |
---|
1469 | * Build the new virtual table keeping track of which vectors are |
---|
1470 | * used. |
---|
1471 | */ |
---|
1472 | free(msix->msix_table, M_DEVBUF); |
---|
1473 | msix->msix_table = malloc(sizeof(struct msix_table_entry) * count, |
---|
1474 | M_DEVBUF, M_WAITOK | M_ZERO); |
---|
1475 | for (i = 0; i < count; i++) |
---|
1476 | msix->msix_table[i].mte_vector = vectors[i]; |
---|
1477 | msix->msix_table_len = count; |
---|
1478 | |
---|
1479 | /* Free any unused IRQs and resize the vectors array if necessary. */ |
---|
1480 | j = msix->msix_alloc - 1; |
---|
1481 | if (used[j] == 0) { |
---|
1482 | struct msix_vector *vec; |
---|
1483 | |
---|
1484 | while (used[j] == 0) { |
---|
1485 | PCIB_RELEASE_MSIX(device_get_parent(dev), child, |
---|
1486 | msix->msix_vectors[j].mv_irq); |
---|
1487 | j--; |
---|
1488 | } |
---|
1489 | vec = malloc(sizeof(struct msix_vector) * (j + 1), M_DEVBUF, |
---|
1490 | M_WAITOK); |
---|
1491 | bcopy(msix->msix_vectors, vec, sizeof(struct msix_vector) * |
---|
1492 | (j + 1)); |
---|
1493 | free(msix->msix_vectors, M_DEVBUF); |
---|
1494 | msix->msix_vectors = vec; |
---|
1495 | msix->msix_alloc = j + 1; |
---|
1496 | } |
---|
1497 | free(used, M_DEVBUF); |
---|
1498 | |
---|
1499 | /* Map the IRQs onto the rids. */ |
---|
1500 | for (i = 0; i < count; i++) { |
---|
1501 | if (vectors[i] == 0) |
---|
1502 | continue; |
---|
1503 | irq = msix->msix_vectors[vectors[i]].mv_irq; |
---|
1504 | resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq, |
---|
1505 | irq, 1); |
---|
1506 | } |
---|
1507 | |
---|
1508 | if (bootverbose) { |
---|
1509 | device_printf(child, "Remapped MSI-X IRQs as: "); |
---|
1510 | for (i = 0; i < count; i++) { |
---|
1511 | if (i != 0) |
---|
1512 | printf(", "); |
---|
1513 | if (vectors[i] == 0) |
---|
1514 | printf("---"); |
---|
1515 | else |
---|
1516 | printf("%d", |
---|
1517 | msix->msix_vectors[vectors[i]].mv_irq); |
---|
1518 | } |
---|
1519 | printf("\n"); |
---|
1520 | } |
---|
1521 | |
---|
1522 | return (0); |
---|
1523 | } |
---|
1524 | |
---|
/*
 * Release all MSI-X messages allocated to 'child': disable MSI-X in
 * the control register, drop the SYS_RES_IRQ entries, and hand the
 * IRQs back to the parent bridge.  Fails with EBUSY if any message
 * still has a handler or an allocated resource.
 */
static int
pci_release_msix(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct resource_list_entry *rle;
	int i;

	/* Do we have any messages to release? */
	if (msix->msix_alloc == 0)
		return (ENODEV);

	/* Make sure none of the resources are allocated. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		if (msix->msix_table[i].mte_handlers > 0)
			return (EBUSY);
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing resource"));
		if (rle->res != NULL)
			return (EBUSY);
	}

	/* Update control register to disable MSI-X. */
	msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);

	/* Free the resource list entries. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
	}
	free(msix->msix_table, M_DEVBUF);
	msix->msix_table_len = 0;

	/* Release the IRQs. */
	for (i = 0; i < msix->msix_alloc; i++)
		PCIB_RELEASE_MSIX(device_get_parent(dev), child,
		    msix->msix_vectors[i].mv_irq);
	free(msix->msix_vectors, M_DEVBUF);
	msix->msix_alloc = 0;
	return (0);
}
---|
1571 | |
---|
1572 | /* |
---|
1573 | * Return the max supported MSI-X messages this device supports. |
---|
1574 | * Basically, assuming the MD code can alloc messages, this function |
---|
1575 | * should return the maximum value that pci_alloc_msix() can return. |
---|
1576 | * Thus, it is subject to the tunables, etc. |
---|
1577 | */ |
---|
1578 | int |
---|
1579 | pci_msix_count_method(device_t dev, device_t child) |
---|
1580 | { |
---|
1581 | struct pci_devinfo *dinfo = device_get_ivars(child); |
---|
1582 | struct pcicfg_msix *msix = &dinfo->cfg.msix; |
---|
1583 | |
---|
1584 | if (pci_do_msix && msix->msix_location != 0) |
---|
1585 | return (msix->msix_msgnum); |
---|
1586 | return (0); |
---|
1587 | } |
---|
1588 | |
---|
1589 | /* |
---|
1590 | * HyperTransport MSI mapping control |
---|
1591 | */ |
---|
1592 | void |
---|
1593 | pci_ht_map_msi(device_t dev, uint64_t addr) |
---|
1594 | { |
---|
1595 | struct pci_devinfo *dinfo = device_get_ivars(dev); |
---|
1596 | struct pcicfg_ht *ht = &dinfo->cfg.ht; |
---|
1597 | |
---|
1598 | if (!ht->ht_msimap) |
---|
1599 | return; |
---|
1600 | |
---|
1601 | if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) && |
---|
1602 | ht->ht_msiaddr >> 20 == addr >> 20) { |
---|
1603 | /* Enable MSI -> HT mapping. */ |
---|
1604 | ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE; |
---|
1605 | pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, |
---|
1606 | ht->ht_msictrl, 2); |
---|
1607 | } |
---|
1608 | |
---|
1609 | if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) { |
---|
1610 | /* Disable MSI -> HT mapping. */ |
---|
1611 | ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE; |
---|
1612 | pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND, |
---|
1613 | ht->ht_msictrl, 2); |
---|
1614 | } |
---|
1615 | } |
---|
1616 | |
---|
1617 | int |
---|
1618 | pci_get_max_read_req(device_t dev) |
---|
1619 | { |
---|
1620 | int cap; |
---|
1621 | uint16_t val; |
---|
1622 | |
---|
1623 | if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) != 0) |
---|
1624 | return (0); |
---|
1625 | val = pci_read_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, 2); |
---|
1626 | val &= PCIM_EXP_CTL_MAX_READ_REQUEST; |
---|
1627 | val >>= 12; |
---|
1628 | return (1 << (val + 7)); |
---|
1629 | } |
---|
1630 | |
---|
1631 | int |
---|
1632 | pci_set_max_read_req(device_t dev, int size) |
---|
1633 | { |
---|
1634 | int cap; |
---|
1635 | uint16_t val; |
---|
1636 | |
---|
1637 | if (pci_find_extcap(dev, PCIY_EXPRESS, &cap) != 0) |
---|
1638 | return (0); |
---|
1639 | if (size < 128) |
---|
1640 | size = 128; |
---|
1641 | if (size > 4096) |
---|
1642 | size = 4096; |
---|
1643 | size = (1 << (fls(size) - 1)); |
---|
1644 | val = pci_read_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, 2); |
---|
1645 | val &= ~PCIM_EXP_CTL_MAX_READ_REQUEST; |
---|
1646 | val |= (fls(size) - 8) << 12; |
---|
1647 | pci_write_config(dev, cap + PCIR_EXPRESS_DEVICE_CTL, val, 2); |
---|
1648 | return (size); |
---|
1649 | } |
---|
1650 | |
---|
/*
 * Support for MSI message signalled interrupts.
 */

/*
 * Program the MSI address and data registers of 'dev' with the given
 * mapping (normally obtained via PCIB_MAP_MSI() on the parent bridge)
 * and then set the MSI enable bit.  Address/data are written before
 * the enable bit so the device cannot signal with a stale mapping.
 */
void
pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;

	/* Write data and address values. */
	pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
	    address & 0xffffffff, 4);
	if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
		/* 64-bit capable: high address dword, then the data word
		 * which sits at a different offset in the capability. */
		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
		    address >> 32, 4);
		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
		    data, 2);
	} else
		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
		    2);

	/* Enable MSI in the control register. */
	msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);

	/* Enable MSI -> HT mapping. */
	pci_ht_map_msi(dev, address);
}
---|
1680 | |
---|
1681 | void |
---|
1682 | pci_disable_msi(device_t dev) |
---|
1683 | { |
---|
1684 | struct pci_devinfo *dinfo = device_get_ivars(dev); |
---|
1685 | struct pcicfg_msi *msi = &dinfo->cfg.msi; |
---|
1686 | |
---|
1687 | /* Disable MSI -> HT mapping. */ |
---|
1688 | pci_ht_map_msi(dev, 0); |
---|
1689 | |
---|
1690 | /* Disable MSI in the control register. */ |
---|
1691 | msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE; |
---|
1692 | pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl, |
---|
1693 | 2); |
---|
1694 | } |
---|
1695 | |
---|
/*
 * Restore MSI registers during resume. If MSI is enabled then
 * restore the data and address registers in addition to the control
 * register.
 */
static void
pci_resume_msi(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	uint64_t address;
	uint16_t data;

	if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
		/* Replay the saved address/data before re-enabling. */
		address = msi->msi_addr;
		data = msi->msi_data;
		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
		    address & 0xffffffff, 4);
		if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_ADDR_HIGH, address >> 32, 4);
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_DATA_64BIT, data, 2);
		} else
			pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
			    data, 2);
	}
	/* The control register is restored unconditionally. */
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);
}
---|
1726 | |
---|
1727 | static int |
---|
1728 | pci_remap_intr_method(device_t bus, device_t dev, u_int irq) |
---|
1729 | { |
---|
1730 | struct pci_devinfo *dinfo = device_get_ivars(dev); |
---|
1731 | pcicfgregs *cfg = &dinfo->cfg; |
---|
1732 | struct resource_list_entry *rle; |
---|
1733 | struct msix_table_entry *mte; |
---|
1734 | struct msix_vector *mv; |
---|
1735 | uint64_t addr; |
---|
1736 | uint32_t data; |
---|
1737 | int error, i, j; |
---|
1738 | |
---|
1739 | /* |
---|
1740 | * Handle MSI first. We try to find this IRQ among our list |
---|
1741 | * of MSI IRQs. If we find it, we request updated address and |
---|
1742 | * data registers and apply the results. |
---|
1743 | */ |
---|
1744 | if (cfg->msi.msi_alloc > 0) { |
---|
1745 | |
---|
1746 | /* If we don't have any active handlers, nothing to do. */ |
---|
1747 | if (cfg->msi.msi_handlers == 0) |
---|
1748 | return (0); |
---|
1749 | for (i = 0; i < cfg->msi.msi_alloc; i++) { |
---|
1750 | rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, |
---|
1751 | i + 1); |
---|
1752 | if (rle->start == irq) { |
---|
1753 | error = PCIB_MAP_MSI(device_get_parent(bus), |
---|
1754 | dev, irq, &addr, &data); |
---|
1755 | if (error) |
---|
1756 | return (error); |
---|
1757 | pci_disable_msi(dev); |
---|
1758 | dinfo->cfg.msi.msi_addr = addr; |
---|
1759 | dinfo->cfg.msi.msi_data = data; |
---|
1760 | pci_enable_msi(dev, addr, data); |
---|
1761 | return (0); |
---|
1762 | } |
---|
1763 | } |
---|
1764 | return (ENOENT); |
---|
1765 | } |
---|
1766 | |
---|
1767 | /* |
---|
1768 | * For MSI-X, we check to see if we have this IRQ. If we do, |
---|
1769 | * we request the updated mapping info. If that works, we go |
---|
1770 | * through all the slots that use this IRQ and update them. |
---|
1771 | */ |
---|
1772 | if (cfg->msix.msix_alloc > 0) { |
---|
1773 | for (i = 0; i < cfg->msix.msix_alloc; i++) { |
---|
1774 | mv = &cfg->msix.msix_vectors[i]; |
---|
1775 | if (mv->mv_irq == irq) { |
---|
1776 | error = PCIB_MAP_MSI(device_get_parent(bus), |
---|
1777 | dev, irq, &addr, &data); |
---|
1778 | if (error) |
---|
1779 | return (error); |
---|
1780 | mv->mv_address = addr; |
---|
1781 | mv->mv_data = data; |
---|
1782 | for (j = 0; j < cfg->msix.msix_table_len; j++) { |
---|
1783 | mte = &cfg->msix.msix_table[j]; |
---|
1784 | if (mte->mte_vector != i + 1) |
---|
1785 | continue; |
---|
1786 | if (mte->mte_handlers == 0) |
---|
1787 | continue; |
---|
1788 | pci_mask_msix(dev, j); |
---|
1789 | pci_enable_msix(dev, j, addr, data); |
---|
1790 | pci_unmask_msix(dev, j); |
---|
1791 | } |
---|
1792 | } |
---|
1793 | } |
---|
1794 | return (ENOENT); |
---|
1795 | } |
---|
1796 | |
---|
1797 | return (ENOENT); |
---|
1798 | } |
---|
1799 | |
---|
1800 | /* |
---|
1801 | * Returns true if the specified device is blacklisted because MSI |
---|
1802 | * doesn't work. |
---|
1803 | */ |
---|
1804 | int |
---|
1805 | pci_msi_device_blacklisted(device_t dev) |
---|
1806 | { |
---|
1807 | struct pci_quirk *q; |
---|
1808 | |
---|
1809 | if (!pci_honor_msi_blacklist) |
---|
1810 | return (0); |
---|
1811 | |
---|
1812 | for (q = &pci_quirks[0]; q->devid; q++) { |
---|
1813 | if (q->devid == pci_get_devid(dev) && |
---|
1814 | q->type == PCI_QUIRK_DISABLE_MSI) |
---|
1815 | return (1); |
---|
1816 | } |
---|
1817 | return (0); |
---|
1818 | } |
---|
1819 | |
---|
1820 | /* |
---|
1821 | * Returns true if a specified chipset supports MSI when it is |
---|
1822 | * emulated hardware in a virtual machine. |
---|
1823 | */ |
---|
1824 | static int |
---|
1825 | pci_msi_vm_chipset(device_t dev) |
---|
1826 | { |
---|
1827 | struct pci_quirk *q; |
---|
1828 | |
---|
1829 | for (q = &pci_quirks[0]; q->devid; q++) { |
---|
1830 | if (q->devid == pci_get_devid(dev) && |
---|
1831 | q->type == PCI_QUIRK_ENABLE_MSI_VM) |
---|
1832 | return (1); |
---|
1833 | } |
---|
1834 | return (0); |
---|
1835 | } |
---|
1836 | |
---|
1837 | /* |
---|
1838 | * Determine if MSI is blacklisted globally on this sytem. Currently, |
---|
1839 | * we just check for blacklisted chipsets as represented by the |
---|
1840 | * host-PCI bridge at device 0:0:0. In the future, it may become |
---|
1841 | * necessary to check other system attributes, such as the kenv values |
---|
1842 | * that give the motherboard manufacturer and model number. |
---|
1843 | */ |
---|
1844 | static int |
---|
1845 | pci_msi_blacklisted(void) |
---|
1846 | { |
---|
1847 | device_t dev; |
---|
1848 | |
---|
1849 | if (!pci_honor_msi_blacklist) |
---|
1850 | return (0); |
---|
1851 | |
---|
1852 | /* Blacklist all non-PCI-express and non-PCI-X chipsets. */ |
---|
1853 | if (!(pcie_chipset || pcix_chipset)) { |
---|
1854 | if (vm_guest != VM_GUEST_NO) { |
---|
1855 | dev = pci_find_bsf(0, 0, 0); |
---|
1856 | if (dev != NULL) |
---|
1857 | return (pci_msi_vm_chipset(dev) == 0); |
---|
1858 | } |
---|
1859 | return (1); |
---|
1860 | } |
---|
1861 | |
---|
1862 | dev = pci_find_bsf(0, 0, 0); |
---|
1863 | if (dev != NULL) |
---|
1864 | return (pci_msi_device_blacklisted(dev)); |
---|
1865 | return (0); |
---|
1866 | } |
---|
1867 | |
---|
/*
 * Attempt to allocate *count MSI messages. The actual number allocated is
 * returned in *count. After this function returns, each message will be
 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
 *
 * Returns 0 on success; EINVAL for a zero or non-power-of-2 request,
 * ENXIO when messages are already allocated or MSI is blacklisted,
 * ENODEV when the device has no usable MSI capability, or the
 * PCIB_ALLOC_MSI() error when even a single message cannot be had.
 */
int
pci_alloc_msi_method(device_t dev, device_t child, int *count)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int actual, error, i, irqs[32];
	uint16_t ctrl;

	/* Don't let count == 0 get us into trouble. */
	if (*count == 0)
		return (EINVAL);

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI capability present? */
	if (cfg->msi.msi_location == 0 || !pci_do_msi)
		return (ENODEV);

	if (bootverbose)
		device_printf(child,
		    "attempting to allocate %d MSI vectors (%d supported)\n",
		    *count, cfg->msi.msi_msgnum);

	/* Don't ask for more than the device supports. */
	actual = min(*count, cfg->msi.msi_msgnum);

	/* Don't ask for more than 32 messages. */
	actual = min(actual, 32);

	/* MSI requires power of 2 number of messages. */
	if (!powerof2(actual))
		return (EINVAL);

	/* Halve the request until the parent bridge can satisfy it. */
	for (;;) {
		/* Try to allocate N messages. */
		error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
		    cfg->msi.msi_msgnum, irqs);
		if (error == 0)
			break;
		if (actual == 1)
			return (error);

		/* Try N / 2. */
		actual >>= 1;
	}

	/*
	 * We now have N actual messages mapped onto SYS_RES_IRQ
	 * resources in the irqs[] array, so add new resources
	 * starting at rid 1.
	 */
	for (i = 0; i < actual; i++)
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
		    irqs[i], irqs[i], 1);

	if (bootverbose) {
		if (actual == 1)
			device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
		else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs
			 * of IRQ values as ranges. 'run' is true if
			 * we are in a range.
			 */
			device_printf(child, "using IRQs %d", irqs[0]);
			run = 0;
			for (i = 1; i < actual; i++) {

				/* Still in a run? */
				if (irqs[i] == irqs[i - 1] + 1) {
					run = 1;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					printf("-%d", irqs[i - 1]);
					run = 0;
				}

				/* Start new range. */
				printf(",%d", irqs[i]);
			}

			/* Unfinished range? */
			if (run)
				printf("-%d", irqs[actual - 1]);
			printf(" for MSI\n");
		}
	}

	/* Update control register with actual count. */
	ctrl = cfg->msi.msi_ctrl;
	ctrl &= ~PCIM_MSICTRL_MME_MASK;
	/* MME field encodes log2 of the message count. */
	ctrl |= (ffs(actual) - 1) << 4;
	cfg->msi.msi_ctrl = ctrl;
	pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msi.msi_alloc = actual;
	cfg->msi.msi_handlers = 0;
	*count = actual;
	return (0);
}
---|
1991 | |
---|
/* Release the MSI messages associated with this device. */

/*
 * Returns 0 on success, EBUSY if any message resource is still
 * allocated or has an active handler, or ENODEV when nothing was
 * allocated in the first place.  MSI-X release is attempted first;
 * only a clean ENODEV from it (i.e. no MSI-X in use) falls through
 * to the MSI path.
 */
int
pci_release_msi_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	struct resource_list_entry *rle;
	int error, i, irqs[32];

	/* Try MSI-X first. */
	error = pci_release_msix(dev, child);
	if (error != ENODEV)
		return (error);

	/* Do we have any messages to release? */
	if (msi->msi_alloc == 0)
		return (ENODEV);
	KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));

	/* Make sure none of the resources are allocated. */
	if (msi->msi_handlers > 0)
		return (EBUSY);
	/* Snapshot the IRQ numbers before deleting the resource entries. */
	for (i = 0; i < msi->msi_alloc; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing MSI resource"));
		if (rle->res != NULL)
			return (EBUSY);
		irqs[i] = rle->start;
	}

	/* Update control register with 0 count. */
	KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
	    ("%s: MSI still enabled", __func__));
	msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
	    msi->msi_ctrl, 2);

	/* Release the messages. */
	PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
	for (i = 0; i < msi->msi_alloc; i++)
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);

	/* Update alloc count. */
	msi->msi_alloc = 0;
	msi->msi_addr = 0;
	msi->msi_data = 0;
	return (0);
}
---|
2040 | |
---|
2041 | /* |
---|
2042 | * Return the max supported MSI messages this device supports. |
---|
2043 | * Basically, assuming the MD code can alloc messages, this function |
---|
2044 | * should return the maximum value that pci_alloc_msi() can return. |
---|
2045 | * Thus, it is subject to the tunables, etc. |
---|
2046 | */ |
---|
2047 | int |
---|
2048 | pci_msi_count_method(device_t dev, device_t child) |
---|
2049 | { |
---|
2050 | struct pci_devinfo *dinfo = device_get_ivars(child); |
---|
2051 | struct pcicfg_msi *msi = &dinfo->cfg.msi; |
---|
2052 | |
---|
2053 | if (pci_do_msi && msi->msi_location != 0) |
---|
2054 | return (msi->msi_msgnum); |
---|
2055 | return (0); |
---|
2056 | } |
---|
2057 | |
---|
2058 | /* free pcicfgregs structure and all depending data structures */ |
---|
2059 | |
---|
2060 | int |
---|
2061 | pci_freecfg(struct pci_devinfo *dinfo) |
---|
2062 | { |
---|
2063 | struct devlist *devlist_head; |
---|
2064 | int i; |
---|
2065 | |
---|
2066 | devlist_head = &pci_devq; |
---|
2067 | |
---|
2068 | if (dinfo->cfg.vpd.vpd_reg) { |
---|
2069 | free(dinfo->cfg.vpd.vpd_ident, M_DEVBUF); |
---|
2070 | for (i = 0; i < dinfo->cfg.vpd.vpd_rocnt; i++) |
---|
2071 | free(dinfo->cfg.vpd.vpd_ros[i].value, M_DEVBUF); |
---|
2072 | free(dinfo->cfg.vpd.vpd_ros, M_DEVBUF); |
---|
2073 | for (i = 0; i < dinfo->cfg.vpd.vpd_wcnt; i++) |
---|
2074 | free(dinfo->cfg.vpd.vpd_w[i].value, M_DEVBUF); |
---|
2075 | free(dinfo->cfg.vpd.vpd_w, M_DEVBUF); |
---|
2076 | } |
---|
2077 | STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links); |
---|
2078 | free(dinfo, M_DEVBUF); |
---|
2079 | |
---|
2080 | /* increment the generation count */ |
---|
2081 | pci_generation++; |
---|
2082 | |
---|
2083 | /* we're losing one device */ |
---|
2084 | pci_numdevs--; |
---|
2085 | return (0); |
---|
2086 | } |
---|
2087 | |
---|
2088 | /* |
---|
2089 | * PCI power manangement |
---|
2090 | */ |
---|
2091 | int |
---|
2092 | pci_set_powerstate_method(device_t dev, device_t child, int state) |
---|
2093 | { |
---|
2094 | struct pci_devinfo *dinfo = device_get_ivars(child); |
---|
2095 | pcicfgregs *cfg = &dinfo->cfg; |
---|
2096 | uint16_t status; |
---|
2097 | int result, oldstate, highest, delay; |
---|
2098 | |
---|
2099 | if (cfg->pp.pp_cap == 0) |
---|
2100 | return (EOPNOTSUPP); |
---|
2101 | |
---|
2102 | /* |
---|
2103 | * Optimize a no state change request away. While it would be OK to |
---|
2104 | * write to the hardware in theory, some devices have shown odd |
---|
2105 | * behavior when going from D3 -> D3. |
---|
2106 | */ |
---|
2107 | oldstate = pci_get_powerstate(child); |
---|
2108 | if (oldstate == state) |
---|
2109 | return (0); |
---|
2110 | |
---|
2111 | /* |
---|
2112 | * The PCI power management specification states that after a state |
---|
2113 | * transition between PCI power states, system software must |
---|
2114 | * guarantee a minimal delay before the function accesses the device. |
---|
2115 | * Compute the worst case delay that we need to guarantee before we |
---|
2116 | * access the device. Many devices will be responsive much more |
---|
2117 | * quickly than this delay, but there are some that don't respond |
---|
2118 | * instantly to state changes. Transitions to/from D3 state require |
---|
2119 | * 10ms, while D2 requires 200us, and D0/1 require none. The delay |
---|
2120 | * is done below with DELAY rather than a sleeper function because |
---|
2121 | * this function can be called from contexts where we cannot sleep. |
---|
2122 | */ |
---|
2123 | highest = (oldstate > state) ? oldstate : state; |
---|
2124 | if (highest == PCI_POWERSTATE_D3) |
---|
2125 | delay = 10000; |
---|
2126 | else if (highest == PCI_POWERSTATE_D2) |
---|
2127 | delay = 200; |
---|
2128 | else |
---|
2129 | delay = 0; |
---|
2130 | status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2) |
---|
2131 | & ~PCIM_PSTAT_DMASK; |
---|
2132 | result = 0; |
---|
2133 | switch (state) { |
---|
2134 | case PCI_POWERSTATE_D0: |
---|
2135 | status |= PCIM_PSTAT_D0; |
---|
2136 | break; |
---|
2137 | case PCI_POWERSTATE_D1: |
---|
2138 | if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0) |
---|
2139 | return (EOPNOTSUPP); |
---|
2140 | status |= PCIM_PSTAT_D1; |
---|
2141 | break; |
---|
2142 | case PCI_POWERSTATE_D2: |
---|
2143 | if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0) |
---|
2144 | return (EOPNOTSUPP); |
---|
2145 | status |= PCIM_PSTAT_D2; |
---|
2146 | break; |
---|
2147 | case PCI_POWERSTATE_D3: |
---|
2148 | status |= PCIM_PSTAT_D3; |
---|
2149 | break; |
---|
2150 | default: |
---|
2151 | return (EINVAL); |
---|
2152 | } |
---|
2153 | |
---|
2154 | if (bootverbose) |
---|
2155 | pci_printf(cfg, "Transition from D%d to D%d\n", oldstate, |
---|
2156 | state); |
---|
2157 | |
---|
2158 | PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2); |
---|
2159 | if (delay) |
---|
2160 | DELAY(delay); |
---|
2161 | return (0); |
---|
2162 | } |
---|
2163 | |
---|
2164 | int |
---|
2165 | pci_get_powerstate_method(device_t dev, device_t child) |
---|
2166 | { |
---|
2167 | struct pci_devinfo *dinfo = device_get_ivars(child); |
---|
2168 | pcicfgregs *cfg = &dinfo->cfg; |
---|
2169 | uint16_t status; |
---|
2170 | int result; |
---|
2171 | |
---|
2172 | if (cfg->pp.pp_cap != 0) { |
---|
2173 | status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2); |
---|
2174 | switch (status & PCIM_PSTAT_DMASK) { |
---|
2175 | case PCIM_PSTAT_D0: |
---|
2176 | result = PCI_POWERSTATE_D0; |
---|
2177 | break; |
---|
2178 | case PCIM_PSTAT_D1: |
---|
2179 | result = PCI_POWERSTATE_D1; |
---|
2180 | break; |
---|
2181 | case PCIM_PSTAT_D2: |
---|
2182 | result = PCI_POWERSTATE_D2; |
---|
2183 | break; |
---|
2184 | case PCIM_PSTAT_D3: |
---|
2185 | result = PCI_POWERSTATE_D3; |
---|
2186 | break; |
---|
2187 | default: |
---|
2188 | result = PCI_POWERSTATE_UNKNOWN; |
---|
2189 | break; |
---|
2190 | } |
---|
2191 | } else { |
---|
2192 | /* No support, device is always at D0 */ |
---|
2193 | result = PCI_POWERSTATE_D0; |
---|
2194 | } |
---|
2195 | return (result); |
---|
2196 | } |
---|
2197 | |
---|
2198 | /* |
---|
2199 | * Some convenience functions for PCI device drivers. |
---|
2200 | */ |
---|
2201 | |
---|
2202 | static __inline void |
---|
2203 | pci_set_command_bit(device_t dev, device_t child, uint16_t bit) |
---|
2204 | { |
---|
2205 | uint16_t command; |
---|
2206 | |
---|
2207 | command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); |
---|
2208 | command |= bit; |
---|
2209 | PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); |
---|
2210 | } |
---|
2211 | |
---|
2212 | static __inline void |
---|
2213 | pci_clear_command_bit(device_t dev, device_t child, uint16_t bit) |
---|
2214 | { |
---|
2215 | uint16_t command; |
---|
2216 | |
---|
2217 | command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2); |
---|
2218 | command &= ~bit; |
---|
2219 | PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2); |
---|
2220 | } |
---|
2221 | |
---|
/*
 * Set the bus-master enable bit so 'child' may initiate DMA.
 * Always succeeds.
 */
int
pci_enable_busmaster_method(device_t dev, device_t child)
{
	pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
	return (0);
}
---|
2228 | |
---|
/*
 * Clear the bus-master enable bit, preventing 'child' from initiating
 * DMA.  Always succeeds.
 */
int
pci_disable_busmaster_method(device_t dev, device_t child)
{
	pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
	return (0);
}
---|
2235 | |
---|
2236 | int |
---|
2237 | pci_enable_io_method(device_t dev, device_t child, int space) |
---|
2238 | { |
---|
2239 | uint16_t bit; |
---|
2240 | |
---|
2241 | switch(space) { |
---|
2242 | case SYS_RES_IOPORT: |
---|
2243 | bit = PCIM_CMD_PORTEN; |
---|
2244 | break; |
---|
2245 | case SYS_RES_MEMORY: |
---|
2246 | bit = PCIM_CMD_MEMEN; |
---|
2247 | break; |
---|
2248 | default: |
---|
2249 | return (EINVAL); |
---|
2250 | } |
---|
2251 | pci_set_command_bit(dev, child, bit); |
---|
2252 | return (0); |
---|
2253 | } |
---|
2254 | |
---|
2255 | int |
---|
2256 | pci_disable_io_method(device_t dev, device_t child, int space) |
---|
2257 | { |
---|
2258 | uint16_t bit; |
---|
2259 | |
---|
2260 | switch(space) { |
---|
2261 | case SYS_RES_IOPORT: |
---|
2262 | bit = PCIM_CMD_PORTEN; |
---|
2263 | break; |
---|
2264 | case SYS_RES_MEMORY: |
---|
2265 | bit = PCIM_CMD_MEMEN; |
---|
2266 | break; |
---|
2267 | default: |
---|
2268 | return (EINVAL); |
---|
2269 | } |
---|
2270 | pci_clear_command_bit(dev, child, bit); |
---|
2271 | return (0); |
---|
2272 | } |
---|
2273 | |
---|
/*
 * New style pci driver. Parent device is either a pci-host-bridge or a
 * pci-pci-bridge. Both kinds are represented by instances of pcib.
 */

/*
 * Dump the identification, header, power-management, MSI and MSI-X
 * details of a newly found device.  Only emits output when booting
 * verbose; otherwise a no-op.
 */
void
pci_print_verbose(struct pci_devinfo *dinfo)
{

	if (bootverbose) {
		pcicfgregs *cfg = &dinfo->cfg;

		printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
		    cfg->vendor, cfg->device, cfg->revid);
		printf("\tdomain=%d, bus=%d, slot=%d, func=%d\n",
		    cfg->domain, cfg->bus, cfg->slot, cfg->func);
		printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
		    cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
		    cfg->mfdev);
		printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
		    cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
		printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
		    cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
		    cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
		if (cfg->intpin > 0)
			printf("\tintpin=%c, irq=%d\n",
			    cfg->intpin +'a' -1, cfg->intline);
		/* Power management capability, if present. */
		if (cfg->pp.pp_cap) {
			uint16_t status;

			status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
			printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
			    cfg->pp.pp_cap & PCIM_PCAP_SPEC,
			    cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
			    cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
			    status & PCIM_PSTAT_DMASK);
		}
		/* MSI capability, if present. */
		if (cfg->msi.msi_location) {
			int ctrl;

			ctrl = cfg->msi.msi_ctrl;
			printf("\tMSI supports %d message%s%s%s\n",
			    cfg->msi.msi_msgnum,
			    (cfg->msi.msi_msgnum == 1) ? "" : "s",
			    (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
			    (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
		}
		/* MSI-X capability, if present. */
		if (cfg->msix.msix_location) {
			printf("\tMSI-X supports %d message%s ",
			    cfg->msix.msix_msgnum,
			    (cfg->msix.msix_msgnum == 1) ? "" : "s");
			if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
				printf("in map 0x%x\n",
				    cfg->msix.msix_table_bar);
			else
				printf("in maps 0x%x and 0x%x\n",
				    cfg->msix.msix_table_bar,
				    cfg->msix.msix_pba_bar);
		}
	}
}
---|
2335 | |
---|
2336 | static int |
---|
2337 | pci_porten(device_t dev) |
---|
2338 | { |
---|
2339 | return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_PORTEN) != 0; |
---|
2340 | } |
---|
2341 | |
---|
2342 | static int |
---|
2343 | pci_memen(device_t dev) |
---|
2344 | { |
---|
2345 | return (pci_read_config(dev, PCIR_COMMAND, 2) & PCIM_CMD_MEMEN) != 0; |
---|
2346 | } |
---|
2347 | |
---|
/*
 * Probe a BAR at config offset 'reg'.  On return *mapp holds the
 * BAR's current value (both dwords for a 64-bit BAR) and *testvalp
 * holds the value read back after writing all 1's, from which the
 * BAR's size can be derived.  Decoding is disabled around the probe
 * and the original BAR contents are restored before returning.
 */
static void
pci_read_bar(device_t dev, int reg, pci_addr_t *mapp, pci_addr_t *testvalp)
{
	pci_addr_t map, testval;
	int ln2range;
	uint16_t cmd;

	map = pci_read_config(dev, reg, 4);
	ln2range = pci_maprange(map);
	/* A 64-bit BAR spills into the following register. */
	if (ln2range == 64)
		map |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;

	/*
	 * Disable decoding via the command register before
	 * determining the BAR's length since we will be placing it in
	 * a weird state.
	 */
	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
	pci_write_config(dev, PCIR_COMMAND,
	    cmd & ~(PCI_BAR_MEM(map) ? PCIM_CMD_MEMEN : PCIM_CMD_PORTEN), 2);

	/*
	 * Determine the BAR's length by writing all 1's. The bottom
	 * log_2(size) bits of the BAR will stick as 0 when we read
	 * the value back.
	 */
	pci_write_config(dev, reg, 0xffffffff, 4);
	testval = pci_read_config(dev, reg, 4);
	if (ln2range == 64) {
		pci_write_config(dev, reg + 4, 0xffffffff, 4);
		testval |= (pci_addr_t)pci_read_config(dev, reg + 4, 4) << 32;
	}

	/*
	 * Restore the original value of the BAR. We may have reprogrammed
	 * the BAR of the low-level console device and when booting verbose,
	 * we need the console device addressable.
	 */
	pci_write_config(dev, reg, map, 4);
	if (ln2range == 64)
		pci_write_config(dev, reg + 4, map >> 32, 4);
	pci_write_config(dev, PCIR_COMMAND, cmd, 2);

	*mapp = map;
	*testvalp = testval;
}
---|
2394 | |
---|
2395 | static void |
---|
2396 | pci_write_bar(device_t dev, int reg, pci_addr_t base) |
---|
2397 | { |
---|
2398 | pci_addr_t map; |
---|
2399 | int ln2range; |
---|
2400 | |
---|
2401 | map = pci_read_config(dev, reg, 4); |
---|
2402 | ln2range = pci_maprange(map); |
---|
2403 | pci_write_config(dev, reg, base, 4); |
---|
2404 | if (ln2range == 64) |
---|
2405 | pci_write_config(dev, reg + 4, base >> 32, 4); |
---|
2406 | } |
---|
2407 | |
---|
2408 | /* |
---|
2409 | * Add a resource based on a pci map register. Return 1 if the map |
---|
2410 | * register is a 32bit map register or 2 if it is a 64bit register. |
---|
2411 | */ |
---|
2412 | static int |
---|
2413 | pci_add_map(device_t bus, device_t dev, int reg, struct resource_list *rl, |
---|
2414 | int force, int prefetch) |
---|
2415 | { |
---|
2416 | pci_addr_t base, map, testval; |
---|
2417 | pci_addr_t start, end, count; |
---|
2418 | int barlen, basezero, maprange, mapsize, type; |
---|
2419 | uint16_t cmd; |
---|
2420 | struct resource *res; |
---|
2421 | |
---|
2422 | pci_read_bar(dev, reg, &map, &testval); |
---|
2423 | if (PCI_BAR_MEM(map)) { |
---|
2424 | type = SYS_RES_MEMORY; |
---|
2425 | if (map & PCIM_BAR_MEM_PREFETCH) |
---|
2426 | prefetch = 1; |
---|
2427 | } else |
---|
2428 | type = SYS_RES_IOPORT; |
---|
2429 | mapsize = pci_mapsize(testval); |
---|
2430 | base = pci_mapbase(map); |
---|
2431 | #ifdef __PCI_BAR_ZERO_VALID |
---|
2432 | basezero = 0; |
---|
2433 | #else |
---|
2434 | basezero = base == 0; |
---|
2435 | #endif |
---|
2436 | maprange = pci_maprange(map); |
---|
2437 | barlen = maprange == 64 ? 2 : 1; |
---|
2438 | |
---|
2439 | /* |
---|
2440 | * For I/O registers, if bottom bit is set, and the next bit up |
---|
2441 | * isn't clear, we know we have a BAR that doesn't conform to the |
---|
2442 | * spec, so ignore it. Also, sanity check the size of the data |
---|
2443 | * areas to the type of memory involved. Memory must be at least |
---|
2444 | * 16 bytes in size, while I/O ranges must be at least 4. |
---|
2445 | */ |
---|
2446 | if (PCI_BAR_IO(testval) && (testval & PCIM_BAR_IO_RESERVED) != 0) |
---|
2447 | return (barlen); |
---|
2448 | if ((type == SYS_RES_MEMORY && mapsize < 4) || |
---|
2449 | (type == SYS_RES_IOPORT && mapsize < 2)) |
---|
2450 | return (barlen); |
---|
2451 | |
---|
2452 | if (bootverbose) { |
---|
2453 | printf("\tmap[%02x]: type %s, range %2d, base %#jx, size %2d", |
---|
2454 | reg, pci_maptype(map), maprange, (uintmax_t)base, mapsize); |
---|
2455 | if (type == SYS_RES_IOPORT && !pci_porten(dev)) |
---|
2456 | printf(", port disabled\n"); |
---|
2457 | else if (type == SYS_RES_MEMORY && !pci_memen(dev)) |
---|
2458 | printf(", memory disabled\n"); |
---|
2459 | else |
---|
2460 | printf(", enabled\n"); |
---|
2461 | } |
---|
2462 | |
---|
2463 | /* |
---|
2464 | * If base is 0, then we have problems if this architecture does |
---|
2465 | * not allow that. It is best to ignore such entries for the |
---|
2466 | * moment. These will be allocated later if the driver specifically |
---|
2467 | * requests them. However, some removable busses look better when |
---|
2468 | * all resources are allocated, so allow '0' to be overriden. |
---|
2469 | * |
---|
2470 | * Similarly treat maps whose values is the same as the test value |
---|
2471 | * read back. These maps have had all f's written to them by the |
---|
2472 | * BIOS in an attempt to disable the resources. |
---|
2473 | */ |
---|
2474 | if (!force && (basezero || map == testval)) |
---|
2475 | return (barlen); |
---|
2476 | if ((u_long)base != base) { |
---|
2477 | device_printf(bus, |
---|
2478 | "pci%d:%d:%d:%d bar %#x too many address bits", |
---|
2479 | pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), |
---|
2480 | pci_get_function(dev), reg); |
---|
2481 | return (barlen); |
---|
2482 | } |
---|
2483 | |
---|
2484 | /* |
---|
2485 | * This code theoretically does the right thing, but has |
---|
2486 | * undesirable side effects in some cases where peripherals |
---|
2487 | * respond oddly to having these bits enabled. Let the user |
---|
2488 | * be able to turn them off (since pci_enable_io_modes is 1 by |
---|
2489 | * default). |
---|
2490 | */ |
---|
2491 | if (pci_enable_io_modes) { |
---|
2492 | /* Turn on resources that have been left off by a lazy BIOS */ |
---|
2493 | if (type == SYS_RES_IOPORT && !pci_porten(dev)) { |
---|
2494 | cmd = pci_read_config(dev, PCIR_COMMAND, 2); |
---|
2495 | cmd |= PCIM_CMD_PORTEN; |
---|
2496 | pci_write_config(dev, PCIR_COMMAND, cmd, 2); |
---|
2497 | } |
---|
2498 | if (type == SYS_RES_MEMORY && !pci_memen(dev)) { |
---|
2499 | cmd = pci_read_config(dev, PCIR_COMMAND, 2); |
---|
2500 | cmd |= PCIM_CMD_MEMEN; |
---|
2501 | pci_write_config(dev, PCIR_COMMAND, cmd, 2); |
---|
2502 | } |
---|
2503 | } else { |
---|
2504 | if (type == SYS_RES_IOPORT && !pci_porten(dev)) |
---|
2505 | return (barlen); |
---|
2506 | if (type == SYS_RES_MEMORY && !pci_memen(dev)) |
---|
2507 | return (barlen); |
---|
2508 | } |
---|
2509 | |
---|
2510 | count = 1 << mapsize; |
---|
2511 | if (basezero || base == pci_mapbase(testval)) { |
---|
2512 | start = 0; /* Let the parent decide. */ |
---|
2513 | end = ~0ULL; |
---|
2514 | } else { |
---|
2515 | start = base; |
---|
2516 | end = base + (1 << mapsize) - 1; |
---|
2517 | } |
---|
2518 | resource_list_add(rl, type, reg, start, end, count); |
---|
2519 | |
---|
2520 | /* |
---|
2521 | * Try to allocate the resource for this BAR from our parent |
---|
2522 | * so that this resource range is already reserved. The |
---|
2523 | * driver for this device will later inherit this resource in |
---|
2524 | * pci_alloc_resource(). |
---|
2525 | */ |
---|
2526 | res = resource_list_alloc(rl, bus, dev, type, ®, start, end, count, |
---|
2527 | prefetch ? RF_PREFETCHABLE : 0); |
---|
2528 | if (res == NULL) { |
---|
2529 | /* |
---|
2530 | * If the allocation fails, clear the BAR and delete |
---|
2531 | * the resource list entry to force |
---|
2532 | * pci_alloc_resource() to allocate resources from the |
---|
2533 | * parent. |
---|
2534 | */ |
---|
2535 | resource_list_delete(rl, type, reg); |
---|
2536 | start = 0; |
---|
2537 | } else { |
---|
2538 | start = rman_get_start(res); |
---|
2539 | rman_set_device(res, bus); |
---|
2540 | } |
---|
2541 | pci_write_bar(dev, reg, start); |
---|
2542 | return (barlen); |
---|
2543 | } |
---|
2544 | |
---|
2545 | /* |
---|
2546 | * For ATA devices we need to decide early what addressing mode to use. |
---|
2547 | * Legacy demands that the primary and secondary ATA ports sits on the |
---|
2548 | * same addresses that old ISA hardware did. This dictates that we use |
---|
2549 | * those addresses and ignore the BAR's if we cannot set PCI native |
---|
2550 | * addressing mode. |
---|
2551 | */ |
---|
2552 | static void |
---|
2553 | pci_ata_maps(device_t bus, device_t dev, struct resource_list *rl, int force, |
---|
2554 | uint32_t prefetchmask) |
---|
2555 | { |
---|
2556 | struct resource *r; |
---|
2557 | int rid, type, progif; |
---|
2558 | #if 0 |
---|
2559 | /* if this device supports PCI native addressing use it */ |
---|
2560 | progif = pci_read_config(dev, PCIR_PROGIF, 1); |
---|
2561 | if ((progif & 0x8a) == 0x8a) { |
---|
2562 | if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) && |
---|
2563 | pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) { |
---|
2564 | printf("Trying ATA native PCI addressing mode\n"); |
---|
2565 | pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1); |
---|
2566 | } |
---|
2567 | } |
---|
2568 | #endif |
---|
2569 | progif = pci_read_config(dev, PCIR_PROGIF, 1); |
---|
2570 | type = SYS_RES_IOPORT; |
---|
2571 | if (progif & PCIP_STORAGE_IDE_MODEPRIM) { |
---|
2572 | pci_add_map(bus, dev, PCIR_BAR(0), rl, force, |
---|
2573 | prefetchmask & (1 << 0)); |
---|
2574 | pci_add_map(bus, dev, PCIR_BAR(1), rl, force, |
---|
2575 | prefetchmask & (1 << 1)); |
---|
2576 | } else { |
---|
2577 | rid = PCIR_BAR(0); |
---|
2578 | resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8); |
---|
2579 | r = resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, |
---|
2580 | 8, 0); |
---|
2581 | rman_set_device(r, bus); |
---|
2582 | rid = PCIR_BAR(1); |
---|
2583 | resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1); |
---|
2584 | r = resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, |
---|
2585 | 1, 0); |
---|
2586 | rman_set_device(r, bus); |
---|
2587 | } |
---|
2588 | if (progif & PCIP_STORAGE_IDE_MODESEC) { |
---|
2589 | pci_add_map(bus, dev, PCIR_BAR(2), rl, force, |
---|
2590 | prefetchmask & (1 << 2)); |
---|
2591 | pci_add_map(bus, dev, PCIR_BAR(3), rl, force, |
---|
2592 | prefetchmask & (1 << 3)); |
---|
2593 | } else { |
---|
2594 | rid = PCIR_BAR(2); |
---|
2595 | resource_list_add(rl, type, rid, 0x170, 0x177, 8); |
---|
2596 | r = resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, |
---|
2597 | 8, 0); |
---|
2598 | rman_set_device(r, bus); |
---|
2599 | rid = PCIR_BAR(3); |
---|
2600 | resource_list_add(rl, type, rid, 0x376, 0x376, 1); |
---|
2601 | r = resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, |
---|
2602 | 1, 0); |
---|
2603 | rman_set_device(r, bus); |
---|
2604 | } |
---|
2605 | pci_add_map(bus, dev, PCIR_BAR(4), rl, force, |
---|
2606 | prefetchmask & (1 << 4)); |
---|
2607 | pci_add_map(bus, dev, PCIR_BAR(5), rl, force, |
---|
2608 | prefetchmask & (1 << 5)); |
---|
2609 | } |
---|
2610 | |
---|
/*
 * Figure out which IRQ to use for this device's INTx pin and record it.
 * The IRQ is taken, in priority order, from a user tunable (non-RTEMS
 * builds only), from the parent bus via PCI_ASSIGN_INTERRUPT(), or from
 * the intline config register.  If force_route is non-zero the bus is
 * asked to route an interrupt even when intline already looks valid.
 * The chosen IRQ is written back to PCIR_INTLINE and added to the
 * device's resource list as SYS_RES_IRQ rid 0.
 */
static void
pci_assign_interrupt(device_t bus, device_t dev, int force_route)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	char tunable_name[64];
	int irq;

	/* Has to have an intpin to have an interrupt. */
	if (cfg->intpin == 0)
		return;

	/* Let the user override the IRQ with a tunable. */
	irq = PCI_INVALID_IRQ;
#ifndef __rtems__
	/* Tunable name, e.g. "hw.pci0.1.2.INTA.irq" (intpin 1 == INTA). */
	snprintf(tunable_name, sizeof(tunable_name),
	    "hw.pci%d.%d.%d.INT%c.irq",
	    cfg->domain, cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
	/* Reject out-of-range user values (valid IRQs are 1..254). */
	if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
		irq = PCI_INVALID_IRQ;
#endif /* __rtems__ */

	/*
	 * If we didn't get an IRQ via the tunable, then we either use the
	 * IRQ value in the intline register or we ask the bus to route an
	 * interrupt for us. If force_route is true, then we only use the
	 * value in the intline register if the bus was unable to assign an
	 * IRQ.
	 */
	if (!PCI_INTERRUPT_VALID(irq)) {
		if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
			irq = PCI_ASSIGN_INTERRUPT(bus, dev);
		if (!PCI_INTERRUPT_VALID(irq))
			irq = cfg->intline;
	}

	/* If after all that we don't have an IRQ, just bail. */
	if (!PCI_INTERRUPT_VALID(irq))
		return;

	/* Update the config register if it changed. */
	if (irq != cfg->intline) {
		cfg->intline = irq;
		pci_write_config(dev, PCIR_INTLINE, irq, 1);
	}

	/* Add this IRQ as rid 0 interrupt resource. */
	resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
}
---|
2660 | |
---|
/*
 * Perform early OHCI takeover from SMM.
 *
 * If the controller's OHCI_IR (interrupt routing) bit is set, the BIOS/SMM
 * still owns the controller.  Request an ownership change via OHCI_OCR,
 * poll up to 100ms for the SMM to release it, and hard-reset the host
 * controller if it does not respond.  Interrupts are disabled afterwards
 * so the controller stays quiet until a driver attaches.
 */
static void
ohci_early_takeover(device_t self)
{
	struct resource *res;
	uint32_t ctl;
	int rid;
	int i;

	/* Map the controller's operational registers (BAR 0). */
	rid = PCIR_BAR(0);
	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (res == NULL)
		return;

	ctl = bus_read_4(res, OHCI_CONTROL);
	if (ctl & OHCI_IR) {
		if (bootverbose)
			printf("ohci early: "
			    "SMM active, request owner change\n");
		/* Ask the SMM to hand over ownership. */
		bus_write_4(res, OHCI_COMMAND_STATUS, OHCI_OCR);
		/* Poll for up to 100 x 1ms for OHCI_IR to clear. */
		for (i = 0; (i < 100) && (ctl & OHCI_IR); i++) {
			DELAY(1000);
			ctl = bus_read_4(res, OHCI_CONTROL);
		}
		if (ctl & OHCI_IR) {
			if (bootverbose)
				printf("ohci early: "
				    "SMM does not respond, resetting\n");
			/* Force the controller into the reset state. */
			bus_write_4(res, OHCI_CONTROL, OHCI_HCFS_RESET);
		}
		/* Disable interrupts */
		bus_write_4(res, OHCI_INTERRUPT_DISABLE, OHCI_ALL_INTRS);
	}

	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
}
---|
2697 | |
---|
#ifndef __rtems__
/*
 * Perform early UHCI takeover from SMM.
 *
 * Unlike OHCI/EHCI there is no handshake: legacy support is simply
 * switched off in config space and the controller's interrupts are
 * masked via its I/O registers.
 */
static void
uhci_early_takeover(device_t self)
{
	struct resource *res;
	int rid;

	/*
	 * Set the PIRQD enable bit and switch off all the others. We don't
	 * want legacy support to interfere with us XXX Does this also mean
	 * that the BIOS won't touch the keyboard anymore if it is connected
	 * to the ports of the root hub?
	 */
	pci_write_config(self, PCI_LEGSUP, PCI_LEGSUP_USBPIRQDEN, 2);

	/* Disable interrupts */
	rid = PCI_UHCI_BASE_REG;
	res = bus_alloc_resource_any(self, SYS_RES_IOPORT, &rid, RF_ACTIVE);
	if (res != NULL) {
		bus_write_2(res, UHCI_INTR, 0);
		bus_release_resource(self, SYS_RES_IOPORT, rid, res);
	}
}
#endif /* __rtems__ */
---|
2723 | |
---|
/*
 * Perform early EHCI takeover from SMM.
 *
 * Walk the controller's extended capability list looking for the legacy
 * support capability.  If the BIOS semaphore is set, the SMM owns the
 * controller: set the OS semaphore, poll up to 100ms for the BIOS to
 * release its semaphore, then disable the controller's interrupts.
 */
static void
ehci_early_takeover(device_t self)
{
	struct resource *res;
	uint32_t cparams;
	uint32_t eec;
	uint8_t eecp;
	uint8_t bios_sem;
	uint8_t offs;
	int rid;
	int i;

	/* Map the controller's capability/operational registers (BAR 0). */
	rid = PCIR_BAR(0);
	res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (res == NULL)
		return;

	cparams = bus_read_4(res, EHCI_HCCPARAMS);

	/* Synchronise with the BIOS if it owns the controller. */
	for (eecp = EHCI_HCC_EECP(cparams); eecp != 0;
	    eecp = EHCI_EECP_NEXT(eec)) {
		/* Each extended capability lives in PCI config space. */
		eec = pci_read_config(self, eecp, 4);
		if (EHCI_EECP_ID(eec) != EHCI_EC_LEGSUP) {
			continue;
		}
		bios_sem = pci_read_config(self, eecp +
		    EHCI_LEGSUP_BIOS_SEM, 1);
		if (bios_sem == 0) {
			/* BIOS does not own this controller. */
			continue;
		}
		if (bootverbose)
			printf("ehci early: "
			    "SMM active, request owner change\n");

		/* Claim ownership by raising the OS semaphore. */
		pci_write_config(self, eecp + EHCI_LEGSUP_OS_SEM, 1, 1);

		/* Poll for up to 100 x 1ms for the BIOS to let go. */
		for (i = 0; (i < 100) && (bios_sem != 0); i++) {
			DELAY(1000);
			bios_sem = pci_read_config(self, eecp +
			    EHCI_LEGSUP_BIOS_SEM, 1);
		}

		if (bios_sem != 0) {
			if (bootverbose)
				printf("ehci early: "
				    "SMM does not respond\n");
		}
		/* Disable interrupts */
		offs = EHCI_CAPLENGTH(bus_read_4(res, EHCI_CAPLEN_HCIVERSION));
		bus_write_4(res, offs + EHCI_USBINTR, 0);
	}
	bus_release_resource(self, SYS_RES_MEMORY, rid, res);
}
---|
2779 | |
---|
/*
 * Populate the device's resource list from its BARs, quirks and INTx pin,
 * and perform early USB host-controller takeover from the SMM/BIOS where
 * applicable.  'force' and 'prefetchmask' are passed through to
 * pci_add_map()/pci_ata_maps().
 */
void
pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list *rl = &dinfo->resources;
	struct pci_quirk *q;
	int i;

	/* ATA devices needs special map treatment */
	if ((pci_get_class(dev) == PCIC_STORAGE) &&
	    (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
	    ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
	     (!pci_read_config(dev, PCIR_BAR(0), 4) &&
	      !pci_read_config(dev, PCIR_BAR(2), 4))) )
		pci_ata_maps(bus, dev, rl, force, prefetchmask);
	else
		/* pci_add_map() returns the number of BAR dwords consumed. */
		for (i = 0; i < cfg->nummaps;)
			i += pci_add_map(bus, dev, PCIR_BAR(i), rl, force,
			    prefetchmask & (1 << i));

	/*
	 * Add additional, quirked resources.
	 */
	for (q = &pci_quirks[0]; q->devid; q++) {
		if (q->devid == ((cfg->device << 16) | cfg->vendor)
		    && q->type == PCI_QUIRK_MAP_REG)
			pci_add_map(bus, dev, q->arg1, rl, force, 0);
	}

	if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
#ifdef __PCI_REROUTE_INTERRUPT
		/*
		 * Try to re-route interrupts. Sometimes the BIOS or
		 * firmware may leave bogus values in these registers.
		 * If the re-route fails, then just stick with what we
		 * have.
		 */
		pci_assign_interrupt(bus, dev, 1);
#else
		pci_assign_interrupt(bus, dev, 0);
#endif
	}

	/* Take USB controllers away from the BIOS/SMM before attach. */
	if (pci_usb_takeover && pci_get_class(dev) == PCIC_SERIALBUS &&
	    pci_get_subclass(dev) == PCIS_SERIALBUS_USB) {
		if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_EHCI)
			ehci_early_takeover(dev);
		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_OHCI)
			ohci_early_takeover(dev);
#ifndef __rtems__
		else if (pci_get_progif(dev) == PCIP_SERIALBUS_USB_UHCI)
			uhci_early_takeover(dev);
#endif /* __rtems__ */
	}
}
---|
2836 | |
---|
2837 | void |
---|
2838 | pci_add_children(device_t dev, int domain, int busno, size_t dinfo_size) |
---|
2839 | { |
---|
2840 | #define REG(n, w) PCIB_READ_CONFIG(pcib, busno, s, f, n, w) |
---|
2841 | device_t pcib = device_get_parent(dev); |
---|
2842 | struct pci_devinfo *dinfo; |
---|
2843 | int maxslots; |
---|
2844 | int s, f, pcifunchigh; |
---|
2845 | uint8_t hdrtype; |
---|
2846 | |
---|
2847 | KASSERT(dinfo_size >= sizeof(struct pci_devinfo), |
---|
2848 | ("dinfo_size too small")); |
---|
2849 | maxslots = PCIB_MAXSLOTS(pcib); |
---|
2850 | for (s = 0; s <= maxslots; s++) { |
---|
2851 | pcifunchigh = 0; |
---|
2852 | f = 0; |
---|
2853 | DELAY(1); |
---|
2854 | hdrtype = REG(PCIR_HDRTYPE, 1); |
---|
2855 | if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE) |
---|
2856 | continue; |
---|
2857 | if (hdrtype & PCIM_MFDEV) |
---|
2858 | pcifunchigh = PCI_FUNCMAX; |
---|
2859 | for (f = 0; f <= pcifunchigh; f++) { |
---|
2860 | dinfo = pci_read_device(pcib, domain, busno, s, f, |
---|
2861 | dinfo_size); |
---|
2862 | if (dinfo != NULL) { |
---|
2863 | pci_add_child(dev, dinfo); |
---|
2864 | } |
---|
2865 | } |
---|
2866 | } |
---|
2867 | #undef REG |
---|
2868 | } |
---|
2869 | |
---|
/*
 * Create a newbus child for a probed PCI function and wire up its ivars,
 * resource list and resources.  Config space is saved first and then
 * restored so the device starts from a known register state before its
 * BARs are enumerated.
 */
void
pci_add_child(device_t bus, struct pci_devinfo *dinfo)
{
	dinfo->cfg.dev = device_add_child(bus, NULL, -1);
	device_set_ivars(dinfo->cfg.dev, dinfo);
	resource_list_init(&dinfo->resources);
	pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
	pci_cfg_restore(dinfo->cfg.dev, dinfo);
	pci_print_verbose(dinfo);
	/* force=0, prefetchmask=0: only enabled BARs, no prefetch hints. */
	pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
}
---|
2881 | |
---|
2882 | static int |
---|
2883 | pci_probe(device_t dev) |
---|
2884 | { |
---|
2885 | |
---|
2886 | device_set_desc(dev, "PCI bus"); |
---|
2887 | |
---|
2888 | /* Allow other subclasses to override this driver. */ |
---|
2889 | return (BUS_PROBE_GENERIC); |
---|
2890 | } |
---|
2891 | |
---|
2892 | static int |
---|
2893 | pci_attach(device_t dev) |
---|
2894 | { |
---|
2895 | int busno, domain; |
---|
2896 | |
---|
2897 | /* |
---|
2898 | * Since there can be multiple independantly numbered PCI |
---|
2899 | * busses on systems with multiple PCI domains, we can't use |
---|
2900 | * the unit number to decide which bus we are probing. We ask |
---|
2901 | * the parent pcib what our domain and bus numbers are. |
---|
2902 | */ |
---|
2903 | domain = pcib_get_domain(dev); |
---|
2904 | busno = pcib_get_bus(dev); |
---|
2905 | if (bootverbose) |
---|
2906 | device_printf(dev, "domain=%d, physical bus=%d\n", |
---|
2907 | domain, busno); |
---|
2908 | pci_add_children(dev, domain, busno, sizeof(struct pci_devinfo)); |
---|
2909 | return (bus_generic_attach(dev)); |
---|
2910 | } |
---|
2911 | |
---|
/*
 * Suspend the PCI bus: save each child's config space, suspend the
 * children, then power attached type-0 devices down (to D3, or to the
 * state ACPI suggests when an ACPI device is present and
 * pci_do_power_resume is enabled).  Returns 0 or an errno from the
 * generic suspend path.
 */
int
pci_suspend(device_t dev)
{
	int dstate, error, i, numdevs;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	/*
	 * Save the PCI configuration space for each child and set the
	 * device in the appropriate power state for this sleep state.
	 */
	acpi_dev = NULL;
	if (pci_do_power_resume)
		acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
		return (error);
	for (i = 0; i < numdevs; i++) {
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		pci_cfg_save(child, dinfo, 0);
	}

	/* Suspend devices before potentially powering them down. */
	error = bus_generic_suspend(dev);
	if (error) {
		/* devlist was allocated by device_get_children(). */
		free(devlist, M_TEMP);
		return (error);
	}

	/*
	 * Always set the device to D3. If ACPI suggests a different
	 * power state, use it instead. If ACPI is not present, the
	 * firmware is responsible for managing device power. Skip
	 * children who aren't attached since they are powered down
	 * separately. Only manage type 0 devices for now.
	 */
	for (i = 0; acpi_dev && i < numdevs; i++) {
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
			dstate = PCI_POWERSTATE_D3;
			/* ACPI may override the default D3 target state. */
			ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
			pci_set_powerstate(child, dstate);
		}
	}
	free(devlist, M_TEMP);
	return (0);
}
---|
2960 | |
---|
/*
 * Resume the PCI bus: power each attached type-0 child back to D0 (when
 * ACPI-assisted power management is enabled), restore its saved config
 * space, and then run the generic resume path.
 */
int
pci_resume(device_t dev)
{
	int i, numdevs, error;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	/*
	 * Set each child to D0 and restore its PCI configuration space.
	 */
	acpi_dev = NULL;
	if (pci_do_power_resume)
		acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	if ((error = device_get_children(dev, &devlist, &numdevs)) != 0)
		return (error);
	for (i = 0; i < numdevs; i++) {
		/*
		 * Notify ACPI we're going to D0 but ignore the result. If
		 * ACPI is not present, the firmware is responsible for
		 * managing device power. Only manage type 0 devices for now.
		 */
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		if (acpi_dev && device_is_attached(child) &&
		    dinfo->cfg.hdrtype == 0) {
			ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
			pci_set_powerstate(child, PCI_POWERSTATE_D0);
		}

		/* Now the device is powered up, restore its config space. */
		pci_cfg_restore(child, dinfo);
	}
	free(devlist, M_TEMP);
	return (bus_generic_resume(dev));
}
---|
2996 | |
---|
2997 | static void |
---|
2998 | pci_load_vendor_data(void) |
---|
2999 | { |
---|
3000 | caddr_t vendordata, info; |
---|
3001 | |
---|
3002 | if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) { |
---|
3003 | info = preload_search_info(vendordata, MODINFO_ADDR); |
---|
3004 | pci_vendordata = *(char **)info; |
---|
3005 | info = preload_search_info(vendordata, MODINFO_SIZE); |
---|
3006 | pci_vendordata_size = *(size_t *)info; |
---|
3007 | /* terminate the database */ |
---|
3008 | pci_vendordata[pci_vendordata_size] = '\n'; |
---|
3009 | } |
---|
3010 | } |
---|
3011 | |
---|
3012 | void |
---|
3013 | pci_driver_added(device_t dev, driver_t *driver) |
---|
3014 | { |
---|
3015 | int numdevs; |
---|
3016 | device_t *devlist; |
---|
3017 | device_t child; |
---|
3018 | struct pci_devinfo *dinfo; |
---|
3019 | int i; |
---|
3020 | |
---|
3021 | if (bootverbose) |
---|
3022 | device_printf(dev, "driver added\n"); |
---|
3023 | DEVICE_IDENTIFY(driver, dev); |
---|
3024 | if (device_get_children(dev, &devlist, &numdevs) != 0) |
---|
3025 | return; |
---|
3026 | for (i = 0; i < numdevs; i++) { |
---|
3027 | child = devlist[i]; |
---|
3028 | if (device_get_state(child) != DS_NOTPRESENT) |
---|
3029 | continue; |
---|
3030 | dinfo = device_get_ivars(child); |
---|
3031 | pci_print_verbose(dinfo); |
---|
3032 | if (bootverbose) |
---|
3033 | pci_printf(&dinfo->cfg, "reprobing on driver added\n"); |
---|
3034 | pci_cfg_restore(child, dinfo); |
---|
3035 | if (device_probe_and_attach(child) != 0) |
---|
3036 | pci_cfg_save(child, dinfo, 1); |
---|
3037 | } |
---|
3038 | free(devlist, M_TEMP); |
---|
3039 | } |
---|
3040 | |
---|
/*
 * Bus method to set up an interrupt handler on a child's IRQ resource.
 * For direct children this also programs the hardware: rid 0 means
 * classic INTx (re-enable it in the command register); any other rid is
 * an MSI or MSI-X message, which is mapped via the parent bridge and
 * enabled on first use, with INTx disabled while MSI/MSI-X is active.
 * Returns 0 on success or an errno; on MSI mapping failure the generic
 * handler set up earlier is torn down again.
 */
int
pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
    driver_filter_t *filter, driver_intr_t *intr, void *arg, void **cookiep)
{
	struct pci_devinfo *dinfo;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	uint64_t addr;
	uint32_t data;
	void *cookie;
	int error, rid;

	error = bus_generic_setup_intr(dev, child, irq, flags, filter, intr,
	    arg, &cookie);
	if (error)
		return (error);

	/* If this is not a direct child, just bail out. */
	if (device_get_parent(child) != dev) {
		*cookiep = cookie;
		return(0);
	}

	rid = rman_get_rid(irq);
	if (rid == 0) {
		/* Make sure that INTx is enabled */
		pci_clear_command_bit(dev, child, PCIM_CMD_INTxDIS);
	} else {
		/*
		 * Check to see if the interrupt is MSI or MSI-X.
		 * Ask our parent to map the MSI and give
		 * us the address and data register values.
		 * If we fail for some reason, teardown the
		 * interrupt handler.
		 */
		dinfo = device_get_ivars(child);
		if (dinfo->cfg.msi.msi_alloc > 0) {
			/* MSI: map the vector lazily on first handler. */
			if (dinfo->cfg.msi.msi_addr == 0) {
				KASSERT(dinfo->cfg.msi.msi_handlers == 0,
			    ("MSI has handlers, but vectors not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data);
				if (error)
					goto bad;
				dinfo->cfg.msi.msi_addr = addr;
				dinfo->cfg.msi.msi_data = data;
			}
			/* Enable MSI in hardware when the first handler arrives. */
			if (dinfo->cfg.msi.msi_handlers == 0)
				pci_enable_msi(child, dinfo->cfg.msi.msi_addr,
				    dinfo->cfg.msi.msi_data);
			dinfo->cfg.msi.msi_handlers++;
		} else {
			/* MSI-X: rid N uses table entry N-1. */
			KASSERT(dinfo->cfg.msix.msix_alloc > 0,
			    ("No MSI or MSI-X interrupts allocated"));
			KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
			    ("MSI-X index too high"));
			mte = &dinfo->cfg.msix.msix_table[rid - 1];
			KASSERT(mte->mte_vector != 0, ("no message vector"));
			mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
			KASSERT(mv->mv_irq == rman_get_start(irq),
			    ("IRQ mismatch"));
			if (mv->mv_address == 0) {
				KASSERT(mte->mte_handlers == 0,
		    ("MSI-X table entry has handlers, but vector not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data);
				if (error)
					goto bad;
				mv->mv_address = addr;
				mv->mv_data = data;
			}
			/* Program and unmask the table entry on first use. */
			if (mte->mte_handlers == 0) {
				pci_enable_msix(child, rid - 1, mv->mv_address,
				    mv->mv_data);
				pci_unmask_msix(child, rid - 1);
			}
			mte->mte_handlers++;
		}

		/* Make sure that INTx is disabled if we are using MSI/MSIX */
		pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS);
	bad:
		if (error) {
			(void)bus_generic_teardown_intr(dev, child, irq,
			    cookie);
			return (error);
		}
	}
	*cookiep = cookie;
	return (0);
}
---|
3132 | |
---|
3133 | int |
---|
3134 | pci_teardown_intr(device_t dev, device_t child, struct resource *irq, |
---|
3135 | void *cookie) |
---|
3136 | { |
---|
3137 | struct msix_table_entry *mte; |
---|
3138 | struct resource_list_entry *rle; |
---|
3139 | struct pci_devinfo *dinfo; |
---|
3140 | int error, rid; |
---|
3141 | |
---|
3142 | if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE)) |
---|
3143 | return (EINVAL); |
---|
3144 | |
---|
3145 | /* If this isn't a direct child, just bail out */ |
---|
3146 | if (device_get_parent(child) != dev) |
---|
3147 | return(bus_generic_teardown_intr(dev, child, irq, cookie)); |
---|
3148 | |
---|
3149 | rid = rman_get_rid(irq); |
---|
3150 | if (rid == 0) { |
---|
3151 | /* Mask INTx */ |
---|
3152 | pci_set_command_bit(dev, child, PCIM_CMD_INTxDIS); |
---|
3153 | } else { |
---|
3154 | /* |
---|
3155 | * Check to see if the interrupt is MSI or MSI-X. If so, |
---|
3156 | * decrement the appropriate handlers count and mask the |
---|
3157 | * MSI-X message, or disable MSI messages if the count |
---|
3158 | * drops to 0. |
---|
3159 | */ |
---|
3160 | dinfo = device_get_ivars(child); |
---|
3161 | rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid); |
---|
3162 | if (rle->res != irq) |
---|
3163 | return (EINVAL); |
---|
3164 | if (dinfo->cfg.msi.msi_alloc > 0) { |
---|
3165 | KASSERT(rid <= dinfo->cfg.msi.msi_alloc, |
---|
3166 | ("MSI-X index too high")); |
---|
3167 | if (dinfo->cfg.msi.msi_handlers == 0) |
---|
3168 | return (EINVAL); |
---|
3169 | dinfo->cfg.msi.msi_handlers--; |
---|
3170 | if (dinfo->cfg.msi.msi_handlers == 0) |
---|
3171 | pci_disable_msi(child); |
---|
3172 | } else { |
---|
3173 | KASSERT(dinfo->cfg.msix.msix_alloc > 0, |
---|
3174 | ("No MSI or MSI-X interrupts allocated")); |
---|
3175 | KASSERT(rid <= dinfo->cfg.msix.msix_table_len, |
---|
3176 | ("MSI-X index too high")); |
---|
3177 | mte = &dinfo->cfg.msix.msix_table[rid - 1]; |
---|
3178 | if (mte->mte_handlers == 0) |
---|
3179 | return (EINVAL); |
---|
3180 | mte->mte_handlers--; |
---|
3181 | if (mte->mte_handlers == 0) |
---|
3182 | pci_mask_msix(child, rid - 1); |
---|
3183 | } |
---|
3184 | } |
---|
3185 | error = bus_generic_teardown_intr(dev, child, irq, cookie); |
---|
3186 | if (rid > 0) |
---|
3187 | KASSERT(error == 0, |
---|
3188 | ("%s: generic teardown failed for MSI/MSI-X", __func__)); |
---|
3189 | return (error); |
---|
3190 | } |
---|
3191 | |
---|
3192 | int |
---|
3193 | pci_print_child(device_t dev, device_t child) |
---|
3194 | { |
---|
3195 | struct pci_devinfo *dinfo; |
---|
3196 | struct resource_list *rl; |
---|
3197 | int retval = 0; |
---|
3198 | |
---|
3199 | dinfo = device_get_ivars(child); |
---|
3200 | rl = &dinfo->resources; |
---|
3201 | |
---|
3202 | retval += bus_print_child_header(dev, child); |
---|
3203 | |
---|
3204 | retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx"); |
---|
3205 | retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx"); |
---|
3206 | retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld"); |
---|
3207 | if (device_get_flags(dev)) |
---|
3208 | retval += printf(" flags %#x", device_get_flags(dev)); |
---|
3209 | |
---|
3210 | retval += printf(" at device %d.%d", pci_get_slot(child), |
---|
3211 | pci_get_function(child)); |
---|
3212 | |
---|
3213 | retval += bus_print_child_footer(dev, child); |
---|
3214 | |
---|
3215 | return (retval); |
---|
3216 | } |
---|
3217 | |
---|
/*
 * Class/subclass description table consulted by pci_probe_nomatch()
 * when a device has no entry in the loaded PCI vendor database.
 * A subclass of -1 is a wildcard naming the whole class; the table is
 * terminated by a NULL desc pointer.
 */
static struct
{
	int	class;
	int	subclass;
	char	*desc;
} pci_nomatch_tab[] = {
	{PCIC_OLD,		-1,			"old"},
	{PCIC_OLD,		PCIS_OLD_NONVGA,	"non-VGA display device"},
	{PCIC_OLD,		PCIS_OLD_VGA,		"VGA-compatible display device"},
	{PCIC_STORAGE,		-1,			"mass storage"},
	{PCIC_STORAGE,		PCIS_STORAGE_SCSI,	"SCSI"},
	{PCIC_STORAGE,		PCIS_STORAGE_IDE,	"ATA"},
	{PCIC_STORAGE,		PCIS_STORAGE_FLOPPY,	"floppy disk"},
	{PCIC_STORAGE,		PCIS_STORAGE_IPI,	"IPI"},
	{PCIC_STORAGE,		PCIS_STORAGE_RAID,	"RAID"},
	{PCIC_STORAGE,		PCIS_STORAGE_ATA_ADMA,	"ATA (ADMA)"},
	{PCIC_STORAGE,		PCIS_STORAGE_SATA,	"SATA"},
	{PCIC_STORAGE,		PCIS_STORAGE_SAS,	"SAS"},
	{PCIC_NETWORK,		-1,			"network"},
	{PCIC_NETWORK,		PCIS_NETWORK_ETHERNET,	"ethernet"},
	{PCIC_NETWORK,		PCIS_NETWORK_TOKENRING,	"token ring"},
	{PCIC_NETWORK,		PCIS_NETWORK_FDDI,	"fddi"},
	{PCIC_NETWORK,		PCIS_NETWORK_ATM,	"ATM"},
	{PCIC_NETWORK,		PCIS_NETWORK_ISDN,	"ISDN"},
	{PCIC_DISPLAY,		-1,			"display"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_VGA,	"VGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_XGA,	"XGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_3D,	"3D"},
	{PCIC_MULTIMEDIA,	-1,			"multimedia"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_VIDEO,	"video"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_AUDIO,	"audio"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_TELE,	"telephony"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_HDA,	"HDA"},
	{PCIC_MEMORY,		-1,			"memory"},
	{PCIC_MEMORY,		PCIS_MEMORY_RAM,	"RAM"},
	{PCIC_MEMORY,		PCIS_MEMORY_FLASH,	"flash"},
	{PCIC_BRIDGE,		-1,			"bridge"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_HOST,	"HOST-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_ISA,	"PCI-ISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_EISA,	"PCI-EISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_MCA,	"PCI-MCA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCI,	"PCI-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCMCIA,	"PCI-PCMCIA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_NUBUS,	"PCI-NuBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_CARDBUS,	"PCI-CardBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_RACEWAY,	"PCI-RACEway"},
	{PCIC_SIMPLECOMM,	-1,			"simple comms"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_UART,	"UART"},	/* could detect 16550 */
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_PAR,	"parallel port"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MULSER,	"multiport serial"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MODEM,	"generic modem"},
	{PCIC_BASEPERIPH,	-1,			"base peripheral"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PIC,	"interrupt controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_DMA,	"DMA controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_TIMER,	"timer"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_RTC,	"realtime clock"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PCIHOT,	"PCI hot-plug controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_SDHC,	"SD host controller"},
	{PCIC_INPUTDEV,		-1,			"input device"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_KEYBOARD,	"keyboard"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_DIGITIZER,"digitizer"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_MOUSE,	"mouse"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_SCANNER,	"scanner"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_GAMEPORT,	"gameport"},
	{PCIC_DOCKING,		-1,			"docking station"},
	{PCIC_PROCESSOR,	-1,			"processor"},
	{PCIC_SERIALBUS,	-1,			"serial bus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FW,	"FireWire"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_ACCESS,	"AccessBus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SSA,	"SSA"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_USB,	"USB"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FC,	"Fibre Channel"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SMBUS,	"SMBus"},
	{PCIC_WIRELESS,		-1,			"wireless controller"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IRDA,	"iRDA"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IR,	"IR"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_RF,	"RF"},
	{PCIC_INTELLIIO,	-1,			"intelligent I/O controller"},
	{PCIC_INTELLIIO,	PCIS_INTELLIIO_I2O,	"I2O"},
	{PCIC_SATCOM,		-1,			"satellite communication"},
	{PCIC_SATCOM,		PCIS_SATCOM_TV,		"sat TV"},
	{PCIC_SATCOM,		PCIS_SATCOM_AUDIO,	"sat audio"},
	{PCIC_SATCOM,		PCIS_SATCOM_VOICE,	"sat voice"},
	{PCIC_SATCOM,		PCIS_SATCOM_DATA,	"sat data"},
	{PCIC_CRYPTO,		-1,			"encrypt/decrypt"},
	{PCIC_CRYPTO,		PCIS_CRYPTO_NETCOMP,	"network/computer crypto"},
	{PCIC_CRYPTO,		PCIS_CRYPTO_ENTERTAIN,	"entertainment crypto"},
	{PCIC_DASP,		-1,			"dasp"},
	{PCIC_DASP,		PCIS_DASP_DPIO,		"DPIO module"},
	{0, 0, NULL}
};
---|
3309 | |
---|
3310 | void |
---|
3311 | pci_probe_nomatch(device_t dev, device_t child) |
---|
3312 | { |
---|
3313 | int i; |
---|
3314 | char *cp, *scp, *device; |
---|
3315 | |
---|
3316 | /* |
---|
3317 | * Look for a listing for this device in a loaded device database. |
---|
3318 | */ |
---|
3319 | if ((device = pci_describe_device(child)) != NULL) { |
---|
3320 | device_printf(dev, "<%s>", device); |
---|
3321 | free(device, M_DEVBUF); |
---|
3322 | } else { |
---|
3323 | /* |
---|
3324 | * Scan the class/subclass descriptions for a general |
---|
3325 | * description. |
---|
3326 | */ |
---|
3327 | cp = "unknown"; |
---|
3328 | scp = NULL; |
---|
3329 | for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) { |
---|
3330 | if (pci_nomatch_tab[i].class == pci_get_class(child)) { |
---|
3331 | if (pci_nomatch_tab[i].subclass == -1) { |
---|
3332 | cp = pci_nomatch_tab[i].desc; |
---|
3333 | } else if (pci_nomatch_tab[i].subclass == |
---|
3334 | pci_get_subclass(child)) { |
---|
3335 | scp = pci_nomatch_tab[i].desc; |
---|
3336 | } |
---|
3337 | } |
---|
3338 | } |
---|
3339 | device_printf(dev, "<%s%s%s>", |
---|
3340 | cp ? cp : "", |
---|
3341 | ((cp != NULL) && (scp != NULL)) ? ", " : "", |
---|
3342 | scp ? scp : ""); |
---|
3343 | } |
---|
3344 | printf(" at device %d.%d (no driver attached)\n", |
---|
3345 | pci_get_slot(child), pci_get_function(child)); |
---|
3346 | pci_cfg_save(child, (struct pci_devinfo *)device_get_ivars(child), 1); |
---|
3347 | return; |
---|
3348 | } |
---|
3349 | |
---|
3350 | /* |
---|
3351 | * Parse the PCI device database, if loaded, and return a pointer to a |
---|
3352 | * description of the device. |
---|
3353 | * |
---|
3354 | * The database is flat text formatted as follows: |
---|
3355 | * |
---|
3356 | * Any line not in a valid format is ignored. |
---|
3357 | * Lines are terminated with newline '\n' characters. |
---|
3358 | * |
---|
3359 | * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then |
---|
3360 | * the vendor name. |
---|
3361 | * |
---|
3362 | * A DEVICE line is entered immediately below the corresponding VENDOR ID. |
---|
3363 | * - devices cannot be listed without a corresponding VENDOR line. |
---|
3364 | * A DEVICE line consists of a TAB, the 4 digit (hex) device code, |
---|
3365 | * another TAB, then the device name. |
---|
3366 | */ |
---|
3367 | |
---|
3368 | /* |
---|
3369 | * Assuming (ptr) points to the beginning of a line in the database, |
---|
3370 | * return the vendor or device and description of the next entry. |
---|
3371 | * The value of (vendor) or (device) inappropriate for the entry type |
---|
3372 | * is set to -1. Returns nonzero at the end of the database. |
---|
3373 | * |
---|
3374 | * Note that this is slightly unrobust in the face of corrupt data; |
---|
3375 | * we attempt to safeguard against this by spamming the end of the |
---|
3376 | * database with a newline when we initialise. |
---|
3377 | */ |
---|
3378 | static int |
---|
3379 | pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc) |
---|
3380 | { |
---|
3381 | char *cp = *ptr; |
---|
3382 | int left; |
---|
3383 | |
---|
3384 | *device = -1; |
---|
3385 | *vendor = -1; |
---|
3386 | **desc = '\0'; |
---|
3387 | for (;;) { |
---|
3388 | left = pci_vendordata_size - (cp - pci_vendordata); |
---|
3389 | if (left <= 0) { |
---|
3390 | *ptr = cp; |
---|
3391 | return(1); |
---|
3392 | } |
---|
3393 | |
---|
3394 | /* vendor entry? */ |
---|
3395 | if (*cp != '\t' && |
---|
3396 | sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2) |
---|
3397 | break; |
---|
3398 | /* device entry? */ |
---|
3399 | if (*cp == '\t' && |
---|
3400 | sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2) |
---|
3401 | break; |
---|
3402 | |
---|
3403 | /* skip to next line */ |
---|
3404 | while (*cp != '\n' && left > 0) { |
---|
3405 | cp++; |
---|
3406 | left--; |
---|
3407 | } |
---|
3408 | if (*cp == '\n') { |
---|
3409 | cp++; |
---|
3410 | left--; |
---|
3411 | } |
---|
3412 | } |
---|
3413 | /* skip to next line */ |
---|
3414 | while (*cp != '\n' && left > 0) { |
---|
3415 | cp++; |
---|
3416 | left--; |
---|
3417 | } |
---|
3418 | if (*cp == '\n' && left > 0) |
---|
3419 | cp++; |
---|
3420 | *ptr = cp; |
---|
3421 | return(0); |
---|
3422 | } |
---|
3423 | |
---|
3424 | static char * |
---|
3425 | pci_describe_device(device_t dev) |
---|
3426 | { |
---|
3427 | int vendor, device; |
---|
3428 | char *desc, *vp, *dp, *line; |
---|
3429 | |
---|
3430 | desc = vp = dp = NULL; |
---|
3431 | |
---|
3432 | /* |
---|
3433 | * If we have no vendor data, we can't do anything. |
---|
3434 | */ |
---|
3435 | if (pci_vendordata == NULL) |
---|
3436 | goto out; |
---|
3437 | |
---|
3438 | /* |
---|
3439 | * Scan the vendor data looking for this device |
---|
3440 | */ |
---|
3441 | line = pci_vendordata; |
---|
3442 | if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL) |
---|
3443 | goto out; |
---|
3444 | for (;;) { |
---|
3445 | if (pci_describe_parse_line(&line, &vendor, &device, &vp)) |
---|
3446 | goto out; |
---|
3447 | if (vendor == pci_get_vendor(dev)) |
---|
3448 | break; |
---|
3449 | } |
---|
3450 | if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL) |
---|
3451 | goto out; |
---|
3452 | for (;;) { |
---|
3453 | if (pci_describe_parse_line(&line, &vendor, &device, &dp)) { |
---|
3454 | *dp = 0; |
---|
3455 | break; |
---|
3456 | } |
---|
3457 | if (vendor != -1) { |
---|
3458 | *dp = 0; |
---|
3459 | break; |
---|
3460 | } |
---|
3461 | if (device == pci_get_device(dev)) |
---|
3462 | break; |
---|
3463 | } |
---|
3464 | if (dp[0] == '\0') |
---|
3465 | snprintf(dp, 80, "0x%x", pci_get_device(dev)); |
---|
3466 | if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) != |
---|
3467 | NULL) |
---|
3468 | sprintf(desc, "%s, %s", vp, dp); |
---|
3469 | out: |
---|
3470 | if (vp != NULL) |
---|
3471 | free(vp, M_DEVBUF); |
---|
3472 | if (dp != NULL) |
---|
3473 | free(dp, M_DEVBUF); |
---|
3474 | return(desc); |
---|
3475 | } |
---|
3476 | |
---|
/*
 * Bus ivar read accessor: copy the cached configuration-header field
 * selected by `which' into *result.  Returns 0 on success, ENOENT for
 * an unknown ivar, and EINVAL for PCI_IVAR_ETHADDR, which a generic
 * PCI device cannot provide.  All values come from the cached pcicfgregs,
 * not from live config-space reads.
 */
int
pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;

	dinfo = device_get_ivars(child);
	cfg = &dinfo->cfg;

	switch (which) {
	case PCI_IVAR_ETHADDR:
		/*
		 * The generic accessor doesn't deal with failure, so
		 * we set the return value, then return an error.
		 */
		*((uint8_t **) result) = NULL;
		return (EINVAL);
	case PCI_IVAR_SUBVENDOR:
		*result = cfg->subvendor;
		break;
	case PCI_IVAR_SUBDEVICE:
		*result = cfg->subdevice;
		break;
	case PCI_IVAR_VENDOR:
		*result = cfg->vendor;
		break;
	case PCI_IVAR_DEVICE:
		*result = cfg->device;
		break;
	case PCI_IVAR_DEVID:
		/* Combined id: device in the high 16 bits, vendor low. */
		*result = (cfg->device << 16) | cfg->vendor;
		break;
	case PCI_IVAR_CLASS:
		*result = cfg->baseclass;
		break;
	case PCI_IVAR_SUBCLASS:
		*result = cfg->subclass;
		break;
	case PCI_IVAR_PROGIF:
		*result = cfg->progif;
		break;
	case PCI_IVAR_REVID:
		*result = cfg->revid;
		break;
	case PCI_IVAR_INTPIN:
		*result = cfg->intpin;
		break;
	case PCI_IVAR_IRQ:
		*result = cfg->intline;
		break;
	case PCI_IVAR_DOMAIN:
		*result = cfg->domain;
		break;
	case PCI_IVAR_BUS:
		*result = cfg->bus;
		break;
	case PCI_IVAR_SLOT:
		*result = cfg->slot;
		break;
	case PCI_IVAR_FUNCTION:
		*result = cfg->func;
		break;
	case PCI_IVAR_CMDREG:
		*result = cfg->cmdreg;
		break;
	case PCI_IVAR_CACHELNSZ:
		*result = cfg->cachelnsz;
		break;
	case PCI_IVAR_MINGNT:
		*result = cfg->mingnt;
		break;
	case PCI_IVAR_MAXLAT:
		*result = cfg->maxlat;
		break;
	case PCI_IVAR_LATTIMER:
		*result = cfg->lattimer;
		break;
	default:
		return (ENOENT);
	}
	return (0);
}
---|
3559 | |
---|
/*
 * Bus ivar write accessor.  Only the interrupt pin may be overridden;
 * the other known ivars mirror configuration registers that are
 * treated as read-only here and yield EINVAL, while unknown ivars
 * yield ENOENT.
 */
int
pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
{
	struct pci_devinfo *dinfo;

	dinfo = device_get_ivars(child);

	switch (which) {
	case PCI_IVAR_INTPIN:
		/* Updates the cached copy only, not the hardware register. */
		dinfo->cfg.intpin = value;
		return (0);
	case PCI_IVAR_ETHADDR:
	case PCI_IVAR_SUBVENDOR:
	case PCI_IVAR_SUBDEVICE:
	case PCI_IVAR_VENDOR:
	case PCI_IVAR_DEVICE:
	case PCI_IVAR_DEVID:
	case PCI_IVAR_CLASS:
	case PCI_IVAR_SUBCLASS:
	case PCI_IVAR_PROGIF:
	case PCI_IVAR_REVID:
	case PCI_IVAR_IRQ:
	case PCI_IVAR_DOMAIN:
	case PCI_IVAR_BUS:
	case PCI_IVAR_SLOT:
	case PCI_IVAR_FUNCTION:
		return (EINVAL);	/* disallow for now */

	default:
		return (ENOENT);
	}
}
---|
3592 | |
---|
3593 | |
---|
3594 | #include <freebsd/local/opt_ddb.h> |
---|
3595 | #ifdef DDB |
---|
3596 | #include <freebsd/ddb/ddb.h> |
---|
3597 | #include <freebsd/sys/cons.h> |
---|
3598 | |
---|
3599 | /* |
---|
3600 | * List resources based on pci map registers, used for within ddb |
---|
3601 | */ |
---|
3602 | |
---|
DB_SHOW_COMMAND(pciregs, db_pci_dump)
{
	struct pci_devinfo *dinfo;
	struct devlist *devlist_head;
	struct pci_conf *p;
	const char *name;
	int i, error, none_count;

	none_count = 0;
	/* get the head of the device queue */
	devlist_head = &pci_devq;

	/*
	 * Go through the list of devices and print out devices
	 */
	for (error = 0, i = 0,
	     dinfo = STAILQ_FIRST(devlist_head);
	     (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !db_pager_quit;
	     dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {

		/* Populate pd_name and pd_unit */
		name = NULL;
		if (dinfo->cfg.dev)
			name = device_get_name(dinfo->cfg.dev);

		p = &dinfo->conf;
		/*
		 * Devices with no attached driver are labelled "none" and
		 * numbered by the none_count counter, which is incremented
		 * as a side effect of the conditional argument below.
		 */
		db_printf("%s%d@pci%d:%d:%d:%d:\tclass=0x%06x card=0x%08x "
			"chip=0x%08x rev=0x%02x hdr=0x%02x\n",
			(name && *name) ? name : "none",
			(name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
			none_count++,
			p->pc_sel.pc_domain, p->pc_sel.pc_bus, p->pc_sel.pc_dev,
			p->pc_sel.pc_func, (p->pc_class << 16) |
			(p->pc_subclass << 8) | p->pc_progif,
			(p->pc_subdevice << 16) | p->pc_subvendor,
			(p->pc_device << 16) | p->pc_vendor,
			p->pc_revid, p->pc_hdr);
	}
}
---|
3642 | #endif /* DDB */ |
---|
3643 | |
---|
/*
 * Lazily allocate the backing resource for a BAR on first request.
 * Sizes the BAR by probing it (pci_read_bar), allocates a suitably
 * sized and aligned range from the parent, records it in the child's
 * resource list owned by the bus (rman device = dev), and programs the
 * BAR with the assigned address.  Returns NULL on any failure.
 */
static struct resource *
pci_alloc_map(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	struct resource_list_entry *rle;
	struct resource *res;
	pci_addr_t map, testval;
	int mapsize;

	/*
	 * Weed out the bogons, and figure out how large the BAR/map
	 * is. Bars that read back 0 here are bogus and unimplemented.
	 * Note: atapci in legacy mode are special and handled elsewhere
	 * in the code. If you have a atapci device in legacy mode and
	 * it fails here, that other code is broken.
	 */
	res = NULL;
	pci_read_bar(child, *rid, &map, &testval);

	/* Ignore a BAR with a base of 0. */
	if (pci_mapbase(testval) == 0)
		goto out;

	/* The BAR's own memory/IO bit must agree with the requested type. */
	if (PCI_BAR_MEM(testval)) {
		if (type != SYS_RES_MEMORY) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an memio\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	} else {
		if (type != SYS_RES_IOPORT) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an ioport\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	}

	/*
	 * For real BARs, we need to override the size that
	 * the driver requests, because that's what the BAR
	 * actually uses and we would otherwise have a
	 * situation where we might allocate the excess to
	 * another driver, which won't work.
	 */
	mapsize = pci_mapsize(testval);
	count = 1UL << mapsize;
	/* BARs must be naturally aligned to their size. */
	if (RF_ALIGNMENT(flags) < mapsize)
		flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);
	if (PCI_BAR_MEM(testval) && (testval & PCIM_BAR_MEM_PREFETCH))
		flags |= RF_PREFETCHABLE;

	/*
	 * Allocate enough resource, and then write back the
	 * appropriate bar for that resource.
	 */
	res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
	    start, end, count, flags & ~RF_ACTIVE);
	if (res == NULL) {
		device_printf(child,
		    "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
		    count, *rid, type, start, end);
		goto out;
	}
	/* The bus owns the reservation until the child allocates it. */
	rman_set_device(res, dev);
	resource_list_add(rl, type, *rid, start, end, count);
	rle = resource_list_find(rl, type, *rid);
	if (rle == NULL)
		panic("pci_alloc_map: unexpectedly can't find resource.");
	rle->res = res;
	rle->start = rman_get_start(res);
	rle->end = rman_get_end(res);
	rle->count = count;
	if (bootverbose)
		device_printf(child,
		    "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
		    count, *rid, type, rman_get_start(res));
	/* Program the BAR with the address the allocator chose. */
	map = rman_get_start(res);
	pci_write_bar(child, *rid, map);
out:;
	return (res);
}
---|
3733 | |
---|
3734 | |
---|
/*
 * Bus resource allocation for PCI children.  IRQ requests may trigger
 * lazy interrupt routing; IOPORT/MEMORY requests may trigger lazy BAR
 * sizing/allocation via pci_alloc_map(), after which ownership of the
 * bus-held reservation is transferred to the child.  Everything else
 * falls through to the generic resource-list allocator.
 */
struct resource *
pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	struct resource_list_entry *rle;
	struct resource *res;
	pcicfgregs *cfg = &dinfo->cfg;

	/* Requests from grandchildren are simply passed up the tree. */
	if (device_get_parent(child) != dev)
		return (BUS_ALLOC_RESOURCE(device_get_parent(dev), child,
		    type, rid, start, end, count, flags));

	/*
	 * Perform lazy resource allocation
	 */
	switch (type) {
	case SYS_RES_IRQ:
		/*
		 * Can't alloc legacy interrupt once MSI messages have
		 * been allocated.
		 */
		if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
		    cfg->msix.msix_alloc > 0))
			return (NULL);

		/*
		 * If the child device doesn't have an interrupt
		 * routed and is deserving of an interrupt, try to
		 * assign it one.
		 */
		if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
		    (cfg->intpin != 0))
			pci_assign_interrupt(dev, child, 0);
		break;
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		/* Allocate resources for this BAR if needed. */
		rle = resource_list_find(rl, type, *rid);
		if (rle == NULL) {
			res = pci_alloc_map(dev, child, type, rid, start, end,
			    count, flags);
			if (res == NULL)
				return (NULL);
			rle = resource_list_find(rl, type, *rid);
		}

		/*
		 * If the resource belongs to the bus, then give it to
		 * the child.  We need to activate it if requested
		 * since the bus always allocates inactive resources.
		 */
		if (rle != NULL && rle->res != NULL &&
		    rman_get_device(rle->res) == dev) {
			if (bootverbose)
				device_printf(child,
			    "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
				    rman_get_size(rle->res), *rid, type,
				    rman_get_start(rle->res));
			rman_set_device(rle->res, child);
			if ((flags & RF_ACTIVE) &&
			    bus_activate_resource(child, type, *rid,
			    rle->res) != 0)
				return (NULL);
			return (rle->res);
		}
	}
	/* Default path: allocate out of the child's resource list. */
	return (resource_list_alloc(rl, dev, child, type, rid,
	    start, end, count, flags));
}
---|
3806 | |
---|
3807 | int |
---|
3808 | pci_release_resource(device_t dev, device_t child, int type, int rid, |
---|
3809 | struct resource *r) |
---|
3810 | { |
---|
3811 | int error; |
---|
3812 | |
---|
3813 | if (device_get_parent(child) != dev) |
---|
3814 | return (BUS_RELEASE_RESOURCE(device_get_parent(dev), child, |
---|
3815 | type, rid, r)); |
---|
3816 | |
---|
3817 | /* |
---|
3818 | * For BARs we don't actually want to release the resource. |
---|
3819 | * Instead, we deactivate the resource if needed and then give |
---|
3820 | * ownership of the BAR back to the bus. |
---|
3821 | */ |
---|
3822 | switch (type) { |
---|
3823 | case SYS_RES_IOPORT: |
---|
3824 | case SYS_RES_MEMORY: |
---|
3825 | if (rman_get_device(r) != child) |
---|
3826 | return (EINVAL); |
---|
3827 | if (rman_get_flags(r) & RF_ACTIVE) { |
---|
3828 | error = bus_deactivate_resource(child, type, rid, r); |
---|
3829 | if (error) |
---|
3830 | return (error); |
---|
3831 | } |
---|
3832 | rman_set_device(r, dev); |
---|
3833 | return (0); |
---|
3834 | } |
---|
3835 | return (bus_generic_rl_release_resource(dev, child, type, rid, r)); |
---|
3836 | } |
---|
3837 | |
---|
3838 | int |
---|
3839 | pci_activate_resource(device_t dev, device_t child, int type, int rid, |
---|
3840 | struct resource *r) |
---|
3841 | { |
---|
3842 | int error; |
---|
3843 | |
---|
3844 | error = bus_generic_activate_resource(dev, child, type, rid, r); |
---|
3845 | if (error) |
---|
3846 | return (error); |
---|
3847 | |
---|
3848 | /* Enable decoding in the command register when activating BARs. */ |
---|
3849 | if (device_get_parent(child) == dev) { |
---|
3850 | switch (type) { |
---|
3851 | case SYS_RES_IOPORT: |
---|
3852 | case SYS_RES_MEMORY: |
---|
3853 | error = PCI_ENABLE_IO(dev, child, type); |
---|
3854 | break; |
---|
3855 | } |
---|
3856 | } |
---|
3857 | return (error); |
---|
3858 | } |
---|
3859 | |
---|
/*
 * Remove an entry from a child's resource list.  A bus-owned, inactive
 * backing resource is released first (clearing the BAR so the device
 * stops decoding, unless the platform declares BAR value 0 valid);
 * entries still held active by the child are refused with a warning.
 */
void
pci_delete_resource(device_t dev, device_t child, int type, int rid)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	/* Only operate on our own children. */
	if (device_get_parent(child) != dev)
		return;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;
	rle = resource_list_find(rl, type, rid);
	if (rle == NULL)
		return;

	if (rle->res) {
		if (rman_get_device(rle->res) != dev ||
		    rman_get_flags(rle->res) & RF_ACTIVE) {
			device_printf(dev, "delete_resource: "
			    "Resource still owned by child, oops. "
			    "(type=%d, rid=%d, addr=%lx)\n",
			    rle->type, rle->rid,
			    rman_get_start(rle->res));
			return;
		}

#ifndef __PCI_BAR_ZERO_VALID
		/*
		 * If this is a BAR, clear the BAR so it stops
		 * decoding before releasing the resource.
		 */
		switch (type) {
		case SYS_RES_IOPORT:
		case SYS_RES_MEMORY:
			pci_write_bar(child, rid, 0);
			break;
		}
#endif
		bus_release_resource(dev, type, rid, rle->res);
	}
	resource_list_delete(rl, type, rid);
}
---|
3903 | |
---|
3904 | struct resource_list * |
---|
3905 | pci_get_resource_list (device_t dev, device_t child) |
---|
3906 | { |
---|
3907 | struct pci_devinfo *dinfo = device_get_ivars(child); |
---|
3908 | |
---|
3909 | return (&dinfo->resources); |
---|
3910 | } |
---|
3911 | |
---|
3912 | uint32_t |
---|
3913 | pci_read_config_method(device_t dev, device_t child, int reg, int width) |
---|
3914 | { |
---|
3915 | struct pci_devinfo *dinfo = device_get_ivars(child); |
---|
3916 | pcicfgregs *cfg = &dinfo->cfg; |
---|
3917 | |
---|
3918 | return (PCIB_READ_CONFIG(device_get_parent(dev), |
---|
3919 | cfg->bus, cfg->slot, cfg->func, reg, width)); |
---|
3920 | } |
---|
3921 | |
---|
3922 | void |
---|
3923 | pci_write_config_method(device_t dev, device_t child, int reg, |
---|
3924 | uint32_t val, int width) |
---|
3925 | { |
---|
3926 | struct pci_devinfo *dinfo = device_get_ivars(child); |
---|
3927 | pcicfgregs *cfg = &dinfo->cfg; |
---|
3928 | |
---|
3929 | PCIB_WRITE_CONFIG(device_get_parent(dev), |
---|
3930 | cfg->bus, cfg->slot, cfg->func, reg, val, width); |
---|
3931 | } |
---|
3932 | |
---|
3933 | int |
---|
3934 | pci_child_location_str_method(device_t dev, device_t child, char *buf, |
---|
3935 | size_t buflen) |
---|
3936 | { |
---|
3937 | |
---|
3938 | snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child), |
---|
3939 | pci_get_function(child)); |
---|
3940 | return (0); |
---|
3941 | } |
---|
3942 | |
---|
3943 | int |
---|
3944 | pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf, |
---|
3945 | size_t buflen) |
---|
3946 | { |
---|
3947 | struct pci_devinfo *dinfo; |
---|
3948 | pcicfgregs *cfg; |
---|
3949 | |
---|
3950 | dinfo = device_get_ivars(child); |
---|
3951 | cfg = &dinfo->cfg; |
---|
3952 | snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x " |
---|
3953 | "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device, |
---|
3954 | cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass, |
---|
3955 | cfg->progif); |
---|
3956 | return (0); |
---|
3957 | } |
---|
3958 | |
---|
3959 | int |
---|
3960 | pci_assign_interrupt_method(device_t dev, device_t child) |
---|
3961 | { |
---|
3962 | struct pci_devinfo *dinfo = device_get_ivars(child); |
---|
3963 | pcicfgregs *cfg = &dinfo->cfg; |
---|
3964 | |
---|
3965 | return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child, |
---|
3966 | cfg->intpin)); |
---|
3967 | } |
---|
3968 | |
---|
3969 | static int |
---|
3970 | pci_modevent(module_t mod, int what, void *arg) |
---|
3971 | { |
---|
3972 | static struct cdev *pci_cdev; |
---|
3973 | |
---|
3974 | switch (what) { |
---|
3975 | case MOD_LOAD: |
---|
3976 | STAILQ_INIT(&pci_devq); |
---|
3977 | pci_generation = 0; |
---|
3978 | pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644, |
---|
3979 | "pci"); |
---|
3980 | pci_load_vendor_data(); |
---|
3981 | break; |
---|
3982 | |
---|
3983 | case MOD_UNLOAD: |
---|
3984 | destroy_dev(pci_cdev); |
---|
3985 | break; |
---|
3986 | } |
---|
3987 | |
---|
3988 | return (0); |
---|
3989 | } |
---|
3990 | |
---|
/*
 * Restore a device's configuration registers from the copy cached by
 * pci_cfg_save(), e.g. after a suspend/resume or power-state change.
 * The power state is raised to D0 first, since leaving D3 resets the
 * BARs and other header registers that are about to be rewritten.
 */
void
pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
{
	int i;

	/*
	 * Only do header type 0 devices.  Type 1 devices are bridges,
	 * which we know need special treatment.  Type 2 devices are
	 * cardbus bridges which also require special treatment.
	 * Other types are unknown, and we err on the side of safety
	 * by ignoring them.
	 */
	if (dinfo->cfg.hdrtype != 0)
		return;

	/*
	 * Restore the device to full power mode.  We must do this
	 * before we restore the registers because moving from D3 to
	 * D0 will cause the chip's BARs and some other registers to
	 * be reset to some unknown power on reset values.  Cut down
	 * the noise on boot by doing nothing if we are already in
	 * state D0.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	}
	/* Rewrite the BARs before re-enabling decode via the command reg. */
	for (i = 0; i < dinfo->cfg.nummaps; i++)
		pci_write_config(dev, PCIR_BAR(i), dinfo->cfg.bar[i], 4);
	pci_write_config(dev, PCIR_BIOS, dinfo->cfg.bios, 4);
	pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
	pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
	pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
	pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
	pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
	pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
	pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);

	/* Restore MSI and MSI-X configurations if they are present. */
	if (dinfo->cfg.msi.msi_location != 0)
		pci_resume_msi(dev);
	if (dinfo->cfg.msix.msix_location != 0)
		pci_resume_msix(dev);
}
---|
4036 | |
---|
/*
 * Snapshot the writable portion of a device's type 0 configuration header
 * into the cached copy in *dinfo so that pci_cfg_restore() can replay it
 * after a power transition.  If 'setstate' is non-zero and the global
 * pci_do_power_nodriver policy allows it, the device is also placed into
 * the D3 (lowest) power state.
 */
void
pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
{
	int i;
	uint32_t cls;	/* PCI base class, used for the powerdown policy */
	int ps;		/* current power state */

	/*
	 * Only do header type 0 devices.  Type 1 devices are bridges, which
	 * we know need special treatment.  Type 2 devices are cardbus bridges
	 * which also require special treatment.  Other types are unknown, and
	 * we err on the side of safety by ignoring them.  Powering down
	 * bridges should not be undertaken lightly.
	 */
	if (dinfo->cfg.hdrtype != 0)
		return;
	/* Snapshot the BARs and the expansion ROM base address. */
	for (i = 0; i < dinfo->cfg.nummaps; i++)
		dinfo->cfg.bar[i] = pci_read_config(dev, PCIR_BAR(i), 4);
	dinfo->cfg.bios = pci_read_config(dev, PCIR_BIOS, 4);

	/*
	 * Some drivers apparently write to these registers w/o updating our
	 * cached copy.  No harm happens if we update the copy, so do so here
	 * so we can restore them.  The COMMAND register is modified by the
	 * bus w/o updating the cache.  This should represent the normally
	 * writable portion of the 'defined' part of type 0 headers.  In
	 * theory we also need to save/restore the PCI capability structures
	 * we know about, but apart from power we don't know any that are
	 * writable.
	 */
	dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
	dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
	dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
	dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
	dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
	dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
	dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
	dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
	dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
	dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
	dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
	dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
	dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);

	/*
	 * Don't set the state for display devices, base peripherals and
	 * memory devices since bad things happen when they are powered down.
	 * We should (a) have drivers that can easily detach and (b) use
	 * generic drivers for these devices so that some device actually
	 * attaches.  We need to make sure that when we implement (a) we don't
	 * power the device down on a reattach.
	 */
	cls = pci_get_class(dev);
	if (!setstate)
		return;
	/*
	 * Apply the tunable powerdown policy for devices without a driver;
	 * each level falls through to include the restrictions of the one
	 * above it.
	 */
	switch (pci_do_power_nodriver)
	{
	case 0:		/* NO powerdown at all */
		return;
	case 1:		/* Conservative about what to power down */
		if (cls == PCIC_STORAGE)
			return;
		/*FALLTHROUGH*/
	case 2:		/* Aggressive about what to power down */
		if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
		    cls == PCIC_BASEPERIPH)
			return;
		/*FALLTHROUGH*/
	case 3:		/* Power down everything */
		break;
	}
	/*
	 * PCI spec says we can only go into D3 state from D0 state.
	 * Transition from D[12] into D0 before going to D3 state.
	 */
	ps = pci_get_powerstate(dev);
	if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
		pci_set_powerstate(dev, PCI_POWERSTATE_D3);
}