1 | #include <freebsd/machine/rtems-bsd-config.h> |
---|
2 | |
---|
3 | /****************************************************************************** |
---|
4 | |
---|
5 | Copyright (c) 2001-2010, Intel Corporation |
---|
6 | All rights reserved. |
---|
7 | |
---|
8 | Redistribution and use in source and binary forms, with or without |
---|
9 | modification, are permitted provided that the following conditions are met: |
---|
10 | |
---|
11 | 1. Redistributions of source code must retain the above copyright notice, |
---|
12 | this list of conditions and the following disclaimer. |
---|
13 | |
---|
14 | 2. Redistributions in binary form must reproduce the above copyright |
---|
15 | notice, this list of conditions and the following disclaimer in the |
---|
16 | documentation and/or other materials provided with the distribution. |
---|
17 | |
---|
18 | 3. Neither the name of the Intel Corporation nor the names of its |
---|
19 | contributors may be used to endorse or promote products derived from |
---|
20 | this software without specific prior written permission. |
---|
21 | |
---|
22 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
---|
23 | AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
---|
24 | IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
---|
25 | ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
---|
26 | LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
---|
27 | CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
---|
28 | SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
---|
29 | INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
---|
30 | CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
---|
31 | ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
---|
32 | POSSIBILITY OF SUCH DAMAGE. |
---|
33 | |
---|
34 | ******************************************************************************/ |
---|
35 | /*$FreeBSD$*/ |
---|
36 | |
---|
37 | |
---|
38 | #ifdef HAVE_KERNEL_OPTION_HEADERS |
---|
39 | #include <freebsd/local/opt_device_polling.h> |
---|
40 | #include <freebsd/local/opt_inet.h> |
---|
41 | #include <freebsd/local/opt_altq.h> |
---|
42 | #endif |
---|
43 | |
---|
44 | #include <freebsd/sys/param.h> |
---|
45 | #include <freebsd/sys/systm.h> |
---|
46 | #if __FreeBSD_version >= 800000 |
---|
47 | #include <freebsd/sys/buf_ring.h> |
---|
48 | #endif |
---|
49 | #include <freebsd/sys/bus.h> |
---|
50 | #include <freebsd/sys/endian.h> |
---|
51 | #include <freebsd/sys/kernel.h> |
---|
52 | #include <freebsd/sys/kthread.h> |
---|
53 | #include <freebsd/sys/malloc.h> |
---|
54 | #include <freebsd/sys/mbuf.h> |
---|
55 | #include <freebsd/sys/module.h> |
---|
56 | #include <freebsd/sys/rman.h> |
---|
57 | #include <freebsd/sys/socket.h> |
---|
58 | #include <freebsd/sys/sockio.h> |
---|
59 | #include <freebsd/sys/sysctl.h> |
---|
60 | #include <freebsd/sys/taskqueue.h> |
---|
61 | #include <freebsd/sys/eventhandler.h> |
---|
62 | #include <freebsd/sys/pcpu.h> |
---|
63 | #ifndef __rtems__ |
---|
64 | #include <freebsd/sys/smp.h> |
---|
65 | #include <freebsd/machine/smp.h> |
---|
66 | #endif |
---|
67 | #include <freebsd/machine/bus.h> |
---|
68 | #include <freebsd/machine/resource.h> |
---|
69 | |
---|
70 | #include <freebsd/net/bpf.h> |
---|
71 | #include <freebsd/net/ethernet.h> |
---|
72 | #include <freebsd/net/if.h> |
---|
73 | #include <freebsd/net/if_arp.h> |
---|
74 | #include <freebsd/net/if_dl.h> |
---|
75 | #include <freebsd/net/if_media.h> |
---|
76 | |
---|
77 | #include <freebsd/net/if_types.h> |
---|
78 | #include <freebsd/net/if_vlan_var.h> |
---|
79 | |
---|
80 | #include <freebsd/netinet/in_systm.h> |
---|
81 | #include <freebsd/netinet/in.h> |
---|
82 | #include <freebsd/netinet/if_ether.h> |
---|
83 | #include <freebsd/netinet/ip.h> |
---|
84 | #include <freebsd/netinet/ip6.h> |
---|
85 | #include <freebsd/netinet/tcp.h> |
---|
86 | #include <freebsd/netinet/tcp_lro.h> |
---|
87 | #include <freebsd/netinet/udp.h> |
---|
88 | |
---|
89 | #include <freebsd/machine/in_cksum.h> |
---|
90 | #ifndef __rtems__ |
---|
91 | #include <freebsd/dev/led/led.h> |
---|
92 | #endif |
---|
93 | #include <freebsd/dev/pci/pcivar.h> |
---|
94 | #include <freebsd/dev/pci/pcireg.h> |
---|
95 | |
---|
96 | #ifndef __rtems__ |
---|
97 | #include <freebsd/local/e1000_api.h> |
---|
98 | #include <freebsd/local/e1000_82575.h> |
---|
99 | #include <freebsd/local/if_igb.h> |
---|
100 | #else |
---|
101 | #include <freebsd/dev/e1000/e1000_api.h> |
---|
102 | #include <freebsd/dev/e1000/e1000_82575.h> |
---|
103 | #include <freebsd/dev/e1000/if_igb.h> |
---|
104 | #endif |
---|
105 | |
---|
106 | /********************************************************************* |
---|
107 | * Set this to one to display debug statistics |
---|
108 | *********************************************************************/ |
---|
109 | int igb_display_debug_stats = 0; |
---|
110 | |
---|
111 | /********************************************************************* |
---|
112 | * Driver version: |
---|
113 | *********************************************************************/ |
---|
114 | char igb_driver_version[] = "version - 2.0.7"; |
---|
115 | |
---|
116 | |
---|
117 | /********************************************************************* |
---|
118 | * PCI Device ID Table |
---|
119 | * |
---|
120 | * Used by probe to select devices to load on |
---|
121 | * Last field stores an index into e1000_strings |
---|
122 | * Last entry must be all 0s |
---|
123 | * |
---|
124 | * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, String Index } |
---|
125 | *********************************************************************/ |
---|
126 | |
---|
127 | static igb_vendor_info_t igb_vendor_info_array[] = |
---|
128 | { |
---|
129 | { 0x8086, E1000_DEV_ID_82575EB_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
130 | { 0x8086, E1000_DEV_ID_82575EB_FIBER_SERDES, |
---|
131 | PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
132 | { 0x8086, E1000_DEV_ID_82575GB_QUAD_COPPER, |
---|
133 | PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
134 | { 0x8086, E1000_DEV_ID_82576, PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
135 | { 0x8086, E1000_DEV_ID_82576_NS, PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
136 | { 0x8086, E1000_DEV_ID_82576_NS_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
137 | { 0x8086, E1000_DEV_ID_82576_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
138 | { 0x8086, E1000_DEV_ID_82576_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
139 | { 0x8086, E1000_DEV_ID_82576_SERDES_QUAD, |
---|
140 | PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
141 | { 0x8086, E1000_DEV_ID_82576_QUAD_COPPER, |
---|
142 | PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
143 | { 0x8086, E1000_DEV_ID_82576_QUAD_COPPER_ET2, |
---|
144 | PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
145 | { 0x8086, E1000_DEV_ID_82576_VF, PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
146 | { 0x8086, E1000_DEV_ID_82580_COPPER, PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
147 | { 0x8086, E1000_DEV_ID_82580_FIBER, PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
148 | { 0x8086, E1000_DEV_ID_82580_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
149 | { 0x8086, E1000_DEV_ID_82580_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
150 | { 0x8086, E1000_DEV_ID_82580_COPPER_DUAL, |
---|
151 | PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
152 | { 0x8086, E1000_DEV_ID_82580_QUAD_FIBER, |
---|
153 | PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
154 | { 0x8086, E1000_DEV_ID_DH89XXCC_SERDES, PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
155 | { 0x8086, E1000_DEV_ID_DH89XXCC_SGMII, PCI_ANY_ID, PCI_ANY_ID, 0}, |
---|
156 | /* required last entry */ |
---|
157 | { 0, 0, 0, 0, 0} |
---|
158 | }; |
---|
159 | |
---|
160 | /********************************************************************* |
---|
161 | * Table of branding strings for all supported NICs. |
---|
162 | *********************************************************************/ |
---|
163 | |
---|
164 | static char *igb_strings[] = { |
---|
165 | "Intel(R) PRO/1000 Network Connection" |
---|
166 | }; |
---|
167 | |
---|
168 | /********************************************************************* |
---|
169 | * Function prototypes |
---|
170 | *********************************************************************/ |
---|
171 | static int igb_probe(device_t); |
---|
172 | static int igb_attach(device_t); |
---|
173 | static int igb_detach(device_t); |
---|
174 | static int igb_shutdown(device_t); |
---|
175 | static int igb_suspend(device_t); |
---|
176 | static int igb_resume(device_t); |
---|
177 | static void igb_start(struct ifnet *); |
---|
178 | static void igb_start_locked(struct tx_ring *, struct ifnet *ifp); |
---|
179 | #if __FreeBSD_version >= 800000 |
---|
180 | static int igb_mq_start(struct ifnet *, struct mbuf *); |
---|
181 | static int igb_mq_start_locked(struct ifnet *, |
---|
182 | struct tx_ring *, struct mbuf *); |
---|
183 | static void igb_qflush(struct ifnet *); |
---|
184 | #endif |
---|
185 | static int igb_ioctl(struct ifnet *, u_long, caddr_t); |
---|
186 | static void igb_init(void *); |
---|
187 | static void igb_init_locked(struct adapter *); |
---|
188 | static void igb_stop(void *); |
---|
189 | static void igb_media_status(struct ifnet *, struct ifmediareq *); |
---|
190 | static int igb_media_change(struct ifnet *); |
---|
191 | static void igb_identify_hardware(struct adapter *); |
---|
192 | static int igb_allocate_pci_resources(struct adapter *); |
---|
193 | static int igb_allocate_msix(struct adapter *); |
---|
194 | static int igb_allocate_legacy(struct adapter *); |
---|
195 | static int igb_setup_msix(struct adapter *); |
---|
196 | static void igb_free_pci_resources(struct adapter *); |
---|
197 | static void igb_local_timer(void *); |
---|
198 | static void igb_reset(struct adapter *); |
---|
199 | static int igb_setup_interface(device_t, struct adapter *); |
---|
200 | static int igb_allocate_queues(struct adapter *); |
---|
201 | static void igb_configure_queues(struct adapter *); |
---|
202 | |
---|
203 | static int igb_allocate_transmit_buffers(struct tx_ring *); |
---|
204 | static void igb_setup_transmit_structures(struct adapter *); |
---|
205 | static void igb_setup_transmit_ring(struct tx_ring *); |
---|
206 | static void igb_initialize_transmit_units(struct adapter *); |
---|
207 | static void igb_free_transmit_structures(struct adapter *); |
---|
208 | static void igb_free_transmit_buffers(struct tx_ring *); |
---|
209 | |
---|
210 | static int igb_allocate_receive_buffers(struct rx_ring *); |
---|
211 | static int igb_setup_receive_structures(struct adapter *); |
---|
212 | static int igb_setup_receive_ring(struct rx_ring *); |
---|
213 | static void igb_initialize_receive_units(struct adapter *); |
---|
214 | static void igb_free_receive_structures(struct adapter *); |
---|
215 | static void igb_free_receive_buffers(struct rx_ring *); |
---|
216 | static void igb_free_receive_ring(struct rx_ring *); |
---|
217 | |
---|
218 | static void igb_enable_intr(struct adapter *); |
---|
219 | static void igb_disable_intr(struct adapter *); |
---|
220 | static void igb_update_stats_counters(struct adapter *); |
---|
221 | static bool igb_txeof(struct tx_ring *); |
---|
222 | |
---|
223 | static __inline void igb_rx_discard(struct rx_ring *, int); |
---|
224 | static __inline void igb_rx_input(struct rx_ring *, |
---|
225 | struct ifnet *, struct mbuf *, u32); |
---|
226 | |
---|
227 | static bool igb_rxeof(struct igb_queue *, int, int *); |
---|
228 | static void igb_rx_checksum(u32, struct mbuf *, u32); |
---|
229 | #ifdef __rtems__ |
---|
230 | /* XXX this is an inconsistency in BSD */ |
---|
231 | static bool igb_tx_ctx_setup(struct tx_ring *, struct mbuf *); |
---|
232 | static boolean_t igb_tso_setup(struct tx_ring *, struct mbuf *, u32 *); |
---|
233 | #else |
---|
234 | static int igb_tx_ctx_setup(struct tx_ring *, struct mbuf *); |
---|
235 | static bool igb_tso_setup(struct tx_ring *, struct mbuf *, u32 *); |
---|
236 | #endif |
---|
237 | static void igb_set_promisc(struct adapter *); |
---|
238 | static void igb_disable_promisc(struct adapter *); |
---|
239 | static void igb_set_multi(struct adapter *); |
---|
240 | static void igb_update_link_status(struct adapter *); |
---|
241 | static void igb_refresh_mbufs(struct rx_ring *, int); |
---|
242 | |
---|
243 | static void igb_register_vlan(void *, struct ifnet *, u16); |
---|
244 | static void igb_unregister_vlan(void *, struct ifnet *, u16); |
---|
245 | static void igb_setup_vlan_hw_support(struct adapter *); |
---|
246 | |
---|
247 | static int igb_xmit(struct tx_ring *, struct mbuf **); |
---|
248 | static int igb_dma_malloc(struct adapter *, bus_size_t, |
---|
249 | struct igb_dma_alloc *, int); |
---|
250 | static void igb_dma_free(struct adapter *, struct igb_dma_alloc *); |
---|
251 | static int igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS); |
---|
252 | static void igb_print_nvm_info(struct adapter *); |
---|
253 | static int igb_is_valid_ether_addr(u8 *); |
---|
254 | static void igb_add_hw_stats(struct adapter *); |
---|
255 | |
---|
256 | static void igb_vf_init_stats(struct adapter *); |
---|
257 | static void igb_update_vf_stats_counters(struct adapter *); |
---|
258 | |
---|
259 | /* Management and WOL Support */ |
---|
260 | static void igb_init_manageability(struct adapter *); |
---|
261 | static void igb_release_manageability(struct adapter *); |
---|
262 | static void igb_get_hw_control(struct adapter *); |
---|
263 | static void igb_release_hw_control(struct adapter *); |
---|
264 | static void igb_enable_wakeup(device_t); |
---|
265 | static void igb_led_func(void *, int); |
---|
266 | |
---|
267 | static int igb_irq_fast(void *); |
---|
268 | static void igb_add_rx_process_limit(struct adapter *, const char *, |
---|
269 | const char *, int *, int); |
---|
270 | static void igb_handle_que(void *context, int pending); |
---|
271 | static void igb_handle_link(void *context, int pending); |
---|
272 | |
---|
273 | /* These are MSIX only irq handlers */ |
---|
274 | static void igb_msix_que(void *); |
---|
275 | static void igb_msix_link(void *); |
---|
276 | |
---|
277 | #ifdef DEVICE_POLLING |
---|
278 | static poll_handler_t igb_poll; |
---|
279 | #endif /* DEVICE_POLLING */ |
---|
280 | |
---|
281 | /********************************************************************* |
---|
282 | * FreeBSD Device Interface Entry Points |
---|
283 | *********************************************************************/ |
---|
284 | |
---|
285 | static device_method_t igb_methods[] = { |
---|
286 | /* Device interface */ |
---|
287 | DEVMETHOD(device_probe, igb_probe), |
---|
288 | DEVMETHOD(device_attach, igb_attach), |
---|
289 | DEVMETHOD(device_detach, igb_detach), |
---|
290 | DEVMETHOD(device_shutdown, igb_shutdown), |
---|
291 | DEVMETHOD(device_suspend, igb_suspend), |
---|
292 | DEVMETHOD(device_resume, igb_resume), |
---|
293 | {0, 0} |
---|
294 | }; |
---|
295 | |
---|
296 | static driver_t igb_driver = { |
---|
297 | "igb", igb_methods, sizeof(struct adapter), |
---|
298 | }; |
---|
299 | |
---|
300 | static devclass_t igb_devclass; |
---|
301 | DRIVER_MODULE(igb, pci, igb_driver, igb_devclass, 0, 0); |
---|
302 | MODULE_DEPEND(igb, pci, 1, 1, 1); |
---|
303 | MODULE_DEPEND(igb, ether, 1, 1, 1); |
---|
304 | |
---|
305 | /********************************************************************* |
---|
306 | * Tunable default values. |
---|
307 | *********************************************************************/ |
---|
308 | |
---|
309 | /* Descriptor defaults */ |
---|
310 | static int igb_rxd = IGB_DEFAULT_RXD; |
---|
311 | static int igb_txd = IGB_DEFAULT_TXD; |
---|
312 | TUNABLE_INT("hw.igb.rxd", &igb_rxd); |
---|
313 | TUNABLE_INT("hw.igb.txd", &igb_txd); |
---|
314 | |
---|
315 | /* |
---|
316 | ** AIM: Adaptive Interrupt Moderation |
---|
317 | ** which means that the interrupt rate |
---|
318 | ** is varied over time based on the |
---|
319 | ** traffic for that interrupt vector |
---|
320 | */ |
---|
321 | static int igb_enable_aim = TRUE; |
---|
322 | TUNABLE_INT("hw.igb.enable_aim", &igb_enable_aim); |
---|
323 | |
---|
324 | /* |
---|
325 | * MSIX should be the default for best performance, |
---|
326 | * but this allows it to be forced off for testing. |
---|
327 | */ |
---|
328 | static int igb_enable_msix = 1; |
---|
329 | TUNABLE_INT("hw.igb.enable_msix", &igb_enable_msix); |
---|
330 | |
---|
331 | /* |
---|
332 | ** Tunable Interrupt rate |
---|
333 | */ |
---|
334 | static int igb_max_interrupt_rate = 8000; |
---|
335 | TUNABLE_INT("hw.igb.max_interrupt_rate", &igb_max_interrupt_rate); |
---|
336 | |
---|
337 | /* |
---|
338 | ** Header split causes the packet header to |
---|
339 | ** be dma'd to a separate mbuf from the payload. |
---|
340 | ** this can have memory alignment benefits. But |
---|
341 | ** another plus is that small packets often fit |
---|
342 | ** into the header and thus use no cluster. Its |
---|
343 | ** a very workload dependent type feature. |
---|
344 | */ |
---|
345 | static bool igb_header_split = FALSE; |
---|
346 | TUNABLE_INT("hw.igb.hdr_split", &igb_header_split); |
---|
347 | |
---|
348 | /* |
---|
349 | ** This will autoconfigure based on |
---|
350 | ** the number of CPUs if left at 0. |
---|
351 | */ |
---|
352 | static int igb_num_queues = 0; |
---|
353 | TUNABLE_INT("hw.igb.num_queues", &igb_num_queues); |
---|
354 | |
---|
355 | /* How many packets rxeof tries to clean at a time */ |
---|
356 | static int igb_rx_process_limit = 100; |
---|
357 | TUNABLE_INT("hw.igb.rx_process_limit", &igb_rx_process_limit); |
---|
358 | |
---|
359 | /* Flow control setting - default to FULL */ |
---|
360 | static int igb_fc_setting = e1000_fc_full; |
---|
361 | TUNABLE_INT("hw.igb.fc_setting", &igb_fc_setting); |
---|
362 | |
---|
363 | /********************************************************************* |
---|
364 | * Device identification routine |
---|
365 | * |
---|
366 | * igb_probe determines if the driver should be loaded on |
---|
367 | * adapter based on PCI vendor/device id of the adapter. |
---|
368 | * |
---|
369 | * return BUS_PROBE_DEFAULT on success, positive on failure |
---|
370 | *********************************************************************/ |
---|
371 | |
---|
372 | static int |
---|
373 | igb_probe(device_t dev) |
---|
374 | { |
---|
375 | char adapter_name[60]; |
---|
376 | uint16_t pci_vendor_id = 0; |
---|
377 | uint16_t pci_device_id = 0; |
---|
378 | uint16_t pci_subvendor_id = 0; |
---|
379 | uint16_t pci_subdevice_id = 0; |
---|
380 | igb_vendor_info_t *ent; |
---|
381 | |
---|
382 | INIT_DEBUGOUT("igb_probe: begin"); |
---|
383 | |
---|
384 | pci_vendor_id = pci_get_vendor(dev); |
---|
385 | if (pci_vendor_id != IGB_VENDOR_ID) |
---|
386 | return (ENXIO); |
---|
387 | |
---|
388 | pci_device_id = pci_get_device(dev); |
---|
389 | pci_subvendor_id = pci_get_subvendor(dev); |
---|
390 | pci_subdevice_id = pci_get_subdevice(dev); |
---|
391 | |
---|
392 | ent = igb_vendor_info_array; |
---|
393 | while (ent->vendor_id != 0) { |
---|
394 | if ((pci_vendor_id == ent->vendor_id) && |
---|
395 | (pci_device_id == ent->device_id) && |
---|
396 | |
---|
397 | ((pci_subvendor_id == ent->subvendor_id) || |
---|
398 | (ent->subvendor_id == PCI_ANY_ID)) && |
---|
399 | |
---|
400 | ((pci_subdevice_id == ent->subdevice_id) || |
---|
401 | (ent->subdevice_id == PCI_ANY_ID))) { |
---|
402 | sprintf(adapter_name, "%s %s", |
---|
403 | igb_strings[ent->index], |
---|
404 | igb_driver_version); |
---|
405 | device_set_desc_copy(dev, adapter_name); |
---|
406 | return (BUS_PROBE_DEFAULT); |
---|
407 | } |
---|
408 | ent++; |
---|
409 | } |
---|
410 | |
---|
411 | return (ENXIO); |
---|
412 | } |
---|
413 | |
---|
414 | /********************************************************************* |
---|
415 | * Device initialization routine |
---|
416 | * |
---|
417 | * The attach entry point is called when the driver is being loaded. |
---|
418 | * This routine identifies the type of hardware, allocates all resources |
---|
419 | * and initializes the hardware. |
---|
420 | * |
---|
421 | * return 0 on success, positive on failure |
---|
422 | *********************************************************************/ |
---|
423 | |
---|
424 | static int |
---|
425 | igb_attach(device_t dev) |
---|
426 | { |
---|
427 | struct adapter *adapter; |
---|
428 | int error = 0; |
---|
429 | u16 eeprom_data; |
---|
430 | |
---|
431 | INIT_DEBUGOUT("igb_attach: begin"); |
---|
432 | |
---|
433 | adapter = device_get_softc(dev); |
---|
434 | adapter->dev = adapter->osdep.dev = dev; |
---|
435 | IGB_CORE_LOCK_INIT(adapter, device_get_nameunit(dev)); |
---|
436 | |
---|
437 | /* SYSCTL stuff */ |
---|
438 | SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev), |
---|
439 | SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), |
---|
440 | OID_AUTO, "nvm", CTLTYPE_INT|CTLFLAG_RW, adapter, 0, |
---|
441 | igb_sysctl_nvm_info, "I", "NVM Information"); |
---|
442 | |
---|
443 | SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), |
---|
444 | SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), |
---|
445 | OID_AUTO, "flow_control", CTLTYPE_INT|CTLFLAG_RW, |
---|
446 | &igb_fc_setting, 0, "Flow Control"); |
---|
447 | |
---|
448 | SYSCTL_ADD_INT(device_get_sysctl_ctx(dev), |
---|
449 | SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), |
---|
450 | OID_AUTO, "enable_aim", CTLTYPE_INT|CTLFLAG_RW, |
---|
451 | &igb_enable_aim, 1, "Interrupt Moderation"); |
---|
452 | |
---|
453 | callout_init_mtx(&adapter->timer, &adapter->core_mtx, 0); |
---|
454 | |
---|
455 | /* Determine hardware and mac info */ |
---|
456 | igb_identify_hardware(adapter); |
---|
457 | |
---|
458 | /* Setup PCI resources */ |
---|
459 | if (igb_allocate_pci_resources(adapter)) { |
---|
460 | device_printf(dev, "Allocation of PCI resources failed\n"); |
---|
461 | error = ENXIO; |
---|
462 | goto err_pci; |
---|
463 | } |
---|
464 | |
---|
465 | /* Do Shared Code initialization */ |
---|
466 | if (e1000_setup_init_funcs(&adapter->hw, TRUE)) { |
---|
467 | device_printf(dev, "Setup of Shared code failed\n"); |
---|
468 | error = ENXIO; |
---|
469 | goto err_pci; |
---|
470 | } |
---|
471 | |
---|
472 | e1000_get_bus_info(&adapter->hw); |
---|
473 | |
---|
474 | /* Sysctls for limiting the amount of work done in the taskqueue */ |
---|
475 | igb_add_rx_process_limit(adapter, "rx_processing_limit", |
---|
476 | "max number of rx packets to process", &adapter->rx_process_limit, |
---|
477 | igb_rx_process_limit); |
---|
478 | |
---|
479 | /* |
---|
480 | * Validate number of transmit and receive descriptors. It |
---|
481 | * must not exceed hardware maximum, and must be multiple |
---|
482 | * of E1000_DBA_ALIGN. |
---|
483 | */ |
---|
484 | if (((igb_txd * sizeof(struct e1000_tx_desc)) % IGB_DBA_ALIGN) != 0 || |
---|
485 | (igb_txd > IGB_MAX_TXD) || (igb_txd < IGB_MIN_TXD)) { |
---|
486 | device_printf(dev, "Using %d TX descriptors instead of %d!\n", |
---|
487 | IGB_DEFAULT_TXD, igb_txd); |
---|
488 | adapter->num_tx_desc = IGB_DEFAULT_TXD; |
---|
489 | } else |
---|
490 | adapter->num_tx_desc = igb_txd; |
---|
491 | if (((igb_rxd * sizeof(struct e1000_rx_desc)) % IGB_DBA_ALIGN) != 0 || |
---|
492 | (igb_rxd > IGB_MAX_RXD) || (igb_rxd < IGB_MIN_RXD)) { |
---|
493 | device_printf(dev, "Using %d RX descriptors instead of %d!\n", |
---|
494 | IGB_DEFAULT_RXD, igb_rxd); |
---|
495 | adapter->num_rx_desc = IGB_DEFAULT_RXD; |
---|
496 | } else |
---|
497 | adapter->num_rx_desc = igb_rxd; |
---|
498 | |
---|
499 | adapter->hw.mac.autoneg = DO_AUTO_NEG; |
---|
500 | adapter->hw.phy.autoneg_wait_to_complete = FALSE; |
---|
501 | adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; |
---|
502 | |
---|
503 | /* Copper options */ |
---|
504 | if (adapter->hw.phy.media_type == e1000_media_type_copper) { |
---|
505 | adapter->hw.phy.mdix = AUTO_ALL_MODES; |
---|
506 | adapter->hw.phy.disable_polarity_correction = FALSE; |
---|
507 | adapter->hw.phy.ms_type = IGB_MASTER_SLAVE; |
---|
508 | } |
---|
509 | |
---|
510 | /* |
---|
511 | * Set the frame limits assuming |
---|
512 | * standard ethernet sized frames. |
---|
513 | */ |
---|
514 | adapter->max_frame_size = ETHERMTU + ETHER_HDR_LEN + ETHERNET_FCS_SIZE; |
---|
515 | adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE; |
---|
516 | |
---|
517 | /* |
---|
518 | ** Allocate and Setup Queues |
---|
519 | */ |
---|
520 | if (igb_allocate_queues(adapter)) { |
---|
521 | error = ENOMEM; |
---|
522 | goto err_pci; |
---|
523 | } |
---|
524 | |
---|
525 | /* Allocate the appropriate stats memory */ |
---|
526 | if (adapter->hw.mac.type == e1000_vfadapt) { |
---|
527 | adapter->stats = |
---|
528 | (struct e1000_vf_stats *)malloc(sizeof \ |
---|
529 | (struct e1000_vf_stats), M_DEVBUF, M_NOWAIT | M_ZERO); |
---|
530 | igb_vf_init_stats(adapter); |
---|
531 | } else |
---|
532 | adapter->stats = |
---|
533 | (struct e1000_hw_stats *)malloc(sizeof \ |
---|
534 | (struct e1000_hw_stats), M_DEVBUF, M_NOWAIT | M_ZERO); |
---|
535 | if (adapter->stats == NULL) { |
---|
536 | device_printf(dev, "Can not allocate stats memory\n"); |
---|
537 | error = ENOMEM; |
---|
538 | goto err_late; |
---|
539 | } |
---|
540 | |
---|
541 | /* Allocate multicast array memory. */ |
---|
542 | adapter->mta = malloc(sizeof(u8) * ETH_ADDR_LEN * |
---|
543 | MAX_NUM_MULTICAST_ADDRESSES, M_DEVBUF, M_NOWAIT); |
---|
544 | if (adapter->mta == NULL) { |
---|
545 | device_printf(dev, "Can not allocate multicast setup array\n"); |
---|
546 | error = ENOMEM; |
---|
547 | goto err_late; |
---|
548 | } |
---|
549 | |
---|
550 | /* |
---|
551 | ** Start from a known state, this is |
---|
552 | ** important in reading the nvm and |
---|
553 | ** mac from that. |
---|
554 | */ |
---|
555 | e1000_reset_hw(&adapter->hw); |
---|
556 | |
---|
557 | /* Make sure we have a good EEPROM before we read from it */ |
---|
558 | if (e1000_validate_nvm_checksum(&adapter->hw) < 0) { |
---|
559 | /* |
---|
560 | ** Some PCI-E parts fail the first check due to |
---|
561 | ** the link being in sleep state, call it again, |
---|
562 | ** if it fails a second time its a real issue. |
---|
563 | */ |
---|
564 | if (e1000_validate_nvm_checksum(&adapter->hw) < 0) { |
---|
565 | device_printf(dev, |
---|
566 | "The EEPROM Checksum Is Not Valid\n"); |
---|
567 | error = EIO; |
---|
568 | goto err_late; |
---|
569 | } |
---|
570 | } |
---|
571 | |
---|
572 | /* |
---|
573 | ** Copy the permanent MAC address out of the EEPROM |
---|
574 | */ |
---|
575 | if (e1000_read_mac_addr(&adapter->hw) < 0) { |
---|
576 | device_printf(dev, "EEPROM read error while reading MAC" |
---|
577 | " address\n"); |
---|
578 | error = EIO; |
---|
579 | goto err_late; |
---|
580 | } |
---|
581 | /* Check its sanity */ |
---|
582 | if (!igb_is_valid_ether_addr(adapter->hw.mac.addr)) { |
---|
583 | device_printf(dev, "Invalid MAC address\n"); |
---|
584 | error = EIO; |
---|
585 | goto err_late; |
---|
586 | } |
---|
587 | |
---|
588 | /* |
---|
589 | ** Configure Interrupts |
---|
590 | */ |
---|
591 | if ((adapter->msix > 1) && (igb_enable_msix)) |
---|
592 | error = igb_allocate_msix(adapter); |
---|
593 | else /* MSI or Legacy */ |
---|
594 | error = igb_allocate_legacy(adapter); |
---|
595 | if (error) |
---|
596 | goto err_late; |
---|
597 | |
---|
598 | /* Setup OS specific network interface */ |
---|
599 | if (igb_setup_interface(dev, adapter) != 0) |
---|
600 | goto err_late; |
---|
601 | |
---|
602 | /* Now get a good starting state */ |
---|
603 | igb_reset(adapter); |
---|
604 | |
---|
605 | /* Initialize statistics */ |
---|
606 | igb_update_stats_counters(adapter); |
---|
607 | |
---|
608 | adapter->hw.mac.get_link_status = 1; |
---|
609 | igb_update_link_status(adapter); |
---|
610 | |
---|
611 | /* Indicate SOL/IDER usage */ |
---|
612 | if (e1000_check_reset_block(&adapter->hw)) |
---|
613 | device_printf(dev, |
---|
614 | "PHY reset is blocked due to SOL/IDER session.\n"); |
---|
615 | |
---|
616 | /* Determine if we have to control management hardware */ |
---|
617 | adapter->has_manage = e1000_enable_mng_pass_thru(&adapter->hw); |
---|
618 | |
---|
619 | /* |
---|
620 | * Setup Wake-on-Lan |
---|
621 | */ |
---|
622 | /* APME bit in EEPROM is mapped to WUC.APME */ |
---|
623 | eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC) & E1000_WUC_APME; |
---|
624 | if (eeprom_data) |
---|
625 | adapter->wol = E1000_WUFC_MAG; |
---|
626 | |
---|
627 | /* Register for VLAN events */ |
---|
628 | adapter->vlan_attach = EVENTHANDLER_REGISTER(vlan_config, |
---|
629 | igb_register_vlan, adapter, EVENTHANDLER_PRI_FIRST); |
---|
630 | adapter->vlan_detach = EVENTHANDLER_REGISTER(vlan_unconfig, |
---|
631 | igb_unregister_vlan, adapter, EVENTHANDLER_PRI_FIRST); |
---|
632 | |
---|
633 | igb_add_hw_stats(adapter); |
---|
634 | |
---|
635 | /* Tell the stack that the interface is not active */ |
---|
636 | adapter->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); |
---|
637 | |
---|
638 | adapter->led_dev = led_create(igb_led_func, adapter, |
---|
639 | device_get_nameunit(dev)); |
---|
640 | |
---|
641 | INIT_DEBUGOUT("igb_attach: end"); |
---|
642 | |
---|
643 | return (0); |
---|
644 | |
---|
645 | err_late: |
---|
646 | igb_free_transmit_structures(adapter); |
---|
647 | igb_free_receive_structures(adapter); |
---|
648 | igb_release_hw_control(adapter); |
---|
649 | if (adapter->ifp != NULL) |
---|
650 | if_free(adapter->ifp); |
---|
651 | err_pci: |
---|
652 | igb_free_pci_resources(adapter); |
---|
653 | free(adapter->mta, M_DEVBUF); |
---|
654 | IGB_CORE_LOCK_DESTROY(adapter); |
---|
655 | |
---|
656 | return (error); |
---|
657 | } |
---|
658 | |
---|
/*********************************************************************
 *  Device removal routine
 *
 *  The detach entry point is called when the driver is being removed.
 *  This routine stops the adapter and deallocates all the resources
 *  that were allocated for driver operation.
 *
 *  return 0 on success, positive on failure
 *********************************************************************/

static int
igb_detach(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;

	INIT_DEBUGOUT("igb_detach: begin");

	/* Make sure VLANS are not using driver */
	if (adapter->ifp->if_vlantrunk != NULL) {
		device_printf(dev,"Vlan in use, detach first\n");
		return (EBUSY);
	}

	/* Tear down the LED control device created at attach time. */
	if (adapter->led_dev != NULL)
		led_destroy(adapter->led_dev);

#ifdef DEVICE_POLLING
	if (ifp->if_capenable & IFCAP_POLLING)
		ether_poll_deregister(ifp);
#endif

	/*
	 * Stop the hardware under the core lock.  in_detach is set
	 * first so that concurrent paths can observe a detach in
	 * progress (NOTE(review): presumably checked by the ioctl
	 * and timer paths — not visible in this chunk, confirm).
	 */
	IGB_CORE_LOCK(adapter);
	adapter->in_detach = 1;
	igb_stop(adapter);
	IGB_CORE_UNLOCK(adapter);

	e1000_phy_hw_reset(&adapter->hw);

	/* Give control back to firmware */
	igb_release_manageability(adapter);
	igb_release_hw_control(adapter);

	/* Wake-on-LAN configured: arm PME and the wake-up filters. */
	if (adapter->wol) {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
		igb_enable_wakeup(dev);
	}

	/* Unregister VLAN events */
	if (adapter->vlan_attach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_config, adapter->vlan_attach);
	if (adapter->vlan_detach != NULL)
		EVENTHANDLER_DEREGISTER(vlan_unconfig, adapter->vlan_detach);

	/* Detach from the network stack before freeing resources. */
	ether_ifdetach(adapter->ifp);

	/* Drain (not just stop) the watchdog timer callout. */
	callout_drain(&adapter->timer);

	/* Release bus resources, children, and the ifnet itself. */
	igb_free_pci_resources(adapter);
	bus_generic_detach(dev);
	if_free(ifp);

	/* Free descriptor rings and the multicast scratch array. */
	igb_free_transmit_structures(adapter);
	igb_free_receive_structures(adapter);
	free(adapter->mta, M_DEVBUF);

	IGB_CORE_LOCK_DESTROY(adapter);

	return (0);
}
---|
730 | |
---|
731 | /********************************************************************* |
---|
732 | * |
---|
733 | * Shutdown entry point |
---|
734 | * |
---|
735 | **********************************************************************/ |
---|
736 | |
---|
737 | static int |
---|
738 | igb_shutdown(device_t dev) |
---|
739 | { |
---|
740 | return igb_suspend(dev); |
---|
741 | } |
---|
742 | |
---|
743 | /* |
---|
744 | * Suspend/resume device methods. |
---|
745 | */ |
---|
static int
igb_suspend(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);

	IGB_CORE_LOCK(adapter);

	/* Quiesce the adapter before touching wake/manageability state. */
	igb_stop(adapter);

	/* Give control back to firmware */
	igb_release_manageability(adapter);
	igb_release_hw_control(adapter);

	/* Arm wake-on-LAN with the configured filter bits, if any. */
	if (adapter->wol) {
		E1000_WRITE_REG(&adapter->hw, E1000_WUC, E1000_WUC_PME_EN);
		E1000_WRITE_REG(&adapter->hw, E1000_WUFC, adapter->wol);
		igb_enable_wakeup(dev);
	}

	IGB_CORE_UNLOCK(adapter);

	/* Suspend any child devices as well. */
	return bus_generic_suspend(dev);
}
---|
768 | |
---|
static int
igb_resume(device_t dev)
{
	struct adapter *adapter = device_get_softc(dev);
	struct ifnet *ifp = adapter->ifp;

	IGB_CORE_LOCK(adapter);
	/* Reinitialize the hardware and reclaim it for OS control. */
	igb_init_locked(adapter);
	igb_init_manageability(adapter);

	/* Restart transmission if the interface was up before suspend. */
	if ((ifp->if_flags & IFF_UP) &&
	    (ifp->if_drv_flags & IFF_DRV_RUNNING))
		igb_start(ifp);

	IGB_CORE_UNLOCK(adapter);

	/* Resume any child devices as well. */
	return bus_generic_resume(dev);
}
---|
787 | |
---|
788 | |
---|
789 | /********************************************************************* |
---|
790 | * Transmit entry point |
---|
791 | * |
---|
792 | * igb_start is called by the stack to initiate a transmit. |
---|
793 | * The driver will remain in this routine as long as there are |
---|
794 | * packets to transmit and transmit resources are available. |
---|
795 | * In case resources are not available stack is notified and |
---|
796 | * the packet is requeued. |
---|
797 | **********************************************************************/ |
---|
798 | |
---|
static void
igb_start_locked(struct tx_ring *txr, struct ifnet *ifp)
{
	struct adapter *adapter = ifp->if_softc;
	struct mbuf *m_head;

	IGB_TX_LOCK_ASSERT(txr);

	/* Nothing to do unless running, not marked busy, and link is up. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING|IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING)
		return;
	if (!adapter->link_active)
		return;

	/* Call cleanup if number of TX descriptors low */
	if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD)
		igb_txeof(txr);

	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		/* Out of descriptors: mark busy and let txeof restart us. */
		if (txr->tx_avail <= IGB_TX_OP_THRESHOLD) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;
		/*
		 * Encapsulation can modify our pointer, and or make it
		 * NULL on failure. In that event, we can't requeue.
		 */
		if (igb_xmit(txr, &m_head)) {
			if (m_head == NULL)
				break;
			/* Untransmitted frame survives: put it back. */
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
			break;
		}

		/* Send a copy of the frame to the BPF listener */
		ETHER_BPF_MTAP(ifp, m_head);

		/* Set watchdog on */
		txr->watchdog_time = ticks;
		txr->queue_status = IGB_QUEUE_WORKING;
	}
}
---|
845 | |
---|
846 | /* |
---|
847 | * Legacy TX driver routine, called from the |
---|
848 | * stack, always uses tx[0], and spins for it. |
---|
849 | * Should not be used with multiqueue tx |
---|
850 | */ |
---|
851 | static void |
---|
852 | igb_start(struct ifnet *ifp) |
---|
853 | { |
---|
854 | struct adapter *adapter = ifp->if_softc; |
---|
855 | struct tx_ring *txr = adapter->tx_rings; |
---|
856 | |
---|
857 | if (ifp->if_drv_flags & IFF_DRV_RUNNING) { |
---|
858 | IGB_TX_LOCK(txr); |
---|
859 | igb_start_locked(txr, ifp); |
---|
860 | IGB_TX_UNLOCK(txr); |
---|
861 | } |
---|
862 | return; |
---|
863 | } |
---|
864 | |
---|
865 | #if __FreeBSD_version >= 800000 |
---|
866 | /* |
---|
867 | ** Multiqueue Transmit driver |
---|
868 | ** |
---|
869 | */ |
---|
870 | static int |
---|
871 | igb_mq_start(struct ifnet *ifp, struct mbuf *m) |
---|
872 | { |
---|
873 | struct adapter *adapter = ifp->if_softc; |
---|
874 | struct igb_queue *que; |
---|
875 | struct tx_ring *txr; |
---|
876 | int i = 0, err = 0; |
---|
877 | |
---|
878 | /* Which queue to use */ |
---|
879 | if ((m->m_flags & M_FLOWID) != 0) |
---|
880 | i = m->m_pkthdr.flowid % adapter->num_queues; |
---|
881 | |
---|
882 | txr = &adapter->tx_rings[i]; |
---|
883 | que = &adapter->queues[i]; |
---|
884 | |
---|
885 | if (IGB_TX_TRYLOCK(txr)) { |
---|
886 | err = igb_mq_start_locked(ifp, txr, m); |
---|
887 | IGB_TX_UNLOCK(txr); |
---|
888 | } else { |
---|
889 | err = drbr_enqueue(ifp, txr->br, m); |
---|
890 | taskqueue_enqueue(que->tq, &que->que_task); |
---|
891 | } |
---|
892 | |
---|
893 | return (err); |
---|
894 | } |
---|
895 | |
---|
static int
igb_mq_start_locked(struct ifnet *ifp, struct tx_ring *txr, struct mbuf *m)
{
	struct adapter *adapter = txr->adapter;
	struct mbuf *next;
	int err = 0, enq;

	IGB_TX_LOCK_ASSERT(txr);

	/* Not running, marked busy, or link down: just buffer the frame. */
	if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) !=
	    IFF_DRV_RUNNING || adapter->link_active == 0) {
		if (m != NULL)
			err = drbr_enqueue(ifp, txr->br, m);
		return (err);
	}

	/* Call cleanup if number of TX descriptors low */
	if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD)
		igb_txeof(txr);

	/*
	 * Pick the first frame to send: drain the buf_ring when called
	 * with no mbuf, enqueue first when ordering requires it, or
	 * send the caller's mbuf directly.
	 */
	enq = 0;
	if (m == NULL) {
		next = drbr_dequeue(ifp, txr->br);
	} else if (drbr_needs_enqueue(ifp, txr->br)) {
		if ((err = drbr_enqueue(ifp, txr->br, m)) != 0)
			return (err);
		next = drbr_dequeue(ifp, txr->br);
	} else
		next = m;

	/* Process the queue */
	while (next != NULL) {
		/* igb_xmit may free the mbuf and NULL the pointer on error. */
		if ((err = igb_xmit(txr, &next)) != 0) {
			if (next != NULL)
				err = drbr_enqueue(ifp, txr->br, next);
			break;
		}
		enq++;
		drbr_stats_update(ifp, next->m_pkthdr.len, next->m_flags);
		ETHER_BPF_MTAP(ifp, next);
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		/* Descriptors low: mark busy and stop draining. */
		if (txr->tx_avail <= IGB_TX_OP_THRESHOLD) {
			ifp->if_drv_flags |= IFF_DRV_OACTIVE;
			break;
		}
		next = drbr_dequeue(ifp, txr->br);
	}
	if (enq > 0) {
		/* Set the watchdog */
		txr->queue_status = IGB_QUEUE_WORKING;
		txr->watchdog_time = ticks;
	}
	return (err);
}
---|
951 | |
---|
952 | /* |
---|
953 | ** Flush all ring buffers |
---|
954 | */ |
---|
955 | static void |
---|
956 | igb_qflush(struct ifnet *ifp) |
---|
957 | { |
---|
958 | struct adapter *adapter = ifp->if_softc; |
---|
959 | struct tx_ring *txr = adapter->tx_rings; |
---|
960 | struct mbuf *m; |
---|
961 | |
---|
962 | for (int i = 0; i < adapter->num_queues; i++, txr++) { |
---|
963 | IGB_TX_LOCK(txr); |
---|
964 | while ((m = buf_ring_dequeue_sc(txr->br)) != NULL) |
---|
965 | m_freem(m); |
---|
966 | IGB_TX_UNLOCK(txr); |
---|
967 | } |
---|
968 | if_qflush(ifp); |
---|
969 | } |
---|
970 | #endif /* __FreeBSD_version >= 800000 */ |
---|
971 | |
---|
972 | /********************************************************************* |
---|
973 | * Ioctl entry point |
---|
974 | * |
---|
975 | * igb_ioctl is called when the user wants to configure the |
---|
976 | * interface. |
---|
977 | * |
---|
978 | * return 0 on success, positive on failure |
---|
979 | **********************************************************************/ |
---|
980 | |
---|
static int
igb_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
{
	struct adapter *adapter = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
#ifdef INET
	struct ifaddr *ifa = (struct ifaddr *)data;
#endif
	int error = 0;

	/* Reject everything once detach has started (error is still 0). */
	if (adapter->in_detach)
		return (error);

	switch (command) {
	case SIOCSIFADDR:
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET) {
			/*
			 * XXX
			 * Since resetting hardware takes a very long time
			 * and results in link renegotiation we only
			 * initialize the hardware only when it is absolutely
			 * required.
			 */
			ifp->if_flags |= IFF_UP;
			if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				IGB_CORE_LOCK(adapter);
				igb_init_locked(adapter);
				IGB_CORE_UNLOCK(adapter);
			}
			if (!(ifp->if_flags & IFF_NOARP))
				arp_ifinit(ifp, ifa);
		} else
#endif
			error = ether_ioctl(ifp, command, data);
		break;
	case SIOCSIFMTU:
	    {
		int max_frame_size;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFMTU (Set Interface MTU)");

		IGB_CORE_LOCK(adapter);
		/* 9234 is the largest on-wire frame the hardware accepts. */
		max_frame_size = 9234;
		if (ifr->ifr_mtu > max_frame_size - ETHER_HDR_LEN -
		    ETHER_CRC_LEN) {
			IGB_CORE_UNLOCK(adapter);
			error = EINVAL;
			break;
		}

		/* Reinitialize so the RX buffers match the new frame size. */
		ifp->if_mtu = ifr->ifr_mtu;
		adapter->max_frame_size =
		    ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
		igb_init_locked(adapter);
		IGB_CORE_UNLOCK(adapter);
		break;
	    }
	case SIOCSIFFLAGS:
		IOCTL_DEBUGOUT("ioctl rcv'd:\
		    SIOCSIFFLAGS (Set Interface Flags)");
		IGB_CORE_LOCK(adapter);
		if (ifp->if_flags & IFF_UP) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
				/*
				 * Already running: only reprogram the
				 * promisc/allmulti filters if those bits
				 * changed, avoiding a full reinit.
				 */
				if ((ifp->if_flags ^ adapter->if_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) {
					igb_disable_promisc(adapter);
					igb_set_promisc(adapter);
				}
			} else
				igb_init_locked(adapter);
		} else
			if (ifp->if_drv_flags & IFF_DRV_RUNNING)
				igb_stop(adapter);
		/* Remember the flags so the XOR above works next time. */
		adapter->if_flags = ifp->if_flags;
		IGB_CORE_UNLOCK(adapter);
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		IOCTL_DEBUGOUT("ioctl rcv'd: SIOC(ADD|DEL)MULTI");
		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			IGB_CORE_LOCK(adapter);
			igb_disable_intr(adapter);
			igb_set_multi(adapter);
#ifdef DEVICE_POLLING
			/* Leave interrupts masked while polling is active. */
			if (!(ifp->if_capenable & IFCAP_POLLING))
#endif
				igb_enable_intr(adapter);
			IGB_CORE_UNLOCK(adapter);
		}
		break;
	case SIOCSIFMEDIA:
		/*
		** As the speed/duplex settings are being
		** changed, we need toreset the PHY.
		*/
		adapter->hw.phy.reset_disable = FALSE;
		/* Check SOL/IDER usage */
		IGB_CORE_LOCK(adapter);
		if (e1000_check_reset_block(&adapter->hw)) {
			IGB_CORE_UNLOCK(adapter);
			device_printf(adapter->dev, "Media change is"
			    " blocked due to SOL/IDER session.\n");
			break;
		}
		IGB_CORE_UNLOCK(adapter);
		/* FALLTHROUGH: both set and get end at ifmedia_ioctl(). */
	case SIOCGIFMEDIA:
		IOCTL_DEBUGOUT("ioctl rcv'd: \
		    SIOCxIFMEDIA (Get/Set Interface Media)");
		error = ifmedia_ioctl(ifp, ifr, &adapter->media, command);
		break;
	case SIOCSIFCAP:
	    {
		int mask, reinit;

		IOCTL_DEBUGOUT("ioctl rcv'd: SIOCSIFCAP (Set Capabilities)");
		reinit = 0;
		/* mask holds only the capability bits that changed. */
		mask = ifr->ifr_reqcap ^ ifp->if_capenable;
#ifdef DEVICE_POLLING
		if (mask & IFCAP_POLLING) {
			if (ifr->ifr_reqcap & IFCAP_POLLING) {
				error = ether_poll_register(igb_poll, ifp);
				if (error)
					return (error);
				IGB_CORE_LOCK(adapter);
				igb_disable_intr(adapter);
				ifp->if_capenable |= IFCAP_POLLING;
				IGB_CORE_UNLOCK(adapter);
			} else {
				error = ether_poll_deregister(ifp);
				/* Enable interrupt even in error case */
				IGB_CORE_LOCK(adapter);
				igb_enable_intr(adapter);
				ifp->if_capenable &= ~IFCAP_POLLING;
				IGB_CORE_UNLOCK(adapter);
			}
		}
#endif
		if (mask & IFCAP_HWCSUM) {
			ifp->if_capenable ^= IFCAP_HWCSUM;
			reinit = 1;
		}
		if (mask & IFCAP_TSO4) {
			ifp->if_capenable ^= IFCAP_TSO4;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWTAGGING) {
			ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
			reinit = 1;
		}
		if (mask & IFCAP_VLAN_HWFILTER) {
			ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
			reinit = 1;
		}
		if (mask & IFCAP_LRO) {
			ifp->if_capenable ^= IFCAP_LRO;
			reinit = 1;
		}
		/* Any offload change requires a full reinit while running. */
		if (reinit && (ifp->if_drv_flags & IFF_DRV_RUNNING))
			igb_init(adapter);
		VLAN_CAPABILITIES(ifp);
		break;
	    }

	default:
		error = ether_ioctl(ifp, command, data);
		break;
	}

	return (error);
}
---|
1152 | |
---|
1153 | |
---|
1154 | /********************************************************************* |
---|
1155 | * Init entry point |
---|
1156 | * |
---|
1157 | * This routine is used in two ways. It is used by the stack as |
---|
1158 | * init entry point in network interface structure. It is also used |
---|
1159 | * by the driver as a hw/sw initialization routine to get to a |
---|
1160 | * consistent state. |
---|
1161 | * |
---|
1162 | * return 0 on success, positive on failure |
---|
1163 | **********************************************************************/ |
---|
1164 | |
---|
static void
igb_init_locked(struct adapter *adapter)
{
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;

	INIT_DEBUGOUT("igb_init: begin");

	IGB_CORE_LOCK_ASSERT(adapter);

	/* Quiesce interrupts and the local timer during reinit. */
	igb_disable_intr(adapter);
	callout_stop(&adapter->timer);

	/* Get the latest mac address, User can use a LAA */
	bcopy(IF_LLADDR(adapter->ifp), adapter->hw.mac.addr,
	    ETHER_ADDR_LEN);

	/* Put the address into the Receive Address Array */
	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);

	igb_reset(adapter);
	igb_update_link_status(adapter);

	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);

	/* Set hardware offload abilities */
	ifp->if_hwassist = 0;
	if (ifp->if_capenable & IFCAP_TXCSUM) {
		ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
#if __FreeBSD_version >= 800000
		/* SCTP checksum offload only enabled for 82576 here. */
		if (adapter->hw.mac.type == e1000_82576)
			ifp->if_hwassist |= CSUM_SCTP;
#endif
	}

	if (ifp->if_capenable & IFCAP_TSO4)
		ifp->if_hwassist |= CSUM_TSO;

	/* Configure for OS presence */
	igb_init_manageability(adapter);

	/* Prepare transmit descriptors and buffers */
	igb_setup_transmit_structures(adapter);
	igb_initialize_transmit_units(adapter);

	/* Setup Multicast table */
	igb_set_multi(adapter);

	/*
	** Figure out the desired mbuf pool
	** for doing jumbo/packetsplit
	*/
	if (adapter->max_frame_size <= 2048)
		adapter->rx_mbuf_sz = MCLBYTES;
	else if (adapter->max_frame_size <= 4096)
		adapter->rx_mbuf_sz = MJUMPAGESIZE;
	else
		adapter->rx_mbuf_sz = MJUM9BYTES;

	/* Prepare receive descriptors and buffers */
	if (igb_setup_receive_structures(adapter)) {
		device_printf(dev, "Could not setup receive structures\n");
		return;
	}
	igb_initialize_receive_units(adapter);

	/* Use real VLAN Filter support? */
	if (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) {
		if (ifp->if_capenable & IFCAP_VLAN_HWFILTER)
			/* Use real VLAN Filter support */
			igb_setup_vlan_hw_support(adapter);
		else {
			/* Tag stripping only: set the VME bit in CTRL. */
			u32 ctrl;
			ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
			ctrl |= E1000_CTRL_VME;
			E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
		}
	}

	/* Don't lose promiscuous settings */
	igb_set_promisc(adapter);

	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	/* Restart the once-a-second local timer. */
	callout_reset(&adapter->timer, hz, igb_local_timer, adapter);
	e1000_clear_hw_cntrs_base_generic(&adapter->hw);

	if (adapter->msix > 1) /* Set up queue routing */
		igb_configure_queues(adapter);

	/* this clears any pending interrupts */
	E1000_READ_REG(&adapter->hw, E1000_ICR);
#ifdef DEVICE_POLLING
	/*
	 * Only enable interrupts if we are not polling, make sure
	 * they are off otherwise.
	 */
	if (ifp->if_capenable & IFCAP_POLLING)
		igb_disable_intr(adapter);
	else
#endif /* DEVICE_POLLING */
	{
		igb_enable_intr(adapter);
		/* Fire a link-status-change to kick off link detection. */
		E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
	}

	/* Don't reset the phy next time init gets called */
	adapter->hw.phy.reset_disable = TRUE;
}
---|
1275 | |
---|
static void
igb_init(void *arg)
{
	struct adapter *sc = arg;

	/* Serialized wrapper around igb_init_locked(). */
	IGB_CORE_LOCK(sc);
	igb_init_locked(sc);
	IGB_CORE_UNLOCK(sc);
}
---|
1285 | |
---|
1286 | |
---|
/*
 * Deferred per-queue service task: clean the RX and TX rings and
 * restart transmission, rescheduling itself while work remains.
 */
static void
igb_handle_que(void *context, int pending)
{
	struct igb_queue *que = context;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct ifnet *ifp = adapter->ifp;

	if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
		bool more;

		/* -1: no RX budget, clean as much as is available. */
		more = igb_rxeof(que, -1, NULL);

		IGB_TX_LOCK(txr);
		if (igb_txeof(txr))
			more = TRUE;
#if __FreeBSD_version >= 800000
		if (!drbr_empty(ifp, txr->br))
			igb_mq_start_locked(ifp, txr, NULL);
#else
		if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
			igb_start_locked(txr, ifp);
#endif
		IGB_TX_UNLOCK(txr);
		/* Work remains: reschedule instead of re-enabling intrs. */
		if (more) {
			taskqueue_enqueue(que->tq, &que->que_task);
			return;
		}
	}

#ifdef DEVICE_POLLING
	/* In polling mode interrupts stay masked. */
	if (ifp->if_capenable & IFCAP_POLLING)
		return;
#endif
	/* Reenable this interrupt */
	if (que->eims)
		E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims);
	else
		igb_enable_intr(adapter);
}
---|
1327 | |
---|
1328 | /* Deal with link in a sleepable context */ |
---|
1329 | static void |
---|
1330 | igb_handle_link(void *context, int pending) |
---|
1331 | { |
---|
1332 | struct adapter *adapter = context; |
---|
1333 | |
---|
1334 | adapter->hw.mac.get_link_status = 1; |
---|
1335 | igb_update_link_status(adapter); |
---|
1336 | } |
---|
1337 | |
---|
1338 | /********************************************************************* |
---|
1339 | * |
---|
1340 | * MSI/Legacy Deferred |
---|
1341 | * Interrupt Service routine |
---|
1342 | * |
---|
1343 | *********************************************************************/ |
---|
1344 | static int |
---|
1345 | igb_irq_fast(void *arg) |
---|
1346 | { |
---|
1347 | struct adapter *adapter = arg; |
---|
1348 | struct igb_queue *que = adapter->queues; |
---|
1349 | u32 reg_icr; |
---|
1350 | |
---|
1351 | |
---|
1352 | reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR); |
---|
1353 | |
---|
1354 | /* Hot eject? */ |
---|
1355 | if (reg_icr == 0xffffffff) |
---|
1356 | return FILTER_STRAY; |
---|
1357 | |
---|
1358 | /* Definitely not our interrupt. */ |
---|
1359 | if (reg_icr == 0x0) |
---|
1360 | return FILTER_STRAY; |
---|
1361 | |
---|
1362 | if ((reg_icr & E1000_ICR_INT_ASSERTED) == 0) |
---|
1363 | return FILTER_STRAY; |
---|
1364 | |
---|
1365 | /* |
---|
1366 | * Mask interrupts until the taskqueue is finished running. This is |
---|
1367 | * cheap, just assume that it is needed. This also works around the |
---|
1368 | * MSI message reordering errata on certain systems. |
---|
1369 | */ |
---|
1370 | igb_disable_intr(adapter); |
---|
1371 | taskqueue_enqueue(que->tq, &que->que_task); |
---|
1372 | |
---|
1373 | /* Link status change */ |
---|
1374 | if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) |
---|
1375 | taskqueue_enqueue(que->tq, &adapter->link_task); |
---|
1376 | |
---|
1377 | if (reg_icr & E1000_ICR_RXO) |
---|
1378 | adapter->rx_overruns++; |
---|
1379 | return FILTER_HANDLED; |
---|
1380 | } |
---|
1381 | |
---|
1382 | #ifdef DEVICE_POLLING |
---|
1383 | /********************************************************************* |
---|
1384 | * |
---|
1385 | * Legacy polling routine : if using this code you MUST be sure that |
---|
1386 | * multiqueue is not defined, ie, set igb_num_queues to 1. |
---|
1387 | * |
---|
1388 | *********************************************************************/ |
---|
#if __FreeBSD_version >= 800000
#define POLL_RETURN_COUNT(a) (a)
static int
#else
#define POLL_RETURN_COUNT(a)
static void
#endif
igb_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	struct adapter *adapter = ifp->if_softc;
	struct igb_queue *que = adapter->queues;
	struct tx_ring *txr = adapter->tx_rings;
	u32 reg_icr, rx_done = 0;
	u32 loop = IGB_MAX_LOOP;
	bool more;

	IGB_CORE_LOCK(adapter);
	if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		IGB_CORE_UNLOCK(adapter);
		return POLL_RETURN_COUNT(rx_done);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		reg_icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
		/* Link status change */
		if (reg_icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))
			igb_handle_link(adapter, 0);

		if (reg_icr & E1000_ICR_RXO)
			adapter->rx_overruns++;
	}
	IGB_CORE_UNLOCK(adapter);

	/* Clean the RX ring, bounded by the poll budget 'count'. */
	igb_rxeof(que, count, &rx_done);

	IGB_TX_LOCK(txr);
	/* Reap completed transmits, bounded by IGB_MAX_LOOP passes. */
	do {
		more = igb_txeof(txr);
	} while (loop-- && more);
#if __FreeBSD_version >= 800000
	if (!drbr_empty(ifp, txr->br))
		igb_mq_start_locked(ifp, txr, NULL);
#else
	if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
		igb_start_locked(txr, ifp);
#endif
	IGB_TX_UNLOCK(txr);
	return POLL_RETURN_COUNT(rx_done);
}
---|
1438 | #endif /* DEVICE_POLLING */ |
---|
1439 | |
---|
1440 | /********************************************************************* |
---|
1441 | * |
---|
1442 | * MSIX TX Interrupt Service routine |
---|
1443 | * |
---|
1444 | **********************************************************************/ |
---|
static void
igb_msix_que(void *arg)
{
	struct igb_queue *que = arg;
	struct adapter *adapter = que->adapter;
	struct tx_ring *txr = que->txr;
	struct rx_ring *rxr = que->rxr;
	u32 newitr = 0;
	bool more_tx, more_rx;

	/* Mask this queue's MSI-X vector while we service it. */
	E1000_WRITE_REG(&adapter->hw, E1000_EIMC, que->eims);
	++que->irqs;

	IGB_TX_LOCK(txr);
	more_tx = igb_txeof(txr);
	IGB_TX_UNLOCK(txr);

	more_rx = igb_rxeof(que, adapter->rx_process_limit, NULL);

	if (igb_enable_aim == FALSE)
		goto no_calc;
	/*
	** Do Adaptive Interrupt Moderation:
	**  - Write out last calculated setting
	**  - Calculate based on average size over
	**    the last interval.
	*/
	if (que->eitr_setting)
		E1000_WRITE_REG(&adapter->hw,
		    E1000_EITR(que->msix), que->eitr_setting);

	que->eitr_setting = 0;

	/* Idle, do nothing */
	if ((txr->bytes == 0) && (rxr->bytes == 0))
		goto no_calc;

	/* Used half Default if sub-gig */
	if (adapter->link_speed != 1000)
		newitr = IGB_DEFAULT_ITR / 2;
	else {
		/* Average bytes per packet over the last interval. */
		if ((txr->bytes) && (txr->packets))
			newitr = txr->bytes/txr->packets;
		if ((rxr->bytes) && (rxr->packets))
			newitr = max(newitr,
			    (rxr->bytes / rxr->packets));
		newitr += 24; /* account for hardware frame, crc */
		/* set an upper boundary */
		newitr = min(newitr, 3000);
		/* Be nice to the mid range */
		if ((newitr > 300) && (newitr < 1200))
			newitr = (newitr / 3);
		else
			newitr = (newitr / 2);
	}
	newitr &= 0x7FFC;  /* Mask invalid bits */
	if (adapter->hw.mac.type == e1000_82575)
		newitr |= newitr << 16;
	else
		newitr |= E1000_EITR_CNT_IGNR;

	/* save for next interrupt */
	que->eitr_setting = newitr;

	/* Reset state */
	txr->bytes = 0;
	txr->packets = 0;
	rxr->bytes = 0;
	rxr->packets = 0;

no_calc:
	/* Schedule a clean task if needed*/
	if (more_tx || more_rx)
		taskqueue_enqueue(que->tq, &que->que_task);
	else
		/* Reenable this interrupt */
		E1000_WRITE_REG(&adapter->hw, E1000_EIMS, que->eims);
	return;
}
---|
1524 | |
---|
1525 | |
---|
1526 | /********************************************************************* |
---|
1527 | * |
---|
1528 | * MSIX Link Interrupt Service routine |
---|
1529 | * |
---|
1530 | **********************************************************************/ |
---|
1531 | |
---|
1532 | static void |
---|
1533 | igb_msix_link(void *arg) |
---|
1534 | { |
---|
1535 | struct adapter *adapter = arg; |
---|
1536 | u32 icr; |
---|
1537 | |
---|
1538 | ++adapter->link_irq; |
---|
1539 | icr = E1000_READ_REG(&adapter->hw, E1000_ICR); |
---|
1540 | if (!(icr & E1000_ICR_LSC)) |
---|
1541 | goto spurious; |
---|
1542 | igb_handle_link(adapter, 0); |
---|
1543 | |
---|
1544 | spurious: |
---|
1545 | /* Rearm */ |
---|
1546 | E1000_WRITE_REG(&adapter->hw, E1000_IMS, E1000_IMS_LSC); |
---|
1547 | E1000_WRITE_REG(&adapter->hw, E1000_EIMS, adapter->link_mask); |
---|
1548 | return; |
---|
1549 | } |
---|
1550 | |
---|
1551 | |
---|
1552 | /********************************************************************* |
---|
1553 | * |
---|
1554 | * Media Ioctl callback |
---|
1555 | * |
---|
1556 | * This routine is called whenever the user queries the status of |
---|
1557 | * the interface using ifconfig. |
---|
1558 | * |
---|
1559 | **********************************************************************/ |
---|
1560 | static void |
---|
1561 | igb_media_status(struct ifnet *ifp, struct ifmediareq *ifmr) |
---|
1562 | { |
---|
1563 | struct adapter *adapter = ifp->if_softc; |
---|
1564 | u_char fiber_type = IFM_1000_SX; |
---|
1565 | |
---|
1566 | INIT_DEBUGOUT("igb_media_status: begin"); |
---|
1567 | |
---|
1568 | IGB_CORE_LOCK(adapter); |
---|
1569 | igb_update_link_status(adapter); |
---|
1570 | |
---|
1571 | ifmr->ifm_status = IFM_AVALID; |
---|
1572 | ifmr->ifm_active = IFM_ETHER; |
---|
1573 | |
---|
1574 | if (!adapter->link_active) { |
---|
1575 | IGB_CORE_UNLOCK(adapter); |
---|
1576 | return; |
---|
1577 | } |
---|
1578 | |
---|
1579 | ifmr->ifm_status |= IFM_ACTIVE; |
---|
1580 | |
---|
1581 | if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || |
---|
1582 | (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) |
---|
1583 | ifmr->ifm_active |= fiber_type | IFM_FDX; |
---|
1584 | else { |
---|
1585 | switch (adapter->link_speed) { |
---|
1586 | case 10: |
---|
1587 | ifmr->ifm_active |= IFM_10_T; |
---|
1588 | break; |
---|
1589 | case 100: |
---|
1590 | ifmr->ifm_active |= IFM_100_TX; |
---|
1591 | break; |
---|
1592 | case 1000: |
---|
1593 | ifmr->ifm_active |= IFM_1000_T; |
---|
1594 | break; |
---|
1595 | } |
---|
1596 | if (adapter->link_duplex == FULL_DUPLEX) |
---|
1597 | ifmr->ifm_active |= IFM_FDX; |
---|
1598 | else |
---|
1599 | ifmr->ifm_active |= IFM_HDX; |
---|
1600 | } |
---|
1601 | IGB_CORE_UNLOCK(adapter); |
---|
1602 | } |
---|
1603 | |
---|
1604 | /********************************************************************* |
---|
1605 | * |
---|
1606 | * Media Ioctl callback |
---|
1607 | * |
---|
1608 | * This routine is called when the user changes speed/duplex using |
---|
1609 | * media/mediopt option with ifconfig. |
---|
1610 | * |
---|
1611 | **********************************************************************/ |
---|
1612 | static int |
---|
1613 | igb_media_change(struct ifnet *ifp) |
---|
1614 | { |
---|
1615 | struct adapter *adapter = ifp->if_softc; |
---|
1616 | struct ifmedia *ifm = &adapter->media; |
---|
1617 | |
---|
1618 | INIT_DEBUGOUT("igb_media_change: begin"); |
---|
1619 | |
---|
1620 | if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) |
---|
1621 | return (EINVAL); |
---|
1622 | |
---|
1623 | IGB_CORE_LOCK(adapter); |
---|
1624 | switch (IFM_SUBTYPE(ifm->ifm_media)) { |
---|
1625 | case IFM_AUTO: |
---|
1626 | adapter->hw.mac.autoneg = DO_AUTO_NEG; |
---|
1627 | adapter->hw.phy.autoneg_advertised = AUTONEG_ADV_DEFAULT; |
---|
1628 | break; |
---|
1629 | case IFM_1000_LX: |
---|
1630 | case IFM_1000_SX: |
---|
1631 | case IFM_1000_T: |
---|
1632 | adapter->hw.mac.autoneg = DO_AUTO_NEG; |
---|
1633 | adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL; |
---|
1634 | break; |
---|
1635 | case IFM_100_TX: |
---|
1636 | adapter->hw.mac.autoneg = FALSE; |
---|
1637 | adapter->hw.phy.autoneg_advertised = 0; |
---|
1638 | if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) |
---|
1639 | adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_FULL; |
---|
1640 | else |
---|
1641 | adapter->hw.mac.forced_speed_duplex = ADVERTISE_100_HALF; |
---|
1642 | break; |
---|
1643 | case IFM_10_T: |
---|
1644 | adapter->hw.mac.autoneg = FALSE; |
---|
1645 | adapter->hw.phy.autoneg_advertised = 0; |
---|
1646 | if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) |
---|
1647 | adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_FULL; |
---|
1648 | else |
---|
1649 | adapter->hw.mac.forced_speed_duplex = ADVERTISE_10_HALF; |
---|
1650 | break; |
---|
1651 | default: |
---|
1652 | device_printf(adapter->dev, "Unsupported media type\n"); |
---|
1653 | } |
---|
1654 | |
---|
1655 | igb_init_locked(adapter); |
---|
1656 | IGB_CORE_UNLOCK(adapter); |
---|
1657 | |
---|
1658 | return (0); |
---|
1659 | } |
---|
1660 | |
---|
1661 | |
---|
1662 | /********************************************************************* |
---|
1663 | * |
---|
1664 | * This routine maps the mbufs to Advanced TX descriptors. |
---|
1665 | * used by the 82575 adapter. |
---|
1666 | * |
---|
1667 | **********************************************************************/ |
---|
1668 | |
---|
1669 | static int |
---|
1670 | igb_xmit(struct tx_ring *txr, struct mbuf **m_headp) |
---|
1671 | { |
---|
1672 | struct adapter *adapter = txr->adapter; |
---|
1673 | bus_dma_segment_t segs[IGB_MAX_SCATTER]; |
---|
1674 | bus_dmamap_t map; |
---|
1675 | struct igb_tx_buffer *tx_buffer, *tx_buffer_mapped; |
---|
1676 | union e1000_adv_tx_desc *txd = NULL; |
---|
1677 | struct mbuf *m_head; |
---|
1678 | u32 olinfo_status = 0, cmd_type_len = 0; |
---|
1679 | int nsegs, i, j, error, first, last = 0; |
---|
1680 | u32 hdrlen = 0; |
---|
1681 | |
---|
1682 | m_head = *m_headp; |
---|
1683 | |
---|
1684 | |
---|
1685 | /* Set basic descriptor constants */ |
---|
1686 | cmd_type_len |= E1000_ADVTXD_DTYP_DATA; |
---|
1687 | cmd_type_len |= E1000_ADVTXD_DCMD_IFCS | E1000_ADVTXD_DCMD_DEXT; |
---|
1688 | if (m_head->m_flags & M_VLANTAG) |
---|
1689 | cmd_type_len |= E1000_ADVTXD_DCMD_VLE; |
---|
1690 | |
---|
1691 | /* |
---|
1692 | * Force a cleanup if number of TX descriptors |
---|
1693 | * available hits the threshold |
---|
1694 | */ |
---|
1695 | if (txr->tx_avail <= IGB_TX_CLEANUP_THRESHOLD) { |
---|
1696 | igb_txeof(txr); |
---|
1697 | /* Now do we at least have a minimal? */ |
---|
1698 | if (txr->tx_avail <= IGB_TX_OP_THRESHOLD) { |
---|
1699 | txr->no_desc_avail++; |
---|
1700 | return (ENOBUFS); |
---|
1701 | } |
---|
1702 | } |
---|
1703 | |
---|
1704 | /* |
---|
1705 | * Map the packet for DMA. |
---|
1706 | * |
---|
1707 | * Capture the first descriptor index, |
---|
1708 | * this descriptor will have the index |
---|
1709 | * of the EOP which is the only one that |
---|
1710 | * now gets a DONE bit writeback. |
---|
1711 | */ |
---|
1712 | first = txr->next_avail_desc; |
---|
1713 | tx_buffer = &txr->tx_buffers[first]; |
---|
1714 | tx_buffer_mapped = tx_buffer; |
---|
1715 | map = tx_buffer->map; |
---|
1716 | |
---|
1717 | error = bus_dmamap_load_mbuf_sg(txr->txtag, map, |
---|
1718 | *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); |
---|
1719 | |
---|
1720 | if (error == EFBIG) { |
---|
1721 | struct mbuf *m; |
---|
1722 | |
---|
1723 | m = m_defrag(*m_headp, M_DONTWAIT); |
---|
1724 | if (m == NULL) { |
---|
1725 | adapter->mbuf_defrag_failed++; |
---|
1726 | m_freem(*m_headp); |
---|
1727 | *m_headp = NULL; |
---|
1728 | return (ENOBUFS); |
---|
1729 | } |
---|
1730 | *m_headp = m; |
---|
1731 | |
---|
1732 | /* Try it again */ |
---|
1733 | error = bus_dmamap_load_mbuf_sg(txr->txtag, map, |
---|
1734 | *m_headp, segs, &nsegs, BUS_DMA_NOWAIT); |
---|
1735 | |
---|
1736 | if (error == ENOMEM) { |
---|
1737 | adapter->no_tx_dma_setup++; |
---|
1738 | return (error); |
---|
1739 | } else if (error != 0) { |
---|
1740 | adapter->no_tx_dma_setup++; |
---|
1741 | m_freem(*m_headp); |
---|
1742 | *m_headp = NULL; |
---|
1743 | return (error); |
---|
1744 | } |
---|
1745 | } else if (error == ENOMEM) { |
---|
1746 | adapter->no_tx_dma_setup++; |
---|
1747 | return (error); |
---|
1748 | } else if (error != 0) { |
---|
1749 | adapter->no_tx_dma_setup++; |
---|
1750 | m_freem(*m_headp); |
---|
1751 | *m_headp = NULL; |
---|
1752 | return (error); |
---|
1753 | } |
---|
1754 | |
---|
1755 | /* Check again to be sure we have enough descriptors */ |
---|
1756 | if (nsegs > (txr->tx_avail - 2)) { |
---|
1757 | txr->no_desc_avail++; |
---|
1758 | bus_dmamap_unload(txr->txtag, map); |
---|
1759 | return (ENOBUFS); |
---|
1760 | } |
---|
1761 | m_head = *m_headp; |
---|
1762 | |
---|
1763 | /* |
---|
1764 | * Set up the context descriptor: |
---|
1765 | * used when any hardware offload is done. |
---|
1766 | * This includes CSUM, VLAN, and TSO. It |
---|
1767 | * will use the first descriptor. |
---|
1768 | */ |
---|
1769 | if (m_head->m_pkthdr.csum_flags & CSUM_TSO) { |
---|
1770 | if (igb_tso_setup(txr, m_head, &hdrlen)) { |
---|
1771 | cmd_type_len |= E1000_ADVTXD_DCMD_TSE; |
---|
1772 | olinfo_status |= E1000_TXD_POPTS_IXSM << 8; |
---|
1773 | olinfo_status |= E1000_TXD_POPTS_TXSM << 8; |
---|
1774 | } else |
---|
1775 | return (ENXIO); |
---|
1776 | } else if (igb_tx_ctx_setup(txr, m_head)) |
---|
1777 | olinfo_status |= E1000_TXD_POPTS_TXSM << 8; |
---|
1778 | |
---|
1779 | /* Calculate payload length */ |
---|
1780 | olinfo_status |= ((m_head->m_pkthdr.len - hdrlen) |
---|
1781 | << E1000_ADVTXD_PAYLEN_SHIFT); |
---|
1782 | |
---|
1783 | /* 82575 needs the queue index added */ |
---|
1784 | if (adapter->hw.mac.type == e1000_82575) |
---|
1785 | olinfo_status |= txr->me << 4; |
---|
1786 | |
---|
1787 | /* Set up our transmit descriptors */ |
---|
1788 | i = txr->next_avail_desc; |
---|
1789 | for (j = 0; j < nsegs; j++) { |
---|
1790 | bus_size_t seg_len; |
---|
1791 | bus_addr_t seg_addr; |
---|
1792 | |
---|
1793 | tx_buffer = &txr->tx_buffers[i]; |
---|
1794 | txd = (union e1000_adv_tx_desc *)&txr->tx_base[i]; |
---|
1795 | seg_addr = segs[j].ds_addr; |
---|
1796 | seg_len = segs[j].ds_len; |
---|
1797 | |
---|
1798 | txd->read.buffer_addr = htole64(seg_addr); |
---|
1799 | txd->read.cmd_type_len = htole32(cmd_type_len | seg_len); |
---|
1800 | txd->read.olinfo_status = htole32(olinfo_status); |
---|
1801 | last = i; |
---|
1802 | if (++i == adapter->num_tx_desc) |
---|
1803 | i = 0; |
---|
1804 | tx_buffer->m_head = NULL; |
---|
1805 | tx_buffer->next_eop = -1; |
---|
1806 | } |
---|
1807 | |
---|
1808 | txr->next_avail_desc = i; |
---|
1809 | txr->tx_avail -= nsegs; |
---|
1810 | |
---|
1811 | tx_buffer->m_head = m_head; |
---|
1812 | tx_buffer_mapped->map = tx_buffer->map; |
---|
1813 | tx_buffer->map = map; |
---|
1814 | bus_dmamap_sync(txr->txtag, map, BUS_DMASYNC_PREWRITE); |
---|
1815 | |
---|
1816 | /* |
---|
1817 | * Last Descriptor of Packet |
---|
1818 | * needs End Of Packet (EOP) |
---|
1819 | * and Report Status (RS) |
---|
1820 | */ |
---|
1821 | txd->read.cmd_type_len |= |
---|
1822 | htole32(E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS); |
---|
1823 | /* |
---|
1824 | * Keep track in the first buffer which |
---|
1825 | * descriptor will be written back |
---|
1826 | */ |
---|
1827 | tx_buffer = &txr->tx_buffers[first]; |
---|
1828 | tx_buffer->next_eop = last; |
---|
1829 | txr->watchdog_time = ticks; |
---|
1830 | |
---|
1831 | /* |
---|
1832 | * Advance the Transmit Descriptor Tail (TDT), this tells the E1000 |
---|
1833 | * that this frame is available to transmit. |
---|
1834 | */ |
---|
1835 | bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, |
---|
1836 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
---|
1837 | E1000_WRITE_REG(&adapter->hw, E1000_TDT(txr->me), i); |
---|
1838 | ++txr->tx_packets; |
---|
1839 | |
---|
1840 | return (0); |
---|
1841 | |
---|
1842 | } |
---|
1843 | |
---|
1844 | static void |
---|
1845 | igb_set_promisc(struct adapter *adapter) |
---|
1846 | { |
---|
1847 | struct ifnet *ifp = adapter->ifp; |
---|
1848 | struct e1000_hw *hw = &adapter->hw; |
---|
1849 | u32 reg; |
---|
1850 | |
---|
1851 | if (hw->mac.type == e1000_vfadapt) { |
---|
1852 | e1000_promisc_set_vf(hw, e1000_promisc_enabled); |
---|
1853 | return; |
---|
1854 | } |
---|
1855 | |
---|
1856 | reg = E1000_READ_REG(hw, E1000_RCTL); |
---|
1857 | if (ifp->if_flags & IFF_PROMISC) { |
---|
1858 | reg |= (E1000_RCTL_UPE | E1000_RCTL_MPE); |
---|
1859 | E1000_WRITE_REG(hw, E1000_RCTL, reg); |
---|
1860 | } else if (ifp->if_flags & IFF_ALLMULTI) { |
---|
1861 | reg |= E1000_RCTL_MPE; |
---|
1862 | reg &= ~E1000_RCTL_UPE; |
---|
1863 | E1000_WRITE_REG(hw, E1000_RCTL, reg); |
---|
1864 | } |
---|
1865 | } |
---|
1866 | |
---|
1867 | static void |
---|
1868 | igb_disable_promisc(struct adapter *adapter) |
---|
1869 | { |
---|
1870 | struct e1000_hw *hw = &adapter->hw; |
---|
1871 | u32 reg; |
---|
1872 | |
---|
1873 | if (hw->mac.type == e1000_vfadapt) { |
---|
1874 | e1000_promisc_set_vf(hw, e1000_promisc_disabled); |
---|
1875 | return; |
---|
1876 | } |
---|
1877 | reg = E1000_READ_REG(hw, E1000_RCTL); |
---|
1878 | reg &= (~E1000_RCTL_UPE); |
---|
1879 | reg &= (~E1000_RCTL_MPE); |
---|
1880 | E1000_WRITE_REG(hw, E1000_RCTL, reg); |
---|
1881 | } |
---|
1882 | |
---|
1883 | |
---|
1884 | /********************************************************************* |
---|
1885 | * Multicast Update |
---|
1886 | * |
---|
1887 | * This routine is called whenever multicast address list is updated. |
---|
1888 | * |
---|
1889 | **********************************************************************/ |
---|
1890 | |
---|
1891 | static void |
---|
1892 | igb_set_multi(struct adapter *adapter) |
---|
1893 | { |
---|
1894 | struct ifnet *ifp = adapter->ifp; |
---|
1895 | struct ifmultiaddr *ifma; |
---|
1896 | u32 reg_rctl = 0; |
---|
1897 | u8 *mta; |
---|
1898 | |
---|
1899 | int mcnt = 0; |
---|
1900 | |
---|
1901 | IOCTL_DEBUGOUT("igb_set_multi: begin"); |
---|
1902 | |
---|
1903 | mta = adapter->mta; |
---|
1904 | bzero(mta, sizeof(uint8_t) * ETH_ADDR_LEN * |
---|
1905 | MAX_NUM_MULTICAST_ADDRESSES); |
---|
1906 | |
---|
1907 | #if __FreeBSD_version < 800000 |
---|
1908 | IF_ADDR_LOCK(ifp); |
---|
1909 | #else |
---|
1910 | if_maddr_rlock(ifp); |
---|
1911 | #endif |
---|
1912 | TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { |
---|
1913 | if (ifma->ifma_addr->sa_family != AF_LINK) |
---|
1914 | continue; |
---|
1915 | |
---|
1916 | if (mcnt == MAX_NUM_MULTICAST_ADDRESSES) |
---|
1917 | break; |
---|
1918 | |
---|
1919 | bcopy(LLADDR((struct sockaddr_dl *)ifma->ifma_addr), |
---|
1920 | &mta[mcnt * ETH_ADDR_LEN], ETH_ADDR_LEN); |
---|
1921 | mcnt++; |
---|
1922 | } |
---|
1923 | #if __FreeBSD_version < 800000 |
---|
1924 | IF_ADDR_UNLOCK(ifp); |
---|
1925 | #else |
---|
1926 | if_maddr_runlock(ifp); |
---|
1927 | #endif |
---|
1928 | |
---|
1929 | if (mcnt >= MAX_NUM_MULTICAST_ADDRESSES) { |
---|
1930 | reg_rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL); |
---|
1931 | reg_rctl |= E1000_RCTL_MPE; |
---|
1932 | E1000_WRITE_REG(&adapter->hw, E1000_RCTL, reg_rctl); |
---|
1933 | } else |
---|
1934 | e1000_update_mc_addr_list(&adapter->hw, mta, mcnt); |
---|
1935 | } |
---|
1936 | |
---|
1937 | |
---|
1938 | /********************************************************************* |
---|
1939 | * Timer routine: |
---|
1940 | * This routine checks for link status, |
---|
1941 | * updates statistics, and does the watchdog. |
---|
1942 | * |
---|
1943 | **********************************************************************/ |
---|
1944 | |
---|
1945 | static void |
---|
1946 | igb_local_timer(void *arg) |
---|
1947 | { |
---|
1948 | struct adapter *adapter = arg; |
---|
1949 | device_t dev = adapter->dev; |
---|
1950 | struct tx_ring *txr = adapter->tx_rings; |
---|
1951 | |
---|
1952 | |
---|
1953 | IGB_CORE_LOCK_ASSERT(adapter); |
---|
1954 | |
---|
1955 | igb_update_link_status(adapter); |
---|
1956 | igb_update_stats_counters(adapter); |
---|
1957 | |
---|
1958 | /* |
---|
1959 | ** If flow control has paused us since last checking |
---|
1960 | ** it invalidates the watchdog timing, so dont run it. |
---|
1961 | */ |
---|
1962 | if (adapter->pause_frames) { |
---|
1963 | adapter->pause_frames = 0; |
---|
1964 | goto out; |
---|
1965 | } |
---|
1966 | |
---|
1967 | /* |
---|
1968 | ** Watchdog: check for time since any descriptor was cleaned |
---|
1969 | */ |
---|
1970 | for (int i = 0; i < adapter->num_queues; i++, txr++) |
---|
1971 | if (txr->queue_status == IGB_QUEUE_HUNG) |
---|
1972 | goto timeout; |
---|
1973 | out: |
---|
1974 | callout_reset(&adapter->timer, hz, igb_local_timer, adapter); |
---|
1975 | return; |
---|
1976 | |
---|
1977 | timeout: |
---|
1978 | device_printf(adapter->dev, "Watchdog timeout -- resetting\n"); |
---|
1979 | device_printf(dev,"Queue(%d) tdh = %d, hw tdt = %d\n", txr->me, |
---|
1980 | E1000_READ_REG(&adapter->hw, E1000_TDH(txr->me)), |
---|
1981 | E1000_READ_REG(&adapter->hw, E1000_TDT(txr->me))); |
---|
1982 | device_printf(dev,"TX(%d) desc avail = %d," |
---|
1983 | "Next TX to Clean = %d\n", |
---|
1984 | txr->me, txr->tx_avail, txr->next_to_clean); |
---|
1985 | adapter->ifp->if_drv_flags &= ~IFF_DRV_RUNNING; |
---|
1986 | adapter->watchdog_events++; |
---|
1987 | igb_init_locked(adapter); |
---|
1988 | } |
---|
1989 | |
---|
/*
 * Refresh the cached link state from the hardware and, on a
 * transition, update speed/duplex/baudrate and notify the stack.
 * A link-down transition also idles every TX watchdog so stalled
 * rings are not mistaken for hangs.
 */
static void
igb_update_link_status(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct ifnet *ifp = adapter->ifp;
	device_t dev = adapter->dev;
	struct tx_ring *txr = adapter->tx_rings;
	u32 link_check = 0;

	/* Get the cached link value or read for real */
	switch (hw->phy.media_type) {
	case e1000_media_type_copper:
		if (hw->mac.get_link_status) {
			/* Do the work to read phy */
			e1000_check_for_link(hw);
			/* get_link_status is cleared once link is read. */
			link_check = !hw->mac.get_link_status;
		} else
			link_check = TRUE;
		break;
	case e1000_media_type_fiber:
		e1000_check_for_link(hw);
		/* Fiber reports link through the STATUS.LU bit. */
		link_check = (E1000_READ_REG(hw, E1000_STATUS) &
		    E1000_STATUS_LU);
		break;
	case e1000_media_type_internal_serdes:
		e1000_check_for_link(hw);
		link_check = adapter->hw.mac.serdes_has_link;
		break;
	/* VF device is type_unknown */
	case e1000_media_type_unknown:
		e1000_check_for_link(hw);
		link_check = !hw->mac.get_link_status;
		/* Fall thru */
	default:
		break;
	}

	/* Now we check if a transition has happened */
	if (link_check && (adapter->link_active == 0)) {
		/* Link came up: cache speed/duplex and tell the stack. */
		e1000_get_speed_and_duplex(&adapter->hw,
		    &adapter->link_speed, &adapter->link_duplex);
		if (bootverbose)
			device_printf(dev, "Link is up %d Mbps %s\n",
			    adapter->link_speed,
			    ((adapter->link_duplex == FULL_DUPLEX) ?
			    "Full Duplex" : "Half Duplex"));
		adapter->link_active = 1;
		ifp->if_baudrate = adapter->link_speed * 1000000;
		/* This can sleep */
		if_link_state_change(ifp, LINK_STATE_UP);
	} else if (!link_check && (adapter->link_active == 1)) {
		/* Link went down: clear cached state and notify. */
		ifp->if_baudrate = adapter->link_speed = 0;
		adapter->link_duplex = 0;
		if (bootverbose)
			device_printf(dev, "Link is Down\n");
		adapter->link_active = 0;
		/* This can sleep */
		if_link_state_change(ifp, LINK_STATE_DOWN);
		/* Turn off watchdogs */
		for (int i = 0; i < adapter->num_queues; i++, txr++)
			txr->queue_status = IGB_QUEUE_IDLE;
	}
}
---|
2053 | |
---|
2054 | /********************************************************************* |
---|
2055 | * |
---|
2056 | * This routine disables all traffic on the adapter by issuing a |
---|
2057 | * global reset on the MAC and deallocates TX/RX buffers. |
---|
2058 | * |
---|
2059 | **********************************************************************/ |
---|
2060 | |
---|
2061 | static void |
---|
2062 | igb_stop(void *arg) |
---|
2063 | { |
---|
2064 | struct adapter *adapter = arg; |
---|
2065 | struct ifnet *ifp = adapter->ifp; |
---|
2066 | struct tx_ring *txr = adapter->tx_rings; |
---|
2067 | |
---|
2068 | IGB_CORE_LOCK_ASSERT(adapter); |
---|
2069 | |
---|
2070 | INIT_DEBUGOUT("igb_stop: begin"); |
---|
2071 | |
---|
2072 | igb_disable_intr(adapter); |
---|
2073 | |
---|
2074 | callout_stop(&adapter->timer); |
---|
2075 | |
---|
2076 | /* Tell the stack that the interface is no longer active */ |
---|
2077 | ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE); |
---|
2078 | |
---|
2079 | /* Unarm watchdog timer. */ |
---|
2080 | for (int i = 0; i < adapter->num_queues; i++, txr++) { |
---|
2081 | IGB_TX_LOCK(txr); |
---|
2082 | txr->queue_status = IGB_QUEUE_IDLE; |
---|
2083 | IGB_TX_UNLOCK(txr); |
---|
2084 | } |
---|
2085 | |
---|
2086 | e1000_reset_hw(&adapter->hw); |
---|
2087 | E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0); |
---|
2088 | |
---|
2089 | e1000_led_off(&adapter->hw); |
---|
2090 | e1000_cleanup_led(&adapter->hw); |
---|
2091 | } |
---|
2092 | |
---|
2093 | |
---|
2094 | /********************************************************************* |
---|
2095 | * |
---|
2096 | * Determine hardware revision. |
---|
2097 | * |
---|
2098 | **********************************************************************/ |
---|
2099 | static void |
---|
2100 | igb_identify_hardware(struct adapter *adapter) |
---|
2101 | { |
---|
2102 | device_t dev = adapter->dev; |
---|
2103 | |
---|
2104 | /* Make sure our PCI config space has the necessary stuff set */ |
---|
2105 | adapter->hw.bus.pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2); |
---|
2106 | if (!((adapter->hw.bus.pci_cmd_word & PCIM_CMD_BUSMASTEREN) && |
---|
2107 | (adapter->hw.bus.pci_cmd_word & PCIM_CMD_MEMEN))) { |
---|
2108 | INIT_DEBUGOUT("Memory Access and/or Bus Master " |
---|
2109 | "bits were not set!\n"); |
---|
2110 | adapter->hw.bus.pci_cmd_word |= |
---|
2111 | (PCIM_CMD_BUSMASTEREN | PCIM_CMD_MEMEN); |
---|
2112 | pci_write_config(dev, PCIR_COMMAND, |
---|
2113 | adapter->hw.bus.pci_cmd_word, 2); |
---|
2114 | } |
---|
2115 | |
---|
2116 | /* Save off the information about this board */ |
---|
2117 | adapter->hw.vendor_id = pci_get_vendor(dev); |
---|
2118 | adapter->hw.device_id = pci_get_device(dev); |
---|
2119 | adapter->hw.revision_id = pci_read_config(dev, PCIR_REVID, 1); |
---|
2120 | adapter->hw.subsystem_vendor_id = |
---|
2121 | pci_read_config(dev, PCIR_SUBVEND_0, 2); |
---|
2122 | adapter->hw.subsystem_device_id = |
---|
2123 | pci_read_config(dev, PCIR_SUBDEV_0, 2); |
---|
2124 | |
---|
2125 | /* Set MAC type early for PCI setup */ |
---|
2126 | e1000_set_mac_type(&adapter->hw); |
---|
2127 | } |
---|
2128 | |
---|
2129 | static int |
---|
2130 | igb_allocate_pci_resources(struct adapter *adapter) |
---|
2131 | { |
---|
2132 | device_t dev = adapter->dev; |
---|
2133 | int rid; |
---|
2134 | |
---|
2135 | rid = PCIR_BAR(0); |
---|
2136 | adapter->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, |
---|
2137 | &rid, RF_ACTIVE); |
---|
2138 | if (adapter->pci_mem == NULL) { |
---|
2139 | device_printf(dev, "Unable to allocate bus resource: memory\n"); |
---|
2140 | return (ENXIO); |
---|
2141 | } |
---|
2142 | adapter->osdep.mem_bus_space_tag = |
---|
2143 | rman_get_bustag(adapter->pci_mem); |
---|
2144 | adapter->osdep.mem_bus_space_handle = |
---|
2145 | rman_get_bushandle(adapter->pci_mem); |
---|
2146 | adapter->hw.hw_addr = (u8 *)&adapter->osdep.mem_bus_space_handle; |
---|
2147 | |
---|
2148 | adapter->num_queues = 1; /* Defaults for Legacy or MSI */ |
---|
2149 | |
---|
2150 | /* This will setup either MSI/X or MSI */ |
---|
2151 | adapter->msix = igb_setup_msix(adapter); |
---|
2152 | adapter->hw.back = &adapter->osdep; |
---|
2153 | |
---|
2154 | return (0); |
---|
2155 | } |
---|
2156 | |
---|
2157 | /********************************************************************* |
---|
2158 | * |
---|
2159 | * Setup the Legacy or MSI Interrupt handler |
---|
2160 | * |
---|
2161 | **********************************************************************/ |
---|
2162 | static int |
---|
2163 | igb_allocate_legacy(struct adapter *adapter) |
---|
2164 | { |
---|
2165 | device_t dev = adapter->dev; |
---|
2166 | struct igb_queue *que = adapter->queues; |
---|
2167 | int error, rid = 0; |
---|
2168 | |
---|
2169 | /* Turn off all interrupts */ |
---|
2170 | E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xffffffff); |
---|
2171 | |
---|
2172 | /* MSI RID is 1 */ |
---|
2173 | if (adapter->msix == 1) |
---|
2174 | rid = 1; |
---|
2175 | |
---|
2176 | /* We allocate a single interrupt resource */ |
---|
2177 | adapter->res = bus_alloc_resource_any(dev, |
---|
2178 | SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); |
---|
2179 | if (adapter->res == NULL) { |
---|
2180 | device_printf(dev, "Unable to allocate bus resource: " |
---|
2181 | "interrupt\n"); |
---|
2182 | return (ENXIO); |
---|
2183 | } |
---|
2184 | |
---|
2185 | /* |
---|
2186 | * Try allocating a fast interrupt and the associated deferred |
---|
2187 | * processing contexts. |
---|
2188 | */ |
---|
2189 | TASK_INIT(&que->que_task, 0, igb_handle_que, que); |
---|
2190 | /* Make tasklet for deferred link handling */ |
---|
2191 | TASK_INIT(&adapter->link_task, 0, igb_handle_link, adapter); |
---|
2192 | que->tq = taskqueue_create_fast("igb_taskq", M_NOWAIT, |
---|
2193 | taskqueue_thread_enqueue, &que->tq); |
---|
2194 | taskqueue_start_threads(&que->tq, 1, PI_NET, "%s taskq", |
---|
2195 | device_get_nameunit(adapter->dev)); |
---|
2196 | if ((error = bus_setup_intr(dev, adapter->res, |
---|
2197 | INTR_TYPE_NET | INTR_MPSAFE, igb_irq_fast, NULL, |
---|
2198 | adapter, &adapter->tag)) != 0) { |
---|
2199 | device_printf(dev, "Failed to register fast interrupt " |
---|
2200 | "handler: %d\n", error); |
---|
2201 | taskqueue_free(que->tq); |
---|
2202 | que->tq = NULL; |
---|
2203 | return (error); |
---|
2204 | } |
---|
2205 | |
---|
2206 | return (0); |
---|
2207 | } |
---|
2208 | |
---|
2209 | |
---|
2210 | /********************************************************************* |
---|
2211 | * |
---|
2212 | * Setup the MSIX Queue Interrupt handlers: |
---|
2213 | * |
---|
2214 | **********************************************************************/ |
---|
2215 | static int |
---|
2216 | igb_allocate_msix(struct adapter *adapter) |
---|
2217 | { |
---|
2218 | device_t dev = adapter->dev; |
---|
2219 | struct igb_queue *que = adapter->queues; |
---|
2220 | int error, rid, vector = 0; |
---|
2221 | |
---|
2222 | |
---|
2223 | for (int i = 0; i < adapter->num_queues; i++, vector++, que++) { |
---|
2224 | rid = vector +1; |
---|
2225 | que->res = bus_alloc_resource_any(dev, |
---|
2226 | SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); |
---|
2227 | if (que->res == NULL) { |
---|
2228 | device_printf(dev, |
---|
2229 | "Unable to allocate bus resource: " |
---|
2230 | "MSIX Queue Interrupt\n"); |
---|
2231 | return (ENXIO); |
---|
2232 | } |
---|
2233 | error = bus_setup_intr(dev, que->res, |
---|
2234 | INTR_TYPE_NET | INTR_MPSAFE, NULL, |
---|
2235 | igb_msix_que, que, &que->tag); |
---|
2236 | if (error) { |
---|
2237 | que->res = NULL; |
---|
2238 | device_printf(dev, "Failed to register Queue handler"); |
---|
2239 | return (error); |
---|
2240 | } |
---|
2241 | #if __FreeBSD_version >= 800504 |
---|
2242 | bus_describe_intr(dev, que->res, que->tag, "que %d", i); |
---|
2243 | #endif |
---|
2244 | que->msix = vector; |
---|
2245 | if (adapter->hw.mac.type == e1000_82575) |
---|
2246 | que->eims = E1000_EICR_TX_QUEUE0 << i; |
---|
2247 | else |
---|
2248 | que->eims = 1 << vector; |
---|
2249 | /* |
---|
2250 | ** Bind the msix vector, and thus the |
---|
2251 | ** rings to the corresponding cpu. |
---|
2252 | */ |
---|
2253 | if (adapter->num_queues > 1) |
---|
2254 | bus_bind_intr(dev, que->res, i); |
---|
2255 | /* Make tasklet for deferred handling */ |
---|
2256 | TASK_INIT(&que->que_task, 0, igb_handle_que, que); |
---|
2257 | que->tq = taskqueue_create_fast("igb_que", M_NOWAIT, |
---|
2258 | taskqueue_thread_enqueue, &que->tq); |
---|
2259 | taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que", |
---|
2260 | device_get_nameunit(adapter->dev)); |
---|
2261 | } |
---|
2262 | |
---|
2263 | /* And Link */ |
---|
2264 | rid = vector + 1; |
---|
2265 | adapter->res = bus_alloc_resource_any(dev, |
---|
2266 | SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); |
---|
2267 | if (adapter->res == NULL) { |
---|
2268 | device_printf(dev, |
---|
2269 | "Unable to allocate bus resource: " |
---|
2270 | "MSIX Link Interrupt\n"); |
---|
2271 | return (ENXIO); |
---|
2272 | } |
---|
2273 | if ((error = bus_setup_intr(dev, adapter->res, |
---|
2274 | INTR_TYPE_NET | INTR_MPSAFE, NULL, |
---|
2275 | igb_msix_link, adapter, &adapter->tag)) != 0) { |
---|
2276 | device_printf(dev, "Failed to register Link handler"); |
---|
2277 | return (error); |
---|
2278 | } |
---|
2279 | #if __FreeBSD_version >= 800504 |
---|
2280 | bus_describe_intr(dev, adapter->res, adapter->tag, "link"); |
---|
2281 | #endif |
---|
2282 | adapter->linkvec = vector; |
---|
2283 | |
---|
2284 | return (0); |
---|
2285 | } |
---|
2286 | |
---|
2287 | |
---|
2288 | static void |
---|
2289 | igb_configure_queues(struct adapter *adapter) |
---|
2290 | { |
---|
2291 | struct e1000_hw *hw = &adapter->hw; |
---|
2292 | struct igb_queue *que; |
---|
2293 | u32 tmp, ivar = 0, newitr = 0; |
---|
2294 | |
---|
2295 | /* First turn on RSS capability */ |
---|
2296 | if (adapter->hw.mac.type > e1000_82575) |
---|
2297 | E1000_WRITE_REG(hw, E1000_GPIE, |
---|
2298 | E1000_GPIE_MSIX_MODE | E1000_GPIE_EIAME | |
---|
2299 | E1000_GPIE_PBA | E1000_GPIE_NSICR); |
---|
2300 | |
---|
2301 | /* Turn on MSIX */ |
---|
2302 | switch (adapter->hw.mac.type) { |
---|
2303 | case e1000_82580: |
---|
2304 | case e1000_vfadapt: |
---|
2305 | /* RX entries */ |
---|
2306 | for (int i = 0; i < adapter->num_queues; i++) { |
---|
2307 | u32 index = i >> 1; |
---|
2308 | ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); |
---|
2309 | que = &adapter->queues[i]; |
---|
2310 | if (i & 1) { |
---|
2311 | ivar &= 0xFF00FFFF; |
---|
2312 | ivar |= (que->msix | E1000_IVAR_VALID) << 16; |
---|
2313 | } else { |
---|
2314 | ivar &= 0xFFFFFF00; |
---|
2315 | ivar |= que->msix | E1000_IVAR_VALID; |
---|
2316 | } |
---|
2317 | E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); |
---|
2318 | } |
---|
2319 | /* TX entries */ |
---|
2320 | for (int i = 0; i < adapter->num_queues; i++) { |
---|
2321 | u32 index = i >> 1; |
---|
2322 | ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); |
---|
2323 | que = &adapter->queues[i]; |
---|
2324 | if (i & 1) { |
---|
2325 | ivar &= 0x00FFFFFF; |
---|
2326 | ivar |= (que->msix | E1000_IVAR_VALID) << 24; |
---|
2327 | } else { |
---|
2328 | ivar &= 0xFFFF00FF; |
---|
2329 | ivar |= (que->msix | E1000_IVAR_VALID) << 8; |
---|
2330 | } |
---|
2331 | E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); |
---|
2332 | adapter->eims_mask |= que->eims; |
---|
2333 | } |
---|
2334 | |
---|
2335 | /* And for the link interrupt */ |
---|
2336 | ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8; |
---|
2337 | adapter->link_mask = 1 << adapter->linkvec; |
---|
2338 | adapter->eims_mask |= adapter->link_mask; |
---|
2339 | E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); |
---|
2340 | break; |
---|
2341 | case e1000_82576: |
---|
2342 | /* RX entries */ |
---|
2343 | for (int i = 0; i < adapter->num_queues; i++) { |
---|
2344 | u32 index = i & 0x7; /* Each IVAR has two entries */ |
---|
2345 | ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); |
---|
2346 | que = &adapter->queues[i]; |
---|
2347 | if (i < 8) { |
---|
2348 | ivar &= 0xFFFFFF00; |
---|
2349 | ivar |= que->msix | E1000_IVAR_VALID; |
---|
2350 | } else { |
---|
2351 | ivar &= 0xFF00FFFF; |
---|
2352 | ivar |= (que->msix | E1000_IVAR_VALID) << 16; |
---|
2353 | } |
---|
2354 | E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); |
---|
2355 | adapter->eims_mask |= que->eims; |
---|
2356 | } |
---|
2357 | /* TX entries */ |
---|
2358 | for (int i = 0; i < adapter->num_queues; i++) { |
---|
2359 | u32 index = i & 0x7; /* Each IVAR has two entries */ |
---|
2360 | ivar = E1000_READ_REG_ARRAY(hw, E1000_IVAR0, index); |
---|
2361 | que = &adapter->queues[i]; |
---|
2362 | if (i < 8) { |
---|
2363 | ivar &= 0xFFFF00FF; |
---|
2364 | ivar |= (que->msix | E1000_IVAR_VALID) << 8; |
---|
2365 | } else { |
---|
2366 | ivar &= 0x00FFFFFF; |
---|
2367 | ivar |= (que->msix | E1000_IVAR_VALID) << 24; |
---|
2368 | } |
---|
2369 | E1000_WRITE_REG_ARRAY(hw, E1000_IVAR0, index, ivar); |
---|
2370 | adapter->eims_mask |= que->eims; |
---|
2371 | } |
---|
2372 | |
---|
2373 | /* And for the link interrupt */ |
---|
2374 | ivar = (adapter->linkvec | E1000_IVAR_VALID) << 8; |
---|
2375 | adapter->link_mask = 1 << adapter->linkvec; |
---|
2376 | adapter->eims_mask |= adapter->link_mask; |
---|
2377 | E1000_WRITE_REG(hw, E1000_IVAR_MISC, ivar); |
---|
2378 | break; |
---|
2379 | |
---|
2380 | case e1000_82575: |
---|
2381 | /* enable MSI-X support*/ |
---|
2382 | tmp = E1000_READ_REG(hw, E1000_CTRL_EXT); |
---|
2383 | tmp |= E1000_CTRL_EXT_PBA_CLR; |
---|
2384 | /* Auto-Mask interrupts upon ICR read. */ |
---|
2385 | tmp |= E1000_CTRL_EXT_EIAME; |
---|
2386 | tmp |= E1000_CTRL_EXT_IRCA; |
---|
2387 | E1000_WRITE_REG(hw, E1000_CTRL_EXT, tmp); |
---|
2388 | |
---|
2389 | /* Queues */ |
---|
2390 | for (int i = 0; i < adapter->num_queues; i++) { |
---|
2391 | que = &adapter->queues[i]; |
---|
2392 | tmp = E1000_EICR_RX_QUEUE0 << i; |
---|
2393 | tmp |= E1000_EICR_TX_QUEUE0 << i; |
---|
2394 | que->eims = tmp; |
---|
2395 | E1000_WRITE_REG_ARRAY(hw, E1000_MSIXBM(0), |
---|
2396 | i, que->eims); |
---|
2397 | adapter->eims_mask |= que->eims; |
---|
2398 | } |
---|
2399 | |
---|
2400 | /* Link */ |
---|
2401 | E1000_WRITE_REG(hw, E1000_MSIXBM(adapter->linkvec), |
---|
2402 | E1000_EIMS_OTHER); |
---|
2403 | adapter->link_mask |= E1000_EIMS_OTHER; |
---|
2404 | adapter->eims_mask |= adapter->link_mask; |
---|
2405 | default: |
---|
2406 | break; |
---|
2407 | } |
---|
2408 | |
---|
2409 | /* Set the starting interrupt rate */ |
---|
2410 | if (igb_max_interrupt_rate > 0) |
---|
2411 | newitr = (4000000 / igb_max_interrupt_rate) & 0x7FFC; |
---|
2412 | |
---|
2413 | if (hw->mac.type == e1000_82575) |
---|
2414 | newitr |= newitr << 16; |
---|
2415 | else |
---|
2416 | newitr |= E1000_EITR_CNT_IGNR; |
---|
2417 | |
---|
2418 | for (int i = 0; i < adapter->num_queues; i++) { |
---|
2419 | que = &adapter->queues[i]; |
---|
2420 | E1000_WRITE_REG(hw, E1000_EITR(que->msix), newitr); |
---|
2421 | } |
---|
2422 | |
---|
2423 | return; |
---|
2424 | } |
---|
2425 | |
---|
2426 | |
---|
2427 | static void |
---|
2428 | igb_free_pci_resources(struct adapter *adapter) |
---|
2429 | { |
---|
2430 | struct igb_queue *que = adapter->queues; |
---|
2431 | device_t dev = adapter->dev; |
---|
2432 | int rid; |
---|
2433 | |
---|
2434 | /* |
---|
2435 | ** There is a slight possibility of a failure mode |
---|
2436 | ** in attach that will result in entering this function |
---|
2437 | ** before interrupt resources have been initialized, and |
---|
2438 | ** in that case we do not want to execute the loops below |
---|
2439 | ** We can detect this reliably by the state of the adapter |
---|
2440 | ** res pointer. |
---|
2441 | */ |
---|
2442 | if (adapter->res == NULL) |
---|
2443 | goto mem; |
---|
2444 | |
---|
2445 | /* |
---|
2446 | * First release all the interrupt resources: |
---|
2447 | */ |
---|
2448 | for (int i = 0; i < adapter->num_queues; i++, que++) { |
---|
2449 | rid = que->msix + 1; |
---|
2450 | if (que->tag != NULL) { |
---|
2451 | bus_teardown_intr(dev, que->res, que->tag); |
---|
2452 | que->tag = NULL; |
---|
2453 | } |
---|
2454 | if (que->res != NULL) |
---|
2455 | bus_release_resource(dev, |
---|
2456 | SYS_RES_IRQ, rid, que->res); |
---|
2457 | } |
---|
2458 | |
---|
2459 | /* Clean the Legacy or Link interrupt last */ |
---|
2460 | if (adapter->linkvec) /* we are doing MSIX */ |
---|
2461 | rid = adapter->linkvec + 1; |
---|
2462 | else |
---|
2463 | (adapter->msix != 0) ? (rid = 1):(rid = 0); |
---|
2464 | |
---|
2465 | if (adapter->tag != NULL) { |
---|
2466 | bus_teardown_intr(dev, adapter->res, adapter->tag); |
---|
2467 | adapter->tag = NULL; |
---|
2468 | } |
---|
2469 | if (adapter->res != NULL) |
---|
2470 | bus_release_resource(dev, SYS_RES_IRQ, rid, adapter->res); |
---|
2471 | |
---|
2472 | mem: |
---|
2473 | if (adapter->msix) |
---|
2474 | pci_release_msi(dev); |
---|
2475 | |
---|
2476 | if (adapter->msix_mem != NULL) |
---|
2477 | bus_release_resource(dev, SYS_RES_MEMORY, |
---|
2478 | PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem); |
---|
2479 | |
---|
2480 | if (adapter->pci_mem != NULL) |
---|
2481 | bus_release_resource(dev, SYS_RES_MEMORY, |
---|
2482 | PCIR_BAR(0), adapter->pci_mem); |
---|
2483 | |
---|
2484 | } |
---|
2485 | |
---|
2486 | /* |
---|
2487 | * Setup Either MSI/X or MSI |
---|
2488 | */ |
---|
2489 | static int |
---|
2490 | igb_setup_msix(struct adapter *adapter) |
---|
2491 | { |
---|
2492 | device_t dev = adapter->dev; |
---|
2493 | int rid, want, queues, msgs; |
---|
2494 | |
---|
2495 | /* tuneable override */ |
---|
2496 | if (igb_enable_msix == 0) |
---|
2497 | goto msi; |
---|
2498 | |
---|
2499 | /* First try MSI/X */ |
---|
2500 | rid = PCIR_BAR(IGB_MSIX_BAR); |
---|
2501 | adapter->msix_mem = bus_alloc_resource_any(dev, |
---|
2502 | SYS_RES_MEMORY, &rid, RF_ACTIVE); |
---|
2503 | if (!adapter->msix_mem) { |
---|
2504 | /* May not be enabled */ |
---|
2505 | device_printf(adapter->dev, |
---|
2506 | "Unable to map MSIX table \n"); |
---|
2507 | goto msi; |
---|
2508 | } |
---|
2509 | |
---|
2510 | msgs = pci_msix_count(dev); |
---|
2511 | if (msgs == 0) { /* system has msix disabled */ |
---|
2512 | bus_release_resource(dev, SYS_RES_MEMORY, |
---|
2513 | PCIR_BAR(IGB_MSIX_BAR), adapter->msix_mem); |
---|
2514 | adapter->msix_mem = NULL; |
---|
2515 | goto msi; |
---|
2516 | } |
---|
2517 | |
---|
2518 | #ifdef __rtems__ |
---|
2519 | /* Figure out a reasonable auto config value */ |
---|
2520 | queues = 10; /* XXX fix me */ |
---|
2521 | #else |
---|
2522 | /* Figure out a reasonable auto config value */ |
---|
2523 | queues = (mp_ncpus > (msgs-1)) ? (msgs-1) : mp_ncpus; |
---|
2524 | #endif |
---|
2525 | |
---|
2526 | /* Manual override */ |
---|
2527 | if (igb_num_queues != 0) |
---|
2528 | queues = igb_num_queues; |
---|
2529 | if (queues > 8) /* max queues */ |
---|
2530 | queues = 8; |
---|
2531 | |
---|
2532 | /* Can have max of 4 queues on 82575 */ |
---|
2533 | if ((adapter->hw.mac.type == e1000_82575) && (queues > 4)) |
---|
2534 | queues = 4; |
---|
2535 | |
---|
2536 | /* Limit the VF adapter to one queue */ |
---|
2537 | if (adapter->hw.mac.type == e1000_vfadapt) |
---|
2538 | queues = 1; |
---|
2539 | |
---|
2540 | /* |
---|
2541 | ** One vector (RX/TX pair) per queue |
---|
2542 | ** plus an additional for Link interrupt |
---|
2543 | */ |
---|
2544 | want = queues + 1; |
---|
2545 | if (msgs >= want) |
---|
2546 | msgs = want; |
---|
2547 | else { |
---|
2548 | device_printf(adapter->dev, |
---|
2549 | "MSIX Configuration Problem, " |
---|
2550 | "%d vectors configured, but %d queues wanted!\n", |
---|
2551 | msgs, want); |
---|
2552 | return (ENXIO); |
---|
2553 | } |
---|
2554 | if ((msgs) && pci_alloc_msix(dev, &msgs) == 0) { |
---|
2555 | device_printf(adapter->dev, |
---|
2556 | "Using MSIX interrupts with %d vectors\n", msgs); |
---|
2557 | adapter->num_queues = queues; |
---|
2558 | return (msgs); |
---|
2559 | } |
---|
2560 | msi: |
---|
2561 | msgs = pci_msi_count(dev); |
---|
2562 | if (msgs == 1 && pci_alloc_msi(dev, &msgs) == 0) |
---|
2563 | device_printf(adapter->dev,"Using MSI interrupt\n"); |
---|
2564 | return (msgs); |
---|
2565 | } |
---|
2566 | |
---|
/*********************************************************************
 *
 *  Set up a fresh starting state: packet buffer split, flow
 *  control thresholds, then a full hardware reset + re-init.
 *
 **********************************************************************/
static void
igb_reset(struct adapter *adapter)
{
	device_t	dev = adapter->dev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_fc_info *fc = &hw->fc;
	struct ifnet	*ifp = adapter->ifp;
	u32		pba = 0;
	u16		hwm;

	INIT_DEBUGOUT("igb_reset: begin");

	/* Let the firmware know the OS is in control */
	igb_get_hw_control(adapter);

	/*
	 * Packet Buffer Allocation (PBA)
	 * Writing PBA sets the receive portion of the buffer
	 * the remainder is used for the transmit buffer.
	 */
	switch (hw->mac.type) {
	case e1000_82575:
		pba = E1000_PBA_32K;
		break;
	case e1000_82576:
	case e1000_vfadapt:
		pba = E1000_PBA_64K;
		break;
	case e1000_82580:
		pba = E1000_PBA_35K;
		/* FALLTHROUGH -- default only breaks, nothing skipped */
	default:
		break;
	}

	/* Special needs in case of Jumbo frames */
	if ((hw->mac.type == e1000_82575) && (ifp->if_mtu > ETHERMTU)) {
		u32 tx_space, min_tx, min_rx;
		/* Current split: high word is TX space, low word RX (KB) */
		pba = E1000_READ_REG(hw, E1000_PBA);
		tx_space = pba >> 16;
		pba &= 0xffff;
		/* TX side must hold two max-sized frames (+ descriptor) */
		min_tx = (adapter->max_frame_size +
		    sizeof(struct e1000_tx_desc) - ETHERNET_FCS_SIZE) * 2;
		min_tx = roundup2(min_tx, 1024);
		min_tx >>= 10;		/* bytes -> KB */
		min_rx = adapter->max_frame_size;
		min_rx = roundup2(min_rx, 1024);
		min_rx >>= 10;		/* bytes -> KB */
		/* Steal from RX only the TX shortfall, if RX can spare it */
		if (tx_space < min_tx &&
		    ((min_tx - tx_space) < pba)) {
			pba = pba - (min_tx - tx_space);
			/*
			 * if short on rx space, rx wins
			 * and must trump tx adjustment
			 */
			if (pba < min_rx)
				pba = min_rx;
		}
		E1000_WRITE_REG(hw, E1000_PBA, pba);
	}

	INIT_DEBUGOUT1("igb_init: pba=%dK",pba);

	/*
	 * These parameters control the automatic generation (Tx) and
	 * response (Rx) to Ethernet PAUSE frames.
	 * - High water mark should allow for at least two frames to be
	 *   received after sending an XOFF.
	 * - Low water mark works best when it is very near the high
	 *   water mark.  This allows the receiver to restart by sending
	 *   XON when it has drained a bit.
	 */
	hwm = min(((pba << 10) * 9 / 10),
	    ((pba << 10) - 2 * adapter->max_frame_size));

	if (hw->mac.type < e1000_82576) {
		fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
		fc->low_water = fc->high_water - 8;
	} else {
		fc->high_water = hwm & 0xFFF0;	/* 16-byte granularity */
		fc->low_water = fc->high_water - 16;
	}

	fc->pause_time = IGB_FC_PAUSE_TIME;
	fc->send_xon = TRUE;

	/* Set Flow control, use the tunable location if sane (0..3) */
	if ((igb_fc_setting >= 0) && (igb_fc_setting < 4))
		fc->requested_mode = igb_fc_setting;
	else
		fc->requested_mode = e1000_fc_none;

	fc->current_mode = fc->requested_mode;

	/* Issue a global reset */
	e1000_reset_hw(hw);
	E1000_WRITE_REG(hw, E1000_WUC, 0);

	if (e1000_init_hw(hw) < 0)
		device_printf(dev, "Hardware Initialization Failed\n");

	/* 82580 only: set up DMA coalescing */
	if (hw->mac.type == e1000_82580) {
		u32 reg;

		hwm = (pba << 10) - (2 * adapter->max_frame_size);
		/*
		 * DMACR bits:
		 * 0x80000000 - enable DMA COAL
		 * 0x10000000 - use L0s as low power
		 * 0x20000000 - use L1 as low power
		 * X << 16 - exit dma coal when rx data exceeds X kB
		 * Y - upper limit to stay in dma coal in units of 32usecs
		 */
		E1000_WRITE_REG(hw, E1000_DMACR,
		    0xA0000006 | ((hwm << 6) & 0x00FF0000));

		/* set hwm to PBA - 2 * max frame size */
		E1000_WRITE_REG(hw, E1000_FCRTC, hwm);
		/*
		 * This sets the time to wait before requesting transition to
		 * low power state to number of usecs needed to receive 1 512
		 * byte frame at gigabit line rate
		 */
		E1000_WRITE_REG(hw, E1000_DMCTLX, 4);

		/* free space in tx packet buffer to wake from DMA coal */
		E1000_WRITE_REG(hw, E1000_DMCTXTH,
		    (20480 - (2 * adapter->max_frame_size)) >> 6);

		/* make low power state decision controlled by DMA coal */
		reg = E1000_READ_REG(hw, E1000_PCIEMISC);
		E1000_WRITE_REG(hw, E1000_PCIEMISC,
		    reg | E1000_PCIEMISC_LX_DECISION);
	}

	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERTYPE_VLAN);
	e1000_get_phy_info(hw);
	e1000_check_for_link(hw);
	return;
}
---|
2710 | |
---|
2711 | /********************************************************************* |
---|
2712 | * |
---|
2713 | * Setup networking device structure and register an interface. |
---|
2714 | * |
---|
2715 | **********************************************************************/ |
---|
2716 | static int |
---|
2717 | igb_setup_interface(device_t dev, struct adapter *adapter) |
---|
2718 | { |
---|
2719 | struct ifnet *ifp; |
---|
2720 | |
---|
2721 | INIT_DEBUGOUT("igb_setup_interface: begin"); |
---|
2722 | |
---|
2723 | ifp = adapter->ifp = if_alloc(IFT_ETHER); |
---|
2724 | if (ifp == NULL) { |
---|
2725 | device_printf(dev, "can not allocate ifnet structure\n"); |
---|
2726 | return (-1); |
---|
2727 | } |
---|
2728 | if_initname(ifp, device_get_name(dev), device_get_unit(dev)); |
---|
2729 | ifp->if_mtu = ETHERMTU; |
---|
2730 | ifp->if_init = igb_init; |
---|
2731 | ifp->if_softc = adapter; |
---|
2732 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
---|
2733 | ifp->if_ioctl = igb_ioctl; |
---|
2734 | ifp->if_start = igb_start; |
---|
2735 | #if __FreeBSD_version >= 800000 |
---|
2736 | ifp->if_transmit = igb_mq_start; |
---|
2737 | ifp->if_qflush = igb_qflush; |
---|
2738 | #endif |
---|
2739 | IFQ_SET_MAXLEN(&ifp->if_snd, adapter->num_tx_desc - 1); |
---|
2740 | ifp->if_snd.ifq_drv_maxlen = adapter->num_tx_desc - 1; |
---|
2741 | IFQ_SET_READY(&ifp->if_snd); |
---|
2742 | |
---|
2743 | ether_ifattach(ifp, adapter->hw.mac.addr); |
---|
2744 | |
---|
2745 | ifp->if_capabilities = ifp->if_capenable = 0; |
---|
2746 | |
---|
2747 | ifp->if_capabilities = IFCAP_HWCSUM | IFCAP_VLAN_HWCSUM; |
---|
2748 | ifp->if_capabilities |= IFCAP_TSO4; |
---|
2749 | ifp->if_capabilities |= IFCAP_JUMBO_MTU; |
---|
2750 | ifp->if_capenable = ifp->if_capabilities; |
---|
2751 | |
---|
2752 | /* Don't enable LRO by default */ |
---|
2753 | ifp->if_capabilities |= IFCAP_LRO; |
---|
2754 | |
---|
2755 | #ifdef DEVICE_POLLING |
---|
2756 | ifp->if_capabilities |= IFCAP_POLLING; |
---|
2757 | #endif |
---|
2758 | |
---|
2759 | /* |
---|
2760 | * Tell the upper layer(s) we |
---|
2761 | * support full VLAN capability. |
---|
2762 | */ |
---|
2763 | ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header); |
---|
2764 | ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; |
---|
2765 | ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU; |
---|
2766 | |
---|
2767 | /* |
---|
2768 | ** Dont turn this on by default, if vlans are |
---|
2769 | ** created on another pseudo device (eg. lagg) |
---|
2770 | ** then vlan events are not passed thru, breaking |
---|
2771 | ** operation, but with HW FILTER off it works. If |
---|
2772 | ** using vlans directly on the em driver you can |
---|
2773 | ** enable this and get full hardware tag filtering. |
---|
2774 | */ |
---|
2775 | ifp->if_capabilities |= IFCAP_VLAN_HWFILTER; |
---|
2776 | |
---|
2777 | /* |
---|
2778 | * Specify the media types supported by this adapter and register |
---|
2779 | * callbacks to update media and link information |
---|
2780 | */ |
---|
2781 | ifmedia_init(&adapter->media, IFM_IMASK, |
---|
2782 | igb_media_change, igb_media_status); |
---|
2783 | if ((adapter->hw.phy.media_type == e1000_media_type_fiber) || |
---|
2784 | (adapter->hw.phy.media_type == e1000_media_type_internal_serdes)) { |
---|
2785 | ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX | IFM_FDX, |
---|
2786 | 0, NULL); |
---|
2787 | ifmedia_add(&adapter->media, IFM_ETHER | IFM_1000_SX, 0, NULL); |
---|
2788 | } else { |
---|
2789 | ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T, 0, NULL); |
---|
2790 | ifmedia_add(&adapter->media, IFM_ETHER | IFM_10_T | IFM_FDX, |
---|
2791 | 0, NULL); |
---|
2792 | ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX, |
---|
2793 | 0, NULL); |
---|
2794 | ifmedia_add(&adapter->media, IFM_ETHER | IFM_100_TX | IFM_FDX, |
---|
2795 | 0, NULL); |
---|
2796 | if (adapter->hw.phy.type != e1000_phy_ife) { |
---|
2797 | ifmedia_add(&adapter->media, |
---|
2798 | IFM_ETHER | IFM_1000_T | IFM_FDX, 0, NULL); |
---|
2799 | ifmedia_add(&adapter->media, |
---|
2800 | IFM_ETHER | IFM_1000_T, 0, NULL); |
---|
2801 | } |
---|
2802 | } |
---|
2803 | ifmedia_add(&adapter->media, IFM_ETHER | IFM_AUTO, 0, NULL); |
---|
2804 | ifmedia_set(&adapter->media, IFM_ETHER | IFM_AUTO); |
---|
2805 | return (0); |
---|
2806 | } |
---|
2807 | |
---|
2808 | |
---|
2809 | /* |
---|
2810 | * Manage DMA'able memory. |
---|
2811 | */ |
---|
2812 | static void |
---|
2813 | igb_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) |
---|
2814 | { |
---|
2815 | if (error) |
---|
2816 | return; |
---|
2817 | *(bus_addr_t *) arg = segs[0].ds_addr; |
---|
2818 | } |
---|
2819 | |
---|
2820 | static int |
---|
2821 | igb_dma_malloc(struct adapter *adapter, bus_size_t size, |
---|
2822 | struct igb_dma_alloc *dma, int mapflags) |
---|
2823 | { |
---|
2824 | int error; |
---|
2825 | |
---|
2826 | error = bus_dma_tag_create(bus_get_dma_tag(adapter->dev), /* parent */ |
---|
2827 | IGB_DBA_ALIGN, 0, /* alignment, bounds */ |
---|
2828 | BUS_SPACE_MAXADDR, /* lowaddr */ |
---|
2829 | BUS_SPACE_MAXADDR, /* highaddr */ |
---|
2830 | NULL, NULL, /* filter, filterarg */ |
---|
2831 | size, /* maxsize */ |
---|
2832 | 1, /* nsegments */ |
---|
2833 | size, /* maxsegsize */ |
---|
2834 | 0, /* flags */ |
---|
2835 | NULL, /* lockfunc */ |
---|
2836 | NULL, /* lockarg */ |
---|
2837 | &dma->dma_tag); |
---|
2838 | if (error) { |
---|
2839 | device_printf(adapter->dev, |
---|
2840 | "%s: bus_dma_tag_create failed: %d\n", |
---|
2841 | __func__, error); |
---|
2842 | goto fail_0; |
---|
2843 | } |
---|
2844 | |
---|
2845 | error = bus_dmamem_alloc(dma->dma_tag, (void**) &dma->dma_vaddr, |
---|
2846 | BUS_DMA_NOWAIT, &dma->dma_map); |
---|
2847 | if (error) { |
---|
2848 | device_printf(adapter->dev, |
---|
2849 | "%s: bus_dmamem_alloc(%ju) failed: %d\n", |
---|
2850 | __func__, (uintmax_t)size, error); |
---|
2851 | goto fail_2; |
---|
2852 | } |
---|
2853 | |
---|
2854 | dma->dma_paddr = 0; |
---|
2855 | error = bus_dmamap_load(dma->dma_tag, dma->dma_map, dma->dma_vaddr, |
---|
2856 | size, igb_dmamap_cb, &dma->dma_paddr, mapflags | BUS_DMA_NOWAIT); |
---|
2857 | if (error || dma->dma_paddr == 0) { |
---|
2858 | device_printf(adapter->dev, |
---|
2859 | "%s: bus_dmamap_load failed: %d\n", |
---|
2860 | __func__, error); |
---|
2861 | goto fail_3; |
---|
2862 | } |
---|
2863 | |
---|
2864 | return (0); |
---|
2865 | |
---|
2866 | fail_3: |
---|
2867 | bus_dmamap_unload(dma->dma_tag, dma->dma_map); |
---|
2868 | fail_2: |
---|
2869 | bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); |
---|
2870 | bus_dma_tag_destroy(dma->dma_tag); |
---|
2871 | fail_0: |
---|
2872 | dma->dma_map = NULL; |
---|
2873 | dma->dma_tag = NULL; |
---|
2874 | |
---|
2875 | return (error); |
---|
2876 | } |
---|
2877 | |
---|
2878 | static void |
---|
2879 | igb_dma_free(struct adapter *adapter, struct igb_dma_alloc *dma) |
---|
2880 | { |
---|
2881 | if (dma->dma_tag == NULL) |
---|
2882 | return; |
---|
2883 | if (dma->dma_map != NULL) { |
---|
2884 | bus_dmamap_sync(dma->dma_tag, dma->dma_map, |
---|
2885 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
---|
2886 | bus_dmamap_unload(dma->dma_tag, dma->dma_map); |
---|
2887 | bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map); |
---|
2888 | dma->dma_map = NULL; |
---|
2889 | } |
---|
2890 | bus_dma_tag_destroy(dma->dma_tag); |
---|
2891 | dma->dma_tag = NULL; |
---|
2892 | } |
---|
2893 | |
---|
2894 | |
---|
2895 | /********************************************************************* |
---|
2896 | * |
---|
2897 | * Allocate memory for the transmit and receive rings, and then |
---|
2898 | * the descriptors associated with each, called only once at attach. |
---|
2899 | * |
---|
2900 | **********************************************************************/ |
---|
2901 | static int |
---|
2902 | igb_allocate_queues(struct adapter *adapter) |
---|
2903 | { |
---|
2904 | device_t dev = adapter->dev; |
---|
2905 | struct igb_queue *que = NULL; |
---|
2906 | struct tx_ring *txr = NULL; |
---|
2907 | struct rx_ring *rxr = NULL; |
---|
2908 | int rsize, tsize, error = E1000_SUCCESS; |
---|
2909 | int txconf = 0, rxconf = 0; |
---|
2910 | |
---|
2911 | /* First allocate the top level queue structs */ |
---|
2912 | if (!(adapter->queues = |
---|
2913 | (struct igb_queue *) malloc(sizeof(struct igb_queue) * |
---|
2914 | adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { |
---|
2915 | device_printf(dev, "Unable to allocate queue memory\n"); |
---|
2916 | error = ENOMEM; |
---|
2917 | goto fail; |
---|
2918 | } |
---|
2919 | |
---|
2920 | /* Next allocate the TX ring struct memory */ |
---|
2921 | if (!(adapter->tx_rings = |
---|
2922 | (struct tx_ring *) malloc(sizeof(struct tx_ring) * |
---|
2923 | adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { |
---|
2924 | device_printf(dev, "Unable to allocate TX ring memory\n"); |
---|
2925 | error = ENOMEM; |
---|
2926 | goto tx_fail; |
---|
2927 | } |
---|
2928 | |
---|
2929 | /* Now allocate the RX */ |
---|
2930 | if (!(adapter->rx_rings = |
---|
2931 | (struct rx_ring *) malloc(sizeof(struct rx_ring) * |
---|
2932 | adapter->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) { |
---|
2933 | device_printf(dev, "Unable to allocate RX ring memory\n"); |
---|
2934 | error = ENOMEM; |
---|
2935 | goto rx_fail; |
---|
2936 | } |
---|
2937 | |
---|
2938 | tsize = roundup2(adapter->num_tx_desc * |
---|
2939 | sizeof(union e1000_adv_tx_desc), IGB_DBA_ALIGN); |
---|
2940 | /* |
---|
2941 | * Now set up the TX queues, txconf is needed to handle the |
---|
2942 | * possibility that things fail midcourse and we need to |
---|
2943 | * undo memory gracefully |
---|
2944 | */ |
---|
2945 | for (int i = 0; i < adapter->num_queues; i++, txconf++) { |
---|
2946 | /* Set up some basics */ |
---|
2947 | txr = &adapter->tx_rings[i]; |
---|
2948 | txr->adapter = adapter; |
---|
2949 | txr->me = i; |
---|
2950 | |
---|
2951 | /* Initialize the TX lock */ |
---|
2952 | snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)", |
---|
2953 | device_get_nameunit(dev), txr->me); |
---|
2954 | mtx_init(&txr->tx_mtx, txr->mtx_name, NULL, MTX_DEF); |
---|
2955 | |
---|
2956 | if (igb_dma_malloc(adapter, tsize, |
---|
2957 | &txr->txdma, BUS_DMA_NOWAIT)) { |
---|
2958 | device_printf(dev, |
---|
2959 | "Unable to allocate TX Descriptor memory\n"); |
---|
2960 | error = ENOMEM; |
---|
2961 | goto err_tx_desc; |
---|
2962 | } |
---|
2963 | txr->tx_base = (struct e1000_tx_desc *)txr->txdma.dma_vaddr; |
---|
2964 | bzero((void *)txr->tx_base, tsize); |
---|
2965 | |
---|
2966 | /* Now allocate transmit buffers for the ring */ |
---|
2967 | if (igb_allocate_transmit_buffers(txr)) { |
---|
2968 | device_printf(dev, |
---|
2969 | "Critical Failure setting up transmit buffers\n"); |
---|
2970 | error = ENOMEM; |
---|
2971 | goto err_tx_desc; |
---|
2972 | } |
---|
2973 | #if __FreeBSD_version >= 800000 |
---|
2974 | /* Allocate a buf ring */ |
---|
2975 | txr->br = buf_ring_alloc(IGB_BR_SIZE, M_DEVBUF, |
---|
2976 | M_WAITOK, &txr->tx_mtx); |
---|
2977 | #endif |
---|
2978 | } |
---|
2979 | |
---|
2980 | /* |
---|
2981 | * Next the RX queues... |
---|
2982 | */ |
---|
2983 | rsize = roundup2(adapter->num_rx_desc * |
---|
2984 | sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN); |
---|
2985 | for (int i = 0; i < adapter->num_queues; i++, rxconf++) { |
---|
2986 | rxr = &adapter->rx_rings[i]; |
---|
2987 | rxr->adapter = adapter; |
---|
2988 | rxr->me = i; |
---|
2989 | |
---|
2990 | /* Initialize the RX lock */ |
---|
2991 | snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)", |
---|
2992 | device_get_nameunit(dev), txr->me); |
---|
2993 | mtx_init(&rxr->rx_mtx, rxr->mtx_name, NULL, MTX_DEF); |
---|
2994 | |
---|
2995 | if (igb_dma_malloc(adapter, rsize, |
---|
2996 | &rxr->rxdma, BUS_DMA_NOWAIT)) { |
---|
2997 | device_printf(dev, |
---|
2998 | "Unable to allocate RxDescriptor memory\n"); |
---|
2999 | error = ENOMEM; |
---|
3000 | goto err_rx_desc; |
---|
3001 | } |
---|
3002 | rxr->rx_base = (union e1000_adv_rx_desc *)rxr->rxdma.dma_vaddr; |
---|
3003 | bzero((void *)rxr->rx_base, rsize); |
---|
3004 | |
---|
3005 | /* Allocate receive buffers for the ring*/ |
---|
3006 | if (igb_allocate_receive_buffers(rxr)) { |
---|
3007 | device_printf(dev, |
---|
3008 | "Critical Failure setting up receive buffers\n"); |
---|
3009 | error = ENOMEM; |
---|
3010 | goto err_rx_desc; |
---|
3011 | } |
---|
3012 | } |
---|
3013 | |
---|
3014 | /* |
---|
3015 | ** Finally set up the queue holding structs |
---|
3016 | */ |
---|
3017 | for (int i = 0; i < adapter->num_queues; i++) { |
---|
3018 | que = &adapter->queues[i]; |
---|
3019 | que->adapter = adapter; |
---|
3020 | que->txr = &adapter->tx_rings[i]; |
---|
3021 | que->rxr = &adapter->rx_rings[i]; |
---|
3022 | } |
---|
3023 | |
---|
3024 | return (0); |
---|
3025 | |
---|
3026 | err_rx_desc: |
---|
3027 | for (rxr = adapter->rx_rings; rxconf > 0; rxr++, rxconf--) |
---|
3028 | igb_dma_free(adapter, &rxr->rxdma); |
---|
3029 | err_tx_desc: |
---|
3030 | for (txr = adapter->tx_rings; txconf > 0; txr++, txconf--) |
---|
3031 | igb_dma_free(adapter, &txr->txdma); |
---|
3032 | free(adapter->rx_rings, M_DEVBUF); |
---|
3033 | rx_fail: |
---|
3034 | #if __FreeBSD_version >= 800000 |
---|
3035 | buf_ring_free(txr->br, M_DEVBUF); |
---|
3036 | #endif |
---|
3037 | free(adapter->tx_rings, M_DEVBUF); |
---|
3038 | tx_fail: |
---|
3039 | free(adapter->queues, M_DEVBUF); |
---|
3040 | fail: |
---|
3041 | return (error); |
---|
3042 | } |
---|
3043 | |
---|
3044 | /********************************************************************* |
---|
3045 | * |
---|
3046 | * Allocate memory for tx_buffer structures. The tx_buffer stores all |
---|
3047 | * the information needed to transmit a packet on the wire. This is |
---|
3048 | * called only once at attach, setup is done every reset. |
---|
3049 | * |
---|
3050 | **********************************************************************/ |
---|
3051 | static int |
---|
3052 | igb_allocate_transmit_buffers(struct tx_ring *txr) |
---|
3053 | { |
---|
3054 | struct adapter *adapter = txr->adapter; |
---|
3055 | device_t dev = adapter->dev; |
---|
3056 | struct igb_tx_buffer *txbuf; |
---|
3057 | int error, i; |
---|
3058 | |
---|
3059 | /* |
---|
3060 | * Setup DMA descriptor areas. |
---|
3061 | */ |
---|
3062 | if ((error = bus_dma_tag_create(bus_get_dma_tag(dev), |
---|
3063 | 1, 0, /* alignment, bounds */ |
---|
3064 | BUS_SPACE_MAXADDR, /* lowaddr */ |
---|
3065 | BUS_SPACE_MAXADDR, /* highaddr */ |
---|
3066 | NULL, NULL, /* filter, filterarg */ |
---|
3067 | IGB_TSO_SIZE, /* maxsize */ |
---|
3068 | IGB_MAX_SCATTER, /* nsegments */ |
---|
3069 | PAGE_SIZE, /* maxsegsize */ |
---|
3070 | 0, /* flags */ |
---|
3071 | NULL, /* lockfunc */ |
---|
3072 | NULL, /* lockfuncarg */ |
---|
3073 | &txr->txtag))) { |
---|
3074 | device_printf(dev,"Unable to allocate TX DMA tag\n"); |
---|
3075 | goto fail; |
---|
3076 | } |
---|
3077 | |
---|
3078 | if (!(txr->tx_buffers = |
---|
3079 | (struct igb_tx_buffer *) malloc(sizeof(struct igb_tx_buffer) * |
---|
3080 | adapter->num_tx_desc, M_DEVBUF, M_NOWAIT | M_ZERO))) { |
---|
3081 | device_printf(dev, "Unable to allocate tx_buffer memory\n"); |
---|
3082 | error = ENOMEM; |
---|
3083 | goto fail; |
---|
3084 | } |
---|
3085 | |
---|
3086 | /* Create the descriptor buffer dma maps */ |
---|
3087 | txbuf = txr->tx_buffers; |
---|
3088 | for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { |
---|
3089 | error = bus_dmamap_create(txr->txtag, 0, &txbuf->map); |
---|
3090 | if (error != 0) { |
---|
3091 | device_printf(dev, "Unable to create TX DMA map\n"); |
---|
3092 | goto fail; |
---|
3093 | } |
---|
3094 | } |
---|
3095 | |
---|
3096 | return 0; |
---|
3097 | fail: |
---|
3098 | /* We free all, it handles case where we are in the middle */ |
---|
3099 | igb_free_transmit_structures(adapter); |
---|
3100 | return (error); |
---|
3101 | } |
---|
3102 | |
---|
3103 | /********************************************************************* |
---|
3104 | * |
---|
3105 | * Initialize a transmit ring. |
---|
3106 | * |
---|
3107 | **********************************************************************/ |
---|
3108 | static void |
---|
3109 | igb_setup_transmit_ring(struct tx_ring *txr) |
---|
3110 | { |
---|
3111 | struct adapter *adapter = txr->adapter; |
---|
3112 | struct igb_tx_buffer *txbuf; |
---|
3113 | int i; |
---|
3114 | |
---|
3115 | /* Clear the old descriptor contents */ |
---|
3116 | IGB_TX_LOCK(txr); |
---|
3117 | bzero((void *)txr->tx_base, |
---|
3118 | (sizeof(union e1000_adv_tx_desc)) * adapter->num_tx_desc); |
---|
3119 | /* Reset indices */ |
---|
3120 | txr->next_avail_desc = 0; |
---|
3121 | txr->next_to_clean = 0; |
---|
3122 | |
---|
3123 | /* Free any existing tx buffers. */ |
---|
3124 | txbuf = txr->tx_buffers; |
---|
3125 | for (i = 0; i < adapter->num_tx_desc; i++, txbuf++) { |
---|
3126 | if (txbuf->m_head != NULL) { |
---|
3127 | bus_dmamap_sync(txr->txtag, txbuf->map, |
---|
3128 | BUS_DMASYNC_POSTWRITE); |
---|
3129 | bus_dmamap_unload(txr->txtag, txbuf->map); |
---|
3130 | m_freem(txbuf->m_head); |
---|
3131 | txbuf->m_head = NULL; |
---|
3132 | } |
---|
3133 | /* clear the watch index */ |
---|
3134 | txbuf->next_eop = -1; |
---|
3135 | } |
---|
3136 | |
---|
3137 | /* Set number of descriptors available */ |
---|
3138 | txr->tx_avail = adapter->num_tx_desc; |
---|
3139 | |
---|
3140 | bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map, |
---|
3141 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
---|
3142 | IGB_TX_UNLOCK(txr); |
---|
3143 | } |
---|
3144 | |
---|
3145 | /********************************************************************* |
---|
3146 | * |
---|
3147 | * Initialize all transmit rings. |
---|
3148 | * |
---|
3149 | **********************************************************************/ |
---|
3150 | static void |
---|
3151 | igb_setup_transmit_structures(struct adapter *adapter) |
---|
3152 | { |
---|
3153 | struct tx_ring *txr = adapter->tx_rings; |
---|
3154 | |
---|
3155 | for (int i = 0; i < adapter->num_queues; i++, txr++) |
---|
3156 | igb_setup_transmit_ring(txr); |
---|
3157 | |
---|
3158 | return; |
---|
3159 | } |
---|
3160 | |
---|
3161 | /********************************************************************* |
---|
3162 | * |
---|
3163 | * Enable transmit unit. |
---|
3164 | * |
---|
3165 | **********************************************************************/ |
---|
static void
igb_initialize_transmit_units(struct adapter *adapter)
{
	struct tx_ring	*txr = adapter->tx_rings;
	struct e1000_hw *hw = &adapter->hw;
	u32		tctl, txdctl;

	INIT_DEBUGOUT("igb_initialize_transmit_units: begin");
	tctl = txdctl = 0;

	/* Setup the Tx Descriptor Rings */
	for (int i = 0; i < adapter->num_queues; i++, txr++) {
		u64 bus_addr = txr->txdma.dma_paddr;

		/* Program ring length and 64-bit DMA base address */
		E1000_WRITE_REG(hw, E1000_TDLEN(i),
		    adapter->num_tx_desc * sizeof(struct e1000_tx_desc));
		E1000_WRITE_REG(hw, E1000_TDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_TDBAL(i),
		    (uint32_t)bus_addr);

		/* Setup the HW Tx Head and Tail descriptor pointers */
		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
		E1000_WRITE_REG(hw, E1000_TDH(i), 0);

		HW_DEBUGOUT2("Base = %x, Length = %x\n",
		    E1000_READ_REG(hw, E1000_TDBAL(i)),
		    E1000_READ_REG(hw, E1000_TDLEN(i)));

		txr->queue_status = IGB_QUEUE_IDLE;

		/*
		 * Thresholds plus queue-enable.  The same constant bits
		 * are OR'd into txdctl on every iteration, so the value
		 * written is identical for every ring despite txdctl not
		 * being reset inside the loop.
		 */
		txdctl |= IGB_TX_PTHRESH;
		txdctl |= IGB_TX_HTHRESH << 8;
		txdctl |= IGB_TX_WTHRESH << 16;
		txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
		E1000_WRITE_REG(hw, E1000_TXDCTL(i), txdctl);
	}

	/*
	 * NOTE(review): VF adapters return early here, presumably because
	 * the global transmit-control registers belong to the PF — confirm
	 * against the e1000 VF documentation.
	 */
	if (adapter->hw.mac.type == e1000_vfadapt)
		return;

	e1000_config_collision_dist(hw);

	/* Program the Transmit Control Register */
	tctl = E1000_READ_REG(hw, E1000_TCTL);
	tctl &= ~E1000_TCTL_CT;
	tctl |= (E1000_TCTL_PSP | E1000_TCTL_RTLC | E1000_TCTL_EN |
		   (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT));

	/* This write will effectively turn on the transmit unit. */
	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
}
---|
3218 | |
---|
3219 | /********************************************************************* |
---|
3220 | * |
---|
3221 | * Free all transmit rings. |
---|
3222 | * |
---|
3223 | **********************************************************************/ |
---|
3224 | static void |
---|
3225 | igb_free_transmit_structures(struct adapter *adapter) |
---|
3226 | { |
---|
3227 | struct tx_ring *txr = adapter->tx_rings; |
---|
3228 | |
---|
3229 | for (int i = 0; i < adapter->num_queues; i++, txr++) { |
---|
3230 | IGB_TX_LOCK(txr); |
---|
3231 | igb_free_transmit_buffers(txr); |
---|
3232 | igb_dma_free(adapter, &txr->txdma); |
---|
3233 | IGB_TX_UNLOCK(txr); |
---|
3234 | IGB_TX_LOCK_DESTROY(txr); |
---|
3235 | } |
---|
3236 | free(adapter->tx_rings, M_DEVBUF); |
---|
3237 | } |
---|
3238 | |
---|
3239 | /********************************************************************* |
---|
3240 | * |
---|
3241 | * Free transmit ring related data structures. |
---|
3242 | * |
---|
3243 | **********************************************************************/ |
---|
static void
igb_free_transmit_buffers(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
	struct igb_tx_buffer *tx_buffer;
	int             i;

	INIT_DEBUGOUT("free_transmit_ring: begin");

	/* Nothing was allocated for this ring, nothing to release */
	if (txr->tx_buffers == NULL)
		return;

	/*
	 * For each descriptor slot: sync/unload the DMA map, free any
	 * in-flight mbuf, and destroy the map itself.
	 */
	tx_buffer = txr->tx_buffers;
	for (i = 0; i < adapter->num_tx_desc; i++, tx_buffer++) {
		if (tx_buffer->m_head != NULL) {
			/* Slot still holds a packet */
			bus_dmamap_sync(txr->txtag, tx_buffer->map,
			    BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			m_freem(tx_buffer->m_head);
			tx_buffer->m_head = NULL;
			if (tx_buffer->map != NULL) {
				bus_dmamap_destroy(txr->txtag,
				    tx_buffer->map);
				tx_buffer->map = NULL;
			}
		} else if (tx_buffer->map != NULL) {
			/* No mbuf, but a map exists: release it too */
			bus_dmamap_unload(txr->txtag,
			    tx_buffer->map);
			bus_dmamap_destroy(txr->txtag,
			    tx_buffer->map);
			tx_buffer->map = NULL;
		}
	}
#if __FreeBSD_version >= 800000
	/* Drain queue for multiqueue transmit (if_transmit path) */
	if (txr->br != NULL)
		buf_ring_free(txr->br, M_DEVBUF);
#endif
	/* NULL checks are redundant given the early return above, kept as-is */
	if (txr->tx_buffers != NULL) {
		free(txr->tx_buffers, M_DEVBUF);
		txr->tx_buffers = NULL;
	}
	if (txr->txtag != NULL) {
		bus_dma_tag_destroy(txr->txtag);
		txr->txtag = NULL;
	}
	return;
}
---|
3292 | |
---|
3293 | /********************************************************************** |
---|
3294 | * |
---|
3295 | * Setup work for hardware segmentation offload (TSO) |
---|
3296 | * |
---|
3297 | **********************************************************************/ |
---|
static boolean_t
igb_tso_setup(struct tx_ring *txr, struct mbuf *mp, u32 *hdrlen)
{
	struct adapter *adapter = txr->adapter;
	struct e1000_adv_tx_context_desc *TXD;
	struct igb_tx_buffer        *tx_buffer;
	u32 vlan_macip_lens = 0, type_tucmd_mlhl = 0;
	u32 mss_l4len_idx = 0;
	u16 vtag = 0;
	int ctxd, ehdrlen, ip_hlen, tcp_hlen;
	struct ether_vlan_header *eh;
	struct ip *ip;
	struct tcphdr *th;


	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN))
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	else
		ehdrlen = ETHER_HDR_LEN;

	/* Ensure we have at least the IP+TCP header in the first mbuf. */
	if (mp->m_len < ehdrlen + sizeof(struct ip) + sizeof(struct tcphdr))
		return FALSE;

	/* Only supports IPV4 for now */
	ctxd = txr->next_avail_desc;
	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd];

	ip = (struct ip *)(mp->m_data + ehdrlen);
	if (ip->ip_p != IPPROTO_TCP)
		return FALSE;	/* 0 */
	/*
	 * Zero the IP checksum and seed the TCP pseudo-header checksum;
	 * the hardware fills in the final values during segmentation.
	 */
	ip->ip_sum = 0;
	ip_hlen = ip->ip_hl << 2;
	th = (struct tcphdr *)((caddr_t)ip + ip_hlen);
	th->th_sum = in_pseudo(ip->ip_src.s_addr,
	    ip->ip_dst.s_addr, htons(IPPROTO_TCP));
	tcp_hlen = th->th_off << 2;
	/*
	 * Calculate header length, this is used
	 * in the transmit desc in igb_xmit
	 */
	*hdrlen = ehdrlen + ip_hlen + tcp_hlen;

	/* VLAN MACLEN IPLEN */
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
	}

	vlan_macip_lens |= (ehdrlen << E1000_ADVTXD_MACLEN_SHIFT);
	vlan_macip_lens |= ip_hlen;
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);

	/* ADV DTYPE TUCMD */
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
	type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);

	/* MSS L4LEN IDX */
	mss_l4len_idx |= (mp->m_pkthdr.tso_segsz << E1000_ADVTXD_MSS_SHIFT);
	mss_l4len_idx |= (tcp_hlen << E1000_ADVTXD_L4LEN_SHIFT);
	/* 82575 needs the queue index added */
	if (adapter->hw.mac.type == e1000_82575)
		mss_l4len_idx |= txr->me << 4;
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	TXD->seqnum_seed = htole32(0);
	/* Context descriptors carry no packet data */
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	/* Consume one descriptor slot, with ring wrap */
	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;

	txr->tx_avail--;
	txr->next_avail_desc = ctxd;
	return TRUE;
}
---|
3382 | |
---|
3383 | |
---|
3384 | /********************************************************************* |
---|
3385 | * |
---|
3386 | * Context Descriptor setup for VLAN or CSUM |
---|
3387 | * |
---|
3388 | **********************************************************************/ |
---|
3389 | |
---|
static bool
igb_tx_ctx_setup(struct tx_ring *txr, struct mbuf *mp)
{
	struct adapter *adapter = txr->adapter;
	struct e1000_adv_tx_context_desc *TXD;
	struct igb_tx_buffer        *tx_buffer;
	u32 vlan_macip_lens, type_tucmd_mlhl, mss_l4len_idx;
	struct ether_vlan_header *eh;
	struct ip *ip = NULL;
	struct ip6_hdr *ip6;
	int  ehdrlen, ctxd, ip_hlen = 0;
	u16	etype, vtag = 0;
	u8	ipproto = 0;
	bool	offload = TRUE;

	/* No checksum offload requested by the stack */
	if ((mp->m_pkthdr.csum_flags & CSUM_OFFLOAD) == 0)
		offload = FALSE;

	vlan_macip_lens = type_tucmd_mlhl = mss_l4len_idx = 0;
	ctxd = txr->next_avail_desc;
	tx_buffer = &txr->tx_buffers[ctxd];
	TXD = (struct e1000_adv_tx_context_desc *) &txr->tx_base[ctxd];

	/*
	** In advanced descriptors the vlan tag must
	** be placed into the context descriptor, thus
	** we need to be here just for that setup.
	*/
	if (mp->m_flags & M_VLANTAG) {
		vtag = htole16(mp->m_pkthdr.ether_vtag);
		vlan_macip_lens |= (vtag << E1000_ADVTXD_VLAN_SHIFT);
	} else if (offload == FALSE)
		/* Neither VLAN nor checksum work to do: no descriptor used */
		return FALSE;

	/*
	 * Determine where frame payload starts.
	 * Jump over vlan headers if already present,
	 * helpful for QinQ too.
	 */
	eh = mtod(mp, struct ether_vlan_header *);
	if (eh->evl_encap_proto == htons(ETHERTYPE_VLAN)) {
		etype = ntohs(eh->evl_proto);
		ehdrlen = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
	} else {
		etype = ntohs(eh->evl_encap_proto);
		ehdrlen = ETHER_HDR_LEN;
	}

	/* Set the ether header length */
	vlan_macip_lens |= ehdrlen << E1000_ADVTXD_MACLEN_SHIFT;

	/* L3: pick up the IP header length and L4 protocol */
	switch (etype) {
		case ETHERTYPE_IP:
			ip = (struct ip *)(mp->m_data + ehdrlen);
			ip_hlen = ip->ip_hl << 2;
			if (mp->m_len < ehdrlen + ip_hlen) {
				/* IP header not contiguous: cannot offload */
				offload = FALSE;
				break;
			}
			ipproto = ip->ip_p;
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV4;
			break;
		case ETHERTYPE_IPV6:
			ip6 = (struct ip6_hdr *)(mp->m_data + ehdrlen);
			ip_hlen = sizeof(struct ip6_hdr);
			ipproto = ip6->ip6_nxt;
			type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_IPV6;
			break;
		default:
			offload = FALSE;
			break;
	}

	vlan_macip_lens |= ip_hlen;
	type_tucmd_mlhl |= E1000_ADVTXD_DCMD_DEXT | E1000_ADVTXD_DTYP_CTXT;

	/* L4: request the checksum type the stack asked for */
	switch (ipproto) {
		case IPPROTO_TCP:
			if (mp->m_pkthdr.csum_flags & CSUM_TCP)
				type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_TCP;
			break;
		case IPPROTO_UDP:
			if (mp->m_pkthdr.csum_flags & CSUM_UDP)
				type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_UDP;
			break;
#if __FreeBSD_version >= 800000
		case IPPROTO_SCTP:
			if (mp->m_pkthdr.csum_flags & CSUM_SCTP)
				type_tucmd_mlhl |= E1000_ADVTXD_TUCMD_L4T_SCTP;
			break;
#endif
		default:
			offload = FALSE;
			break;
	}

	/* 82575 needs the queue index added */
	if (adapter->hw.mac.type == e1000_82575)
		mss_l4len_idx = txr->me << 4;

	/* Now copy bits into descriptor */
	TXD->vlan_macip_lens |= htole32(vlan_macip_lens);
	TXD->type_tucmd_mlhl |= htole32(type_tucmd_mlhl);
	TXD->seqnum_seed = htole32(0);
	TXD->mss_l4len_idx = htole32(mss_l4len_idx);

	/* Context descriptors carry no packet data */
	tx_buffer->m_head = NULL;
	tx_buffer->next_eop = -1;

	/* We've consumed the first desc, adjust counters */
	if (++ctxd == adapter->num_tx_desc)
		ctxd = 0;
	txr->next_avail_desc = ctxd;
	--txr->tx_avail;

	/* FALSE here means the descriptor was written but carries no offload */
	return (offload);
}
---|
3507 | |
---|
3508 | |
---|
3509 | /********************************************************************** |
---|
3510 | * |
---|
3511 | * Examine each tx_buffer in the used queue. If the hardware is done |
---|
3512 | * processing the packet then free associated resources. The |
---|
3513 | * tx_buffer is put back on the free queue. |
---|
3514 | * |
---|
3515 | * TRUE return means there's work in the ring to clean, FALSE its empty. |
---|
3516 | **********************************************************************/ |
---|
static bool
igb_txeof(struct tx_ring *txr)
{
	struct adapter *adapter = txr->adapter;
        int first, last, done, processed;
        struct igb_tx_buffer *tx_buffer;
        struct e1000_tx_desc   *tx_desc, *eop_desc;
	struct ifnet   *ifp = adapter->ifp;

	IGB_TX_LOCK_ASSERT(txr);

	/* Every descriptor is free: nothing outstanding to clean */
	if (txr->tx_avail == adapter->num_tx_desc) {
		txr->queue_status = IGB_QUEUE_IDLE;
		return FALSE;
	}

	processed = 0;
	first = txr->next_to_clean;
	tx_desc = &txr->tx_base[first];
	tx_buffer = &txr->tx_buffers[first];
	last = tx_buffer->next_eop;
	eop_desc = &txr->tx_base[last];

	/*
	 * What this does is get the index of the
	 * first descriptor AFTER the EOP of the
	 * first packet, that way we can do the
	 * simple comparison on the inner while loop.
	 */
	if (++last == adapter->num_tx_desc)
		last = 0;
	done = last;

	/* Pull the hardware's status writes into host view */
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Outer loop: one iteration per completed packet (DD set on EOP) */
	while (eop_desc->upper.fields.status & E1000_TXD_STAT_DD) {
		/* We clean the range of the packet */
		while (first != done) {
			tx_desc->upper.data = 0;
			tx_desc->lower.data = 0;
			tx_desc->buffer_addr = 0;
			++txr->tx_avail;
			++processed;

			if (tx_buffer->m_head) {
				txr->bytes +=
				    tx_buffer->m_head->m_pkthdr.len;
				bus_dmamap_sync(txr->txtag,
				    tx_buffer->map,
				    BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(txr->txtag,
				    tx_buffer->map);

				m_freem(tx_buffer->m_head);
				tx_buffer->m_head = NULL;
			}
			tx_buffer->next_eop = -1;
			/* Progress made: push the watchdog deadline out */
			txr->watchdog_time = ticks;

			if (++first == adapter->num_tx_desc)
				first = 0;

			tx_buffer = &txr->tx_buffers[first];
			tx_desc = &txr->tx_base[first];
		}
		++txr->packets;
		++ifp->if_opackets;
		/* See if we can continue to the next packet */
		last = tx_buffer->next_eop;
		if (last != -1) {
			eop_desc = &txr->tx_base[last];
			/* Get new done point */
			if (++last == adapter->num_tx_desc) last = 0;
			done = last;
		} else
			break;
	}
	bus_dmamap_sync(txr->txdma.dma_tag, txr->txdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	txr->next_to_clean = first;

	/*
	** Watchdog calculation, we know there's
	** work outstanding or the first return
	** would have been taken, so none processed
	** for too long indicates a hang.
	*/
	if ((!processed) && ((ticks - txr->watchdog_time) > IGB_WATCHDOG))
		txr->queue_status = IGB_QUEUE_HUNG;

	/*
	 * If we have enough room, clear IFF_DRV_OACTIVE
	 * to tell the stack that it is OK to send packets.
	 */
	if (txr->tx_avail > IGB_TX_CLEANUP_THRESHOLD) {
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		/* All clean, turn off the watchdog */
		if (txr->tx_avail == adapter->num_tx_desc) {
			txr->queue_status = IGB_QUEUE_IDLE;
			return (FALSE);
		}
	}

	return (TRUE);
}
---|
3624 | |
---|
3625 | |
---|
3626 | /********************************************************************* |
---|
3627 | * |
---|
3628 | * Refresh mbuf buffers for RX descriptor rings |
---|
3629 | * - now keeps its own state so discards due to resource |
---|
3630 | * exhaustion are unnecessary, if an mbuf cannot be obtained |
---|
3631 | * it just returns, keeping its placeholder, thus it can simply |
---|
3632 | * be recalled to try again. |
---|
3633 | * |
---|
3634 | **********************************************************************/ |
---|
3635 | static void |
---|
3636 | igb_refresh_mbufs(struct rx_ring *rxr, int limit) |
---|
3637 | { |
---|
3638 | struct adapter *adapter = rxr->adapter; |
---|
3639 | bus_dma_segment_t hseg[1]; |
---|
3640 | bus_dma_segment_t pseg[1]; |
---|
3641 | struct igb_rx_buf *rxbuf; |
---|
3642 | struct mbuf *mh, *mp; |
---|
3643 | int i, nsegs, error, cleaned; |
---|
3644 | |
---|
3645 | i = rxr->next_to_refresh; |
---|
3646 | cleaned = -1; /* Signify no completions */ |
---|
3647 | while (i != limit) { |
---|
3648 | rxbuf = &rxr->rx_buffers[i]; |
---|
3649 | /* No hdr mbuf used with header split off */ |
---|
3650 | if (rxr->hdr_split == FALSE) |
---|
3651 | goto no_split; |
---|
3652 | if (rxbuf->m_head == NULL) { |
---|
3653 | mh = m_gethdr(M_DONTWAIT, MT_DATA); |
---|
3654 | if (mh == NULL) |
---|
3655 | goto update; |
---|
3656 | } else |
---|
3657 | mh = rxbuf->m_head; |
---|
3658 | |
---|
3659 | mh->m_pkthdr.len = mh->m_len = MHLEN; |
---|
3660 | mh->m_len = MHLEN; |
---|
3661 | mh->m_flags |= M_PKTHDR; |
---|
3662 | /* Get the memory mapping */ |
---|
3663 | error = bus_dmamap_load_mbuf_sg(rxr->htag, |
---|
3664 | rxbuf->hmap, mh, hseg, &nsegs, BUS_DMA_NOWAIT); |
---|
3665 | if (error != 0) { |
---|
3666 | printf("Refresh mbufs: hdr dmamap load" |
---|
3667 | " failure - %d\n", error); |
---|
3668 | m_free(mh); |
---|
3669 | rxbuf->m_head = NULL; |
---|
3670 | goto update; |
---|
3671 | } |
---|
3672 | rxbuf->m_head = mh; |
---|
3673 | bus_dmamap_sync(rxr->htag, rxbuf->hmap, |
---|
3674 | BUS_DMASYNC_PREREAD); |
---|
3675 | rxr->rx_base[i].read.hdr_addr = |
---|
3676 | htole64(hseg[0].ds_addr); |
---|
3677 | no_split: |
---|
3678 | if (rxbuf->m_pack == NULL) { |
---|
3679 | mp = m_getjcl(M_DONTWAIT, MT_DATA, |
---|
3680 | M_PKTHDR, adapter->rx_mbuf_sz); |
---|
3681 | if (mp == NULL) |
---|
3682 | goto update; |
---|
3683 | } else |
---|
3684 | mp = rxbuf->m_pack; |
---|
3685 | |
---|
3686 | mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz; |
---|
3687 | /* Get the memory mapping */ |
---|
3688 | error = bus_dmamap_load_mbuf_sg(rxr->ptag, |
---|
3689 | rxbuf->pmap, mp, pseg, &nsegs, BUS_DMA_NOWAIT); |
---|
3690 | if (error != 0) { |
---|
3691 | printf("Refresh mbufs: payload dmamap load" |
---|
3692 | " failure - %d\n", error); |
---|
3693 | m_free(mp); |
---|
3694 | rxbuf->m_pack = NULL; |
---|
3695 | goto update; |
---|
3696 | } |
---|
3697 | rxbuf->m_pack = mp; |
---|
3698 | bus_dmamap_sync(rxr->ptag, rxbuf->pmap, |
---|
3699 | BUS_DMASYNC_PREREAD); |
---|
3700 | rxr->rx_base[i].read.pkt_addr = |
---|
3701 | htole64(pseg[0].ds_addr); |
---|
3702 | |
---|
3703 | cleaned = i; |
---|
3704 | /* Calculate next index */ |
---|
3705 | if (++i == adapter->num_rx_desc) |
---|
3706 | i = 0; |
---|
3707 | /* This is the work marker for refresh */ |
---|
3708 | rxr->next_to_refresh = i; |
---|
3709 | } |
---|
3710 | update: |
---|
3711 | if (cleaned != -1) /* If we refreshed some, bump tail */ |
---|
3712 | E1000_WRITE_REG(&adapter->hw, |
---|
3713 | E1000_RDT(rxr->me), cleaned); |
---|
3714 | return; |
---|
3715 | } |
---|
3716 | |
---|
3717 | |
---|
3718 | /********************************************************************* |
---|
3719 | * |
---|
3720 | * Allocate memory for rx_buffer structures. Since we use one |
---|
3721 | * rx_buffer per received packet, the maximum number of rx_buffer's |
---|
3722 | * that we'll need is equal to the number of receive descriptors |
---|
3723 | * that we've allocated. |
---|
3724 | * |
---|
3725 | **********************************************************************/ |
---|
static int
igb_allocate_receive_buffers(struct rx_ring *rxr)
{
	struct	adapter 	*adapter = rxr->adapter;
	device_t 		dev = adapter->dev;
	struct igb_rx_buf	*rxbuf;
	int             	i, bsize, error;

	/* One igb_rx_buf per receive descriptor */
	bsize = sizeof(struct igb_rx_buf) * adapter->num_rx_desc;
	if (!(rxr->rx_buffers =
	    (struct igb_rx_buf *) malloc(bsize,
	    M_DEVBUF, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "Unable to allocate rx_buffer memory\n");
		error = ENOMEM;
		goto fail;
	}

	/* Tag for header-split header buffers: single segment, mbuf-sized */
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
				   1, 0,		/* alignment, bounds */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,		/* filter, filterarg */
				   MSIZE,		/* maxsize */
				   1,			/* nsegments */
				   MSIZE,		/* maxsegsize */
				   0,			/* flags */
				   NULL,		/* lockfunc */
				   NULL,		/* lockfuncarg */
				   &rxr->htag))) {
		device_printf(dev, "Unable to create RX DMA tag\n");
		goto fail;
	}

	/* Tag for payload clusters: single segment, up to a 9K jumbo */
	if ((error = bus_dma_tag_create(bus_get_dma_tag(dev),
				   1, 0,		/* alignment, bounds */
				   BUS_SPACE_MAXADDR,	/* lowaddr */
				   BUS_SPACE_MAXADDR,	/* highaddr */
				   NULL, NULL,		/* filter, filterarg */
				   MJUM9BYTES,		/* maxsize */
				   1,			/* nsegments */
				   MJUM9BYTES,		/* maxsegsize */
				   0,			/* flags */
				   NULL,		/* lockfunc */
				   NULL,		/* lockfuncarg */
				   &rxr->ptag))) {
		device_printf(dev, "Unable to create RX payload DMA tag\n");
		goto fail;
	}

	/* Per-slot header and payload DMA maps */
	for (i = 0; i < adapter->num_rx_desc; i++) {
		rxbuf = &rxr->rx_buffers[i];
		error = bus_dmamap_create(rxr->htag,
		    BUS_DMA_NOWAIT, &rxbuf->hmap);
		if (error) {
			device_printf(dev,
			    "Unable to create RX head DMA maps\n");
			goto fail;
		}
		error = bus_dmamap_create(rxr->ptag,
		    BUS_DMA_NOWAIT, &rxbuf->pmap);
		if (error) {
			device_printf(dev,
			    "Unable to create RX packet DMA maps\n");
			goto fail;
		}
	}

	return (0);

fail:
	/* Frees all, but can handle partial completion */
	igb_free_receive_structures(adapter);
	return (error);
}
---|
3800 | |
---|
3801 | |
---|
3802 | static void |
---|
3803 | igb_free_receive_ring(struct rx_ring *rxr) |
---|
3804 | { |
---|
3805 | struct adapter *adapter; |
---|
3806 | struct igb_rx_buf *rxbuf; |
---|
3807 | int i; |
---|
3808 | |
---|
3809 | adapter = rxr->adapter; |
---|
3810 | for (i = 0; i < adapter->num_rx_desc; i++) { |
---|
3811 | rxbuf = &rxr->rx_buffers[i]; |
---|
3812 | if (rxbuf->m_head != NULL) { |
---|
3813 | bus_dmamap_sync(rxr->htag, rxbuf->hmap, |
---|
3814 | BUS_DMASYNC_POSTREAD); |
---|
3815 | bus_dmamap_unload(rxr->htag, rxbuf->hmap); |
---|
3816 | rxbuf->m_head->m_flags |= M_PKTHDR; |
---|
3817 | m_freem(rxbuf->m_head); |
---|
3818 | } |
---|
3819 | if (rxbuf->m_pack != NULL) { |
---|
3820 | bus_dmamap_sync(rxr->ptag, rxbuf->pmap, |
---|
3821 | BUS_DMASYNC_POSTREAD); |
---|
3822 | bus_dmamap_unload(rxr->ptag, rxbuf->pmap); |
---|
3823 | rxbuf->m_pack->m_flags |= M_PKTHDR; |
---|
3824 | m_freem(rxbuf->m_pack); |
---|
3825 | } |
---|
3826 | rxbuf->m_head = NULL; |
---|
3827 | rxbuf->m_pack = NULL; |
---|
3828 | } |
---|
3829 | } |
---|
3830 | |
---|
3831 | |
---|
3832 | /********************************************************************* |
---|
3833 | * |
---|
3834 | * Initialize a receive ring and its buffers. |
---|
3835 | * |
---|
3836 | **********************************************************************/ |
---|
static int
igb_setup_receive_ring(struct rx_ring *rxr)
{
	struct	adapter		*adapter;
	struct  ifnet		*ifp;
	device_t		dev;
	struct igb_rx_buf	*rxbuf;
	bus_dma_segment_t	pseg[1], hseg[1];
	struct lro_ctrl		*lro = &rxr->lro;
	int			rsize, nsegs, error = 0;

	adapter = rxr->adapter;
	dev = adapter->dev;
	ifp = adapter->ifp;

	/* Clear the ring contents */
	IGB_RX_LOCK(rxr);
	rsize = roundup2(adapter->num_rx_desc *
	    sizeof(union e1000_adv_rx_desc), IGB_DBA_ALIGN);
	bzero((void *)rxr->rx_base, rsize);

	/*
	** Free current RX buffer structures and their mbufs
	*/
	igb_free_receive_ring(rxr);

	/* Configure for header split? (driver-global tunable) */
	if (igb_header_split)
		rxr->hdr_split = TRUE;

	/* Now replenish the ring mbufs */
	for (int j = 0; j < adapter->num_rx_desc; ++j) {
		struct mbuf	*mh, *mp;

		rxbuf = &rxr->rx_buffers[j];
		if (rxr->hdr_split == FALSE)
			goto skip_head;

		/* First the header */
		rxbuf->m_head = m_gethdr(M_DONTWAIT, MT_DATA);
		if (rxbuf->m_head == NULL) {
			error = ENOBUFS;
                        goto fail;
		}
		/* Align the payload that follows the ethernet header */
		m_adj(rxbuf->m_head, ETHER_ALIGN);
		mh = rxbuf->m_head;
		mh->m_len = mh->m_pkthdr.len = MHLEN;
		mh->m_flags |= M_PKTHDR;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->htag,
		    rxbuf->hmap, rxbuf->m_head, hseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0) /* Nothing elegant to do here */
                        goto fail;
		bus_dmamap_sync(rxr->htag,
		    rxbuf->hmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.hdr_addr = htole64(hseg[0].ds_addr);

skip_head:
		/* Now the payload cluster */
		rxbuf->m_pack = m_getjcl(M_DONTWAIT, MT_DATA,
		    M_PKTHDR, adapter->rx_mbuf_sz);
		if (rxbuf->m_pack == NULL) {
			error = ENOBUFS;
                        goto fail;
		}
		mp = rxbuf->m_pack;
		mp->m_pkthdr.len = mp->m_len = adapter->rx_mbuf_sz;
		/* Get the memory mapping */
		error = bus_dmamap_load_mbuf_sg(rxr->ptag,
		    rxbuf->pmap, mp, pseg,
		    &nsegs, BUS_DMA_NOWAIT);
		if (error != 0)
                        goto fail;
		bus_dmamap_sync(rxr->ptag,
		    rxbuf->pmap, BUS_DMASYNC_PREREAD);
		/* Update descriptor */
		rxr->rx_base[j].read.pkt_addr = htole64(pseg[0].ds_addr);
        }

	/* Setup our descriptor indices */
	rxr->next_to_check = 0;
	rxr->next_to_refresh = 0;
	rxr->lro_enabled = FALSE;
	rxr->rx_split_packets = 0;
	rxr->rx_bytes = 0;

	rxr->fmp = NULL;
	rxr->lmp = NULL;
	rxr->discard = FALSE;

	/* Make the freshly written descriptors visible to the device */
	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	** Now set up the LRO interface, we
	** also only do head split when LRO
	** is enabled, since so often they
	** are undesirable in similar setups.
	*/
	if (ifp->if_capenable & IFCAP_LRO) {
		error = tcp_lro_init(lro);
		if (error) {
			device_printf(dev, "LRO Initialization failed!\n");
			goto fail;
		}
		INIT_DEBUGOUT("RX LRO Initialized\n");
		rxr->lro_enabled = TRUE;
		lro->ifp = adapter->ifp;
	}

	IGB_RX_UNLOCK(rxr);
	return (0);

fail:
	/* On any failure we clean up everything allocated in this ring */
	igb_free_receive_ring(rxr);
	IGB_RX_UNLOCK(rxr);
	return (error);
}
---|
3957 | |
---|
3958 | /********************************************************************* |
---|
3959 | * |
---|
3960 | * Initialize all receive rings. |
---|
3961 | * |
---|
3962 | **********************************************************************/ |
---|
3963 | static int |
---|
3964 | igb_setup_receive_structures(struct adapter *adapter) |
---|
3965 | { |
---|
3966 | struct rx_ring *rxr = adapter->rx_rings; |
---|
3967 | int i; |
---|
3968 | |
---|
3969 | for (i = 0; i < adapter->num_queues; i++, rxr++) |
---|
3970 | if (igb_setup_receive_ring(rxr)) |
---|
3971 | goto fail; |
---|
3972 | |
---|
3973 | return (0); |
---|
3974 | fail: |
---|
3975 | /* |
---|
3976 | * Free RX buffers allocated so far, we will only handle |
---|
3977 | * the rings that completed, the failing case will have |
---|
3978 | * cleaned up for itself. 'i' is the endpoint. |
---|
3979 | */ |
---|
3980 | for (int j = 0; j > i; ++j) { |
---|
3981 | rxr = &adapter->rx_rings[i]; |
---|
3982 | IGB_RX_LOCK(rxr); |
---|
3983 | igb_free_receive_ring(rxr); |
---|
3984 | IGB_RX_UNLOCK(rxr); |
---|
3985 | } |
---|
3986 | |
---|
3987 | return (ENOBUFS); |
---|
3988 | } |
---|
3989 | |
---|
3990 | /********************************************************************* |
---|
3991 | * |
---|
3992 | * Enable receive unit. |
---|
3993 | * |
---|
3994 | **********************************************************************/ |
---|
/*
 * Program the hardware receive unit: descriptor ring bases/lengths,
 * receive buffer sizing (SRRCTL), optional header split, RSS/multiqueue
 * redirection and hash key, checksum-offload flags, and finally RCTL to
 * enable the receiver.  Rings must already be allocated and populated.
 */
static void
igb_initialize_receive_units(struct adapter *adapter)
{
	struct rx_ring	*rxr = adapter->rx_rings;
	struct ifnet	*ifp = adapter->ifp;
	struct e1000_hw	*hw = &adapter->hw;
	u32		rctl, rxcsum, psize, srrctl = 0;

	INIT_DEBUGOUT("igb_initialize_receive_unit: begin");

	/*
	 * Make sure receives are disabled while setting
	 * up the descriptor ring
	 */
	rctl = E1000_READ_REG(hw, E1000_RCTL);
	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);

	/*
	** Set up for header split; the first ring's hdr_split flag
	** selects the descriptor type used for every queue.
	*/
	if (rxr->hdr_split) {
		/* Use a standard mbuf for the header */
		srrctl |= IGB_HDR_BUF << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
		srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
	} else
		srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;

	/*
	** Set up for jumbo frames: pick the SRRCTL packet buffer size
	** to match the cluster size chosen for rx_mbuf_sz.
	*/
	if (ifp->if_mtu > ETHERMTU) {
		rctl |= E1000_RCTL_LPE;
		if (adapter->rx_mbuf_sz == MJUMPAGESIZE) {
			srrctl |= 4096 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
			rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;
		} else if (adapter->rx_mbuf_sz > MJUMPAGESIZE) {
			srrctl |= 8192 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
			rctl |= E1000_RCTL_SZ_8192 | E1000_RCTL_BSEX;
		}
		/* Set maximum packet len */
		psize = adapter->max_frame_size;
		/* are we on a vlan? */
		if (adapter->ifp->if_vlantrunk != NULL)
			psize += VLAN_TAG_SIZE;
		E1000_WRITE_REG(&adapter->hw, E1000_RLPML, psize);
	} else {
		rctl &= ~E1000_RCTL_LPE;
		srrctl |= 2048 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
		rctl |= E1000_RCTL_SZ_2048;
	}

	/* Setup the Base and Length of the Rx Descriptor Rings */
	for (int i = 0; i < adapter->num_queues; i++, rxr++) {
		u64 bus_addr = rxr->rxdma.dma_paddr;
		u32 rxdctl;

		E1000_WRITE_REG(hw, E1000_RDLEN(i),
		    adapter->num_rx_desc * sizeof(struct e1000_rx_desc));
		E1000_WRITE_REG(hw, E1000_RDBAH(i),
		    (uint32_t)(bus_addr >> 32));
		E1000_WRITE_REG(hw, E1000_RDBAL(i),
		    (uint32_t)bus_addr);
		E1000_WRITE_REG(hw, E1000_SRRCTL(i), srrctl);
		/* Enable this Queue */
		rxdctl = E1000_READ_REG(hw, E1000_RXDCTL(i));
		rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
		/* Clear the low 20 bits, then set the three thresholds. */
		rxdctl &= 0xFFF00000;
		rxdctl |= IGB_RX_PTHRESH;
		rxdctl |= IGB_RX_HTHRESH << 8;
		rxdctl |= IGB_RX_WTHRESH << 16;
		E1000_WRITE_REG(hw, E1000_RXDCTL(i), rxdctl);
	}

	/*
	** Setup for RX MultiQueue (RSS)
	*/
	rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
	if (adapter->num_queues >1) {
		u32 random[10], mrqc, shift = 0;
		union igb_reta {
			u32 dword;
			u8  bytes[4];
		} reta;

		/* Random hash key for the RSS key registers. */
		arc4rand(&random, sizeof(random), 0);
		if (adapter->hw.mac.type == e1000_82575)
			shift = 6;
		/* Warning FM follows: fill the 128-entry redirection
		 * table, writing one 32-bit word per four entries. */
		for (int i = 0; i < 128; i++) {
			reta.bytes[i & 3] =
			    (i % adapter->num_queues) << shift;
			if ((i & 3) == 3)
				E1000_WRITE_REG(hw,
				    E1000_RETA(i >> 2), reta.dword);
		}
		/* Now fill in hash table */
		mrqc = E1000_MRQC_ENABLE_RSS_4Q;
		for (int i = 0; i < 10; i++)
			E1000_WRITE_REG_ARRAY(hw,
			    E1000_RSSRK(0), i, random[i]);

		/* Enable hashing over the supported header fields. */
		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
		    E1000_MRQC_RSS_FIELD_IPV4_TCP);
		mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
		    E1000_MRQC_RSS_FIELD_IPV6_TCP);
		mrqc |=( E1000_MRQC_RSS_FIELD_IPV4_UDP |
		    E1000_MRQC_RSS_FIELD_IPV6_UDP);
		mrqc |=( E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
		    E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);

		E1000_WRITE_REG(hw, E1000_MRQC, mrqc);

		/*
		** NOTE: Receive Full-Packet Checksum Offload
		** is mutually exclusive with Multiqueue. However
		** this is not the same as TCP/IP checksums which
		** still work.
		*/
		rxcsum |= E1000_RXCSUM_PCSD;
#if __FreeBSD_version >= 800000
		/* For SCTP Offload */
		if ((hw->mac.type == e1000_82576)
		    && (ifp->if_capenable & IFCAP_RXCSUM))
			rxcsum |= E1000_RXCSUM_CRCOFL;
#endif
	} else {
		/* Non RSS setup */
		if (ifp->if_capenable & IFCAP_RXCSUM) {
			rxcsum |= E1000_RXCSUM_IPPCSE;
#if __FreeBSD_version >= 800000
			if (adapter->hw.mac.type == e1000_82576)
				rxcsum |= E1000_RXCSUM_CRCOFL;
#endif
		} else
			rxcsum &= ~E1000_RXCSUM_TUOFL;
	}
	E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);

	/* Setup the Receive Control Register */
	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
	    E1000_RCTL_RDMTS_HALF |
	    (hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
	/* Strip CRC bytes. */
	rctl |= E1000_RCTL_SECRC;
	/* Make sure VLAN Filters are off */
	rctl &= ~E1000_RCTL_VFE;
	/* Don't store bad packets */
	rctl &= ~E1000_RCTL_SBP;

	/* Enable Receives */
	E1000_WRITE_REG(hw, E1000_RCTL, rctl);

	/*
	 * Setup the HW Rx Head and Tail Descriptor Pointers
	 *   - needs to be after enable
	 */
	for (int i = 0; i < adapter->num_queues; i++) {
		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
		E1000_WRITE_REG(hw, E1000_RDT(i),
		    adapter->num_rx_desc - 1);
	}
	return;
}
---|
4159 | |
---|
4160 | /********************************************************************* |
---|
4161 | * |
---|
4162 | * Free receive rings. |
---|
4163 | * |
---|
4164 | **********************************************************************/ |
---|
4165 | static void |
---|
4166 | igb_free_receive_structures(struct adapter *adapter) |
---|
4167 | { |
---|
4168 | struct rx_ring *rxr = adapter->rx_rings; |
---|
4169 | |
---|
4170 | for (int i = 0; i < adapter->num_queues; i++, rxr++) { |
---|
4171 | struct lro_ctrl *lro = &rxr->lro; |
---|
4172 | igb_free_receive_buffers(rxr); |
---|
4173 | tcp_lro_free(lro); |
---|
4174 | igb_dma_free(adapter, &rxr->rxdma); |
---|
4175 | } |
---|
4176 | |
---|
4177 | free(adapter->rx_rings, M_DEVBUF); |
---|
4178 | } |
---|
4179 | |
---|
4180 | /********************************************************************* |
---|
4181 | * |
---|
4182 | * Free receive ring data structures. |
---|
4183 | * |
---|
4184 | **********************************************************************/ |
---|
4185 | static void |
---|
4186 | igb_free_receive_buffers(struct rx_ring *rxr) |
---|
4187 | { |
---|
4188 | struct adapter *adapter = rxr->adapter; |
---|
4189 | struct igb_rx_buf *rxbuf; |
---|
4190 | int i; |
---|
4191 | |
---|
4192 | INIT_DEBUGOUT("free_receive_structures: begin"); |
---|
4193 | |
---|
4194 | /* Cleanup any existing buffers */ |
---|
4195 | if (rxr->rx_buffers != NULL) { |
---|
4196 | for (i = 0; i < adapter->num_rx_desc; i++) { |
---|
4197 | rxbuf = &rxr->rx_buffers[i]; |
---|
4198 | if (rxbuf->m_head != NULL) { |
---|
4199 | bus_dmamap_sync(rxr->htag, rxbuf->hmap, |
---|
4200 | BUS_DMASYNC_POSTREAD); |
---|
4201 | bus_dmamap_unload(rxr->htag, rxbuf->hmap); |
---|
4202 | rxbuf->m_head->m_flags |= M_PKTHDR; |
---|
4203 | m_freem(rxbuf->m_head); |
---|
4204 | } |
---|
4205 | if (rxbuf->m_pack != NULL) { |
---|
4206 | bus_dmamap_sync(rxr->ptag, rxbuf->pmap, |
---|
4207 | BUS_DMASYNC_POSTREAD); |
---|
4208 | bus_dmamap_unload(rxr->ptag, rxbuf->pmap); |
---|
4209 | rxbuf->m_pack->m_flags |= M_PKTHDR; |
---|
4210 | m_freem(rxbuf->m_pack); |
---|
4211 | } |
---|
4212 | rxbuf->m_head = NULL; |
---|
4213 | rxbuf->m_pack = NULL; |
---|
4214 | if (rxbuf->hmap != NULL) { |
---|
4215 | bus_dmamap_destroy(rxr->htag, rxbuf->hmap); |
---|
4216 | rxbuf->hmap = NULL; |
---|
4217 | } |
---|
4218 | if (rxbuf->pmap != NULL) { |
---|
4219 | bus_dmamap_destroy(rxr->ptag, rxbuf->pmap); |
---|
4220 | rxbuf->pmap = NULL; |
---|
4221 | } |
---|
4222 | } |
---|
4223 | if (rxr->rx_buffers != NULL) { |
---|
4224 | free(rxr->rx_buffers, M_DEVBUF); |
---|
4225 | rxr->rx_buffers = NULL; |
---|
4226 | } |
---|
4227 | } |
---|
4228 | |
---|
4229 | if (rxr->htag != NULL) { |
---|
4230 | bus_dma_tag_destroy(rxr->htag); |
---|
4231 | rxr->htag = NULL; |
---|
4232 | } |
---|
4233 | if (rxr->ptag != NULL) { |
---|
4234 | bus_dma_tag_destroy(rxr->ptag); |
---|
4235 | rxr->ptag = NULL; |
---|
4236 | } |
---|
4237 | } |
---|
4238 | |
---|
4239 | static __inline void |
---|
4240 | igb_rx_discard(struct rx_ring *rxr, int i) |
---|
4241 | { |
---|
4242 | struct igb_rx_buf *rbuf; |
---|
4243 | |
---|
4244 | rbuf = &rxr->rx_buffers[i]; |
---|
4245 | |
---|
4246 | /* Partially received? Free the chain */ |
---|
4247 | if (rxr->fmp != NULL) { |
---|
4248 | rxr->fmp->m_flags |= M_PKTHDR; |
---|
4249 | m_freem(rxr->fmp); |
---|
4250 | rxr->fmp = NULL; |
---|
4251 | rxr->lmp = NULL; |
---|
4252 | } |
---|
4253 | |
---|
4254 | /* |
---|
4255 | ** With advanced descriptors the writeback |
---|
4256 | ** clobbers the buffer addrs, so its easier |
---|
4257 | ** to just free the existing mbufs and take |
---|
4258 | ** the normal refresh path to get new buffers |
---|
4259 | ** and mapping. |
---|
4260 | */ |
---|
4261 | if (rbuf->m_head) { |
---|
4262 | m_free(rbuf->m_head); |
---|
4263 | rbuf->m_head = NULL; |
---|
4264 | } |
---|
4265 | |
---|
4266 | if (rbuf->m_pack) { |
---|
4267 | m_free(rbuf->m_pack); |
---|
4268 | rbuf->m_pack = NULL; |
---|
4269 | } |
---|
4270 | |
---|
4271 | return; |
---|
4272 | } |
---|
4273 | |
---|
/*
 * Deliver a completed frame: try LRO aggregation when the packet is
 * eligible, otherwise hand it to the stack via if_input().  Called
 * with the RX lock held; the lock is dropped around if_input() since
 * the stack may re-enter the driver.
 */
static __inline void
igb_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u32 ptype)
{

	/*
	 * ATM LRO is only for IPv4/TCP packets and TCP checksum of the packet
	 * should be computed by hardware. Also it should not have VLAN tag in
	 * ethernet header.
	 */
	if (rxr->lro_enabled &&
	    (ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
	    (ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 &&
	    (ptype & (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP)) ==
	    (E1000_RXDADV_PKTTYPE_IPV4 | E1000_RXDADV_PKTTYPE_TCP) &&
	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PSEUDO_HDR)) {
		/*
		 * Send to the stack if:
		 **  - LRO not enabled, or
		 **  - no LRO resources, or
		 **  - lro enqueue fails
		 */
		if (rxr->lro.lro_cnt != 0)
			if (tcp_lro_rx(&rxr->lro, m, 0) == 0)
				return;	/* consumed by LRO */
	}
	/* Drop the ring lock across if_input(): the stack may call back
	 * into the driver (e.g. transmit) and must not deadlock. */
	IGB_RX_UNLOCK(rxr);
	(*ifp->if_input)(ifp, m);
	IGB_RX_LOCK(rxr);
}
---|
4304 | |
---|
4305 | /********************************************************************* |
---|
4306 | * |
---|
4307 | * This routine executes in interrupt context. It replenishes |
---|
4308 | * the mbufs in the descriptor and sends data which has been |
---|
4309 | * dma'ed into host memory to upper layer. |
---|
4310 | * |
---|
4311 | * We loop at most count times if count is > 0, or until done if |
---|
4312 | * count < 0. |
---|
4313 | * |
---|
4314 | * Return TRUE if more to clean, FALSE otherwise |
---|
4315 | *********************************************************************/ |
---|
/*
 * RX descriptor cleanup loop.  Walks the ring from next_to_check,
 * assembling (possibly header-split, possibly multi-descriptor)
 * frames into mbuf chains, passing completed frames up via
 * igb_rx_input(), and periodically refreshing consumed buffer slots.
 *
 * que   - the queue to service (supplies adapter and ring)
 * count - max descriptors to process; loop exits when it hits 0
 *         (a negative count effectively means "until done")
 * done  - if non-NULL, receives the number of frames delivered
 *
 * Returns TRUE when more completed descriptors remain (caller should
 * reschedule), FALSE otherwise.
 */
static bool
igb_rxeof(struct igb_queue *que, int count, int *done)
{
	struct adapter		*adapter = que->adapter;
	struct rx_ring		*rxr = que->rxr;
	struct ifnet		*ifp = adapter->ifp;
	struct lro_ctrl		*lro = &rxr->lro;
	struct lro_entry	*queued;
	int			i, processed = 0, rxdone = 0;
	u32			ptype, staterr = 0;
	union e1000_adv_rx_desc	*cur;

	IGB_RX_LOCK(rxr);
	/* Sync the ring. */
	bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* Main clean loop */
	for (i = rxr->next_to_check; count != 0;) {
		struct mbuf		*sendmp, *mh, *mp;
		struct igb_rx_buf	*rxbuf;
		u16			hlen, plen, hdr, vtag;
		bool			eop = FALSE;

		cur = &rxr->rx_base[i];
		staterr = le32toh(cur->wb.upper.status_error);
		/* Descriptor Done not set: hardware hasn't written it yet. */
		if ((staterr & E1000_RXD_STAT_DD) == 0)
			break;
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
			break;
		count--;
		sendmp = mh = mp = NULL;
		/* Clear status so this slot isn't re-processed. */
		cur->wb.upper.status_error = 0;
		rxbuf = &rxr->rx_buffers[i];
		plen = le16toh(cur->wb.upper.length);
		ptype = le32toh(cur->wb.lower.lo_dword.data) & IGB_PKTTYPE_MASK;
		vtag = le16toh(cur->wb.upper.vlan);
		hdr = le16toh(cur->wb.lower.lo_dword.hs_rss.hdr_info);
		eop = ((staterr & E1000_RXD_STAT_EOP) == E1000_RXD_STAT_EOP);

		/* Make sure all segments of a bad packet are discarded */
		if (((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) != 0) ||
		    (rxr->discard)) {
			ifp->if_ierrors++;
			++rxr->rx_discarded;
			if (!eop) /* Catch subsequent segs */
				rxr->discard = TRUE;
			else
				rxr->discard = FALSE;
			igb_rx_discard(rxr, i);
			goto next_desc;
		}

		/*
		** The way the hardware is configured to
		** split, it will ONLY use the header buffer
		** when header split is enabled, otherwise we
		** get normal behavior, ie, both header and
		** payload are DMA'd into the payload buffer.
		**
		** The fmp test is to catch the case where a
		** packet spans multiple descriptors, in that
		** case only the first header is valid.
		*/
		if (rxr->hdr_split && rxr->fmp == NULL) {
			hlen = (hdr & E1000_RXDADV_HDRBUFLEN_MASK) >>
			    E1000_RXDADV_HDRBUFLEN_SHIFT;
			if (hlen > IGB_HDR_BUF)
				hlen = IGB_HDR_BUF;
			mh = rxr->rx_buffers[i].m_head;
			mh->m_len = hlen;
			/* clear buf pointer for refresh */
			rxbuf->m_head = NULL;
			/*
			** Get the payload length, this
			** could be zero if its a small
			** packet.
			*/
			if (plen > 0) {
				mp = rxr->rx_buffers[i].m_pack;
				mp->m_len = plen;
				mh->m_next = mp;
				/* clear buf pointer */
				rxbuf->m_pack = NULL;
				rxr->rx_split_packets++;
			}
		} else {
			/*
			** Either no header split, or a
			** secondary piece of a fragmented
			** split packet.
			*/
			mh = rxr->rx_buffers[i].m_pack;
			mh->m_len = plen;
			/* clear buf info for refresh */
			rxbuf->m_pack = NULL;
		}

		++processed; /* So we know when to refresh */

		/* Initial frame - setup */
		if (rxr->fmp == NULL) {
			mh->m_pkthdr.len = mh->m_len;
			/* Save the head of the chain */
			rxr->fmp = mh;
			rxr->lmp = mh;
			if (mp != NULL) {
				/* Add payload if split */
				mh->m_pkthdr.len += mp->m_len;
				rxr->lmp = mh->m_next;
			}
		} else {
			/* Chain mbuf's together */
			rxr->lmp->m_next = mh;
			rxr->lmp = rxr->lmp->m_next;
			rxr->fmp->m_pkthdr.len += mh->m_len;
		}

		/* End of packet: finalize header fields and hand it off. */
		if (eop) {
			rxr->fmp->m_pkthdr.rcvif = ifp;
			ifp->if_ipackets++;
			rxr->rx_packets++;
			/* capture data for AIM */
			rxr->packets++;
			rxr->bytes += rxr->fmp->m_pkthdr.len;
			rxr->rx_bytes += rxr->fmp->m_pkthdr.len;

			if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
				igb_rx_checksum(staterr, rxr->fmp, ptype);

			if ((ifp->if_capenable & IFCAP_VLAN_HWTAGGING) != 0 &&
			    (staterr & E1000_RXD_STAT_VP) != 0) {
				rxr->fmp->m_pkthdr.ether_vtag = vtag;
				rxr->fmp->m_flags |= M_VLANTAG;
			}
#if __FreeBSD_version >= 800000
			/* Tag the flow with this queue's MSIX vector. */
			rxr->fmp->m_pkthdr.flowid = que->msix;
			rxr->fmp->m_flags |= M_FLOWID;
#endif
			sendmp = rxr->fmp;
			/* Make sure to set M_PKTHDR. */
			sendmp->m_flags |= M_PKTHDR;
			rxr->fmp = NULL;
			rxr->lmp = NULL;
		}

next_desc:
		bus_dmamap_sync(rxr->rxdma.dma_tag, rxr->rxdma.dma_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Advance our pointers to the next descriptor. */
		if (++i == adapter->num_rx_desc)
			i = 0;
		/*
		** Send to the stack or LRO
		** (igb_rx_input drops/retakes the RX lock, so
		** next_to_check is published before the call and
		** re-read afterwards in case it moved).
		*/
		if (sendmp != NULL) {
			rxr->next_to_check = i;
			igb_rx_input(rxr, ifp, sendmp, ptype);
			i = rxr->next_to_check;
			rxdone++;
		}

		/* Every 8 descriptors we go to refresh mbufs */
		if (processed == 8) {
			igb_refresh_mbufs(rxr, i);
			processed = 0;
		}
	}

	/* Catch any remainders */
	if (processed != 0) {
		igb_refresh_mbufs(rxr, i);
		processed = 0;
	}

	rxr->next_to_check = i;

	/*
	 * Flush any outstanding LRO work
	 */
	while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) {
		SLIST_REMOVE_HEAD(&lro->lro_active, next);
		tcp_lro_flush(lro, queued);
	}

	IGB_RX_UNLOCK(rxr);

	if (done != NULL)
		*done = rxdone;

	/*
	** We still have cleaning to do?
	** Schedule another interrupt if so.
	*/
	if ((staterr & E1000_RXD_STAT_DD) != 0)
		return (TRUE);

	return (FALSE);
}
---|
4516 | |
---|
4517 | /********************************************************************* |
---|
4518 | * |
---|
4519 | * Verify that the hardware indicated that the checksum is valid. |
---|
4520 | * Inform the stack about the status of checksum so that stack |
---|
4521 | * doesn't spend time verifying the checksum. |
---|
4522 | * |
---|
4523 | *********************************************************************/ |
---|
4524 | static void |
---|
4525 | igb_rx_checksum(u32 staterr, struct mbuf *mp, u32 ptype) |
---|
4526 | { |
---|
4527 | u16 status = (u16)staterr; |
---|
4528 | u8 errors = (u8) (staterr >> 24); |
---|
4529 | int sctp; |
---|
4530 | |
---|
4531 | /* Ignore Checksum bit is set */ |
---|
4532 | if (status & E1000_RXD_STAT_IXSM) { |
---|
4533 | mp->m_pkthdr.csum_flags = 0; |
---|
4534 | return; |
---|
4535 | } |
---|
4536 | |
---|
4537 | if ((ptype & E1000_RXDADV_PKTTYPE_ETQF) == 0 && |
---|
4538 | (ptype & E1000_RXDADV_PKTTYPE_SCTP) != 0) |
---|
4539 | sctp = 1; |
---|
4540 | else |
---|
4541 | sctp = 0; |
---|
4542 | if (status & E1000_RXD_STAT_IPCS) { |
---|
4543 | /* Did it pass? */ |
---|
4544 | if (!(errors & E1000_RXD_ERR_IPE)) { |
---|
4545 | /* IP Checksum Good */ |
---|
4546 | mp->m_pkthdr.csum_flags = CSUM_IP_CHECKED; |
---|
4547 | mp->m_pkthdr.csum_flags |= CSUM_IP_VALID; |
---|
4548 | } else |
---|
4549 | mp->m_pkthdr.csum_flags = 0; |
---|
4550 | } |
---|
4551 | |
---|
4552 | if (status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) { |
---|
4553 | u16 type = (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); |
---|
4554 | #if __FreeBSD_version >= 800000 |
---|
4555 | if (sctp) /* reassign */ |
---|
4556 | type = CSUM_SCTP_VALID; |
---|
4557 | #endif |
---|
4558 | /* Did it pass? */ |
---|
4559 | if (!(errors & E1000_RXD_ERR_TCPE)) { |
---|
4560 | mp->m_pkthdr.csum_flags |= type; |
---|
4561 | if (sctp == 0) |
---|
4562 | mp->m_pkthdr.csum_data = htons(0xffff); |
---|
4563 | } |
---|
4564 | } |
---|
4565 | return; |
---|
4566 | } |
---|
4567 | |
---|
4568 | /* |
---|
4569 | * This routine is run via an vlan |
---|
4570 | * config EVENT |
---|
4571 | */ |
---|
4572 | static void |
---|
4573 | igb_register_vlan(void *arg, struct ifnet *ifp, u16 vtag) |
---|
4574 | { |
---|
4575 | struct adapter *adapter = ifp->if_softc; |
---|
4576 | u32 index, bit; |
---|
4577 | |
---|
4578 | if (ifp->if_softc != arg) /* Not our event */ |
---|
4579 | return; |
---|
4580 | |
---|
4581 | if ((vtag == 0) || (vtag > 4095)) /* Invalid */ |
---|
4582 | return; |
---|
4583 | |
---|
4584 | IGB_CORE_LOCK(adapter); |
---|
4585 | index = (vtag >> 5) & 0x7F; |
---|
4586 | bit = vtag & 0x1F; |
---|
4587 | adapter->shadow_vfta[index] |= (1 << bit); |
---|
4588 | ++adapter->num_vlans; |
---|
4589 | /* Re-init to load the changes */ |
---|
4590 | if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) |
---|
4591 | igb_init_locked(adapter); |
---|
4592 | IGB_CORE_UNLOCK(adapter); |
---|
4593 | } |
---|
4594 | |
---|
4595 | /* |
---|
4596 | * This routine is run via an vlan |
---|
4597 | * unconfig EVENT |
---|
4598 | */ |
---|
4599 | static void |
---|
4600 | igb_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag) |
---|
4601 | { |
---|
4602 | struct adapter *adapter = ifp->if_softc; |
---|
4603 | u32 index, bit; |
---|
4604 | |
---|
4605 | if (ifp->if_softc != arg) |
---|
4606 | return; |
---|
4607 | |
---|
4608 | if ((vtag == 0) || (vtag > 4095)) /* Invalid */ |
---|
4609 | return; |
---|
4610 | |
---|
4611 | IGB_CORE_LOCK(adapter); |
---|
4612 | index = (vtag >> 5) & 0x7F; |
---|
4613 | bit = vtag & 0x1F; |
---|
4614 | adapter->shadow_vfta[index] &= ~(1 << bit); |
---|
4615 | --adapter->num_vlans; |
---|
4616 | /* Re-init to load the changes */ |
---|
4617 | if (ifp->if_capenable & IFCAP_VLAN_HWFILTER) |
---|
4618 | igb_init_locked(adapter); |
---|
4619 | IGB_CORE_UNLOCK(adapter); |
---|
4620 | } |
---|
4621 | |
---|
/*
 * Reload VLAN state into the hardware after a soft reset: repopulate
 * the VFTA from the driver's shadow copy and enable VLAN tag
 * stripping/filtering.  VF adapters use mailbox helpers instead of
 * direct register writes.
 */
static void
igb_setup_vlan_hw_support(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	u32             reg;

	/*
	** We get here thru init_locked, meaning
	** a soft reset, this has already cleared
	** the VFTA and other state, so if there
	** have been no vlan's registered do nothing.
	*/
	if (adapter->num_vlans == 0)
		return;

	/*
	** A soft reset zero's out the VFTA, so
	** we need to repopulate it now.
	*/
	for (int i = 0; i < IGB_VFTA_SIZE; i++)
		if (adapter->shadow_vfta[i] != 0) {
			if (hw->mac.type == e1000_vfadapt)
				e1000_vfta_set_vf(hw,
				    adapter->shadow_vfta[i], TRUE);
			else
				E1000_WRITE_REG_ARRAY(hw, E1000_VFTA,
				    i, adapter->shadow_vfta[i]);
		}

	if (hw->mac.type == e1000_vfadapt)
		/* VF: request the larger frame limit via mailbox. */
		e1000_rlpml_set_vf(hw,
		    adapter->max_frame_size + VLAN_TAG_SIZE);
	else {
		/* Enable VLAN tag stripping (VME). */
		reg = E1000_READ_REG(hw, E1000_CTRL);
		reg |= E1000_CTRL_VME;
		E1000_WRITE_REG(hw, E1000_CTRL, reg);

		/* Enable the Filter Table */
		reg = E1000_READ_REG(hw, E1000_RCTL);
		reg &= ~E1000_RCTL_CFIEN;
		reg |= E1000_RCTL_VFE;
		E1000_WRITE_REG(hw, E1000_RCTL, reg);

		/* Update the frame size */
		E1000_WRITE_REG(&adapter->hw, E1000_RLPML,
		    adapter->max_frame_size + VLAN_TAG_SIZE);
	}
}
---|
4670 | |
---|
4671 | static void |
---|
4672 | igb_enable_intr(struct adapter *adapter) |
---|
4673 | { |
---|
4674 | /* With RSS set up what to auto clear */ |
---|
4675 | if (adapter->msix_mem) { |
---|
4676 | E1000_WRITE_REG(&adapter->hw, E1000_EIAC, |
---|
4677 | adapter->eims_mask); |
---|
4678 | E1000_WRITE_REG(&adapter->hw, E1000_EIAM, |
---|
4679 | adapter->eims_mask); |
---|
4680 | E1000_WRITE_REG(&adapter->hw, E1000_EIMS, |
---|
4681 | adapter->eims_mask); |
---|
4682 | E1000_WRITE_REG(&adapter->hw, E1000_IMS, |
---|
4683 | E1000_IMS_LSC); |
---|
4684 | } else { |
---|
4685 | E1000_WRITE_REG(&adapter->hw, E1000_IMS, |
---|
4686 | IMS_ENABLE_MASK); |
---|
4687 | } |
---|
4688 | E1000_WRITE_FLUSH(&adapter->hw); |
---|
4689 | |
---|
4690 | return; |
---|
4691 | } |
---|
4692 | |
---|
4693 | static void |
---|
4694 | igb_disable_intr(struct adapter *adapter) |
---|
4695 | { |
---|
4696 | if (adapter->msix_mem) { |
---|
4697 | E1000_WRITE_REG(&adapter->hw, E1000_EIMC, ~0); |
---|
4698 | E1000_WRITE_REG(&adapter->hw, E1000_EIAC, 0); |
---|
4699 | } |
---|
4700 | E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0); |
---|
4701 | E1000_WRITE_FLUSH(&adapter->hw); |
---|
4702 | return; |
---|
4703 | } |
---|
4704 | |
---|
4705 | /* |
---|
4706 | * Bit of a misnomer, what this really means is |
---|
4707 | * to enable OS management of the system... aka |
---|
4708 | * to disable special hardware management features |
---|
4709 | */ |
---|
4710 | static void |
---|
4711 | igb_init_manageability(struct adapter *adapter) |
---|
4712 | { |
---|
4713 | if (adapter->has_manage) { |
---|
4714 | int manc2h = E1000_READ_REG(&adapter->hw, E1000_MANC2H); |
---|
4715 | int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); |
---|
4716 | |
---|
4717 | /* disable hardware interception of ARP */ |
---|
4718 | manc &= ~(E1000_MANC_ARP_EN); |
---|
4719 | |
---|
4720 | /* enable receiving management packets to the host */ |
---|
4721 | manc |= E1000_MANC_EN_MNG2HOST; |
---|
4722 | manc2h |= 1 << 5; /* Mng Port 623 */ |
---|
4723 | manc2h |= 1 << 6; /* Mng Port 664 */ |
---|
4724 | E1000_WRITE_REG(&adapter->hw, E1000_MANC2H, manc2h); |
---|
4725 | E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); |
---|
4726 | } |
---|
4727 | } |
---|
4728 | |
---|
4729 | /* |
---|
4730 | * Give control back to hardware management |
---|
4731 | * controller if there is one. |
---|
4732 | */ |
---|
4733 | static void |
---|
4734 | igb_release_manageability(struct adapter *adapter) |
---|
4735 | { |
---|
4736 | if (adapter->has_manage) { |
---|
4737 | int manc = E1000_READ_REG(&adapter->hw, E1000_MANC); |
---|
4738 | |
---|
4739 | /* re-enable hardware interception of ARP */ |
---|
4740 | manc |= E1000_MANC_ARP_EN; |
---|
4741 | manc &= ~E1000_MANC_EN_MNG2HOST; |
---|
4742 | |
---|
4743 | E1000_WRITE_REG(&adapter->hw, E1000_MANC, manc); |
---|
4744 | } |
---|
4745 | } |
---|
4746 | |
---|
4747 | /* |
---|
4748 | * igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit. |
---|
4749 | * For ASF and Pass Through versions of f/w this means that |
---|
4750 | * the driver is loaded. |
---|
4751 | * |
---|
4752 | */ |
---|
4753 | static void |
---|
4754 | igb_get_hw_control(struct adapter *adapter) |
---|
4755 | { |
---|
4756 | u32 ctrl_ext; |
---|
4757 | |
---|
4758 | if (adapter->hw.mac.type == e1000_vfadapt) |
---|
4759 | return; |
---|
4760 | |
---|
4761 | /* Let firmware know the driver has taken over */ |
---|
4762 | ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); |
---|
4763 | E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, |
---|
4764 | ctrl_ext | E1000_CTRL_EXT_DRV_LOAD); |
---|
4765 | } |
---|
4766 | |
---|
4767 | /* |
---|
4768 | * igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit. |
---|
4769 | * For ASF and Pass Through versions of f/w this means that the |
---|
4770 | * driver is no longer loaded. |
---|
4771 | * |
---|
4772 | */ |
---|
4773 | static void |
---|
4774 | igb_release_hw_control(struct adapter *adapter) |
---|
4775 | { |
---|
4776 | u32 ctrl_ext; |
---|
4777 | |
---|
4778 | if (adapter->hw.mac.type == e1000_vfadapt) |
---|
4779 | return; |
---|
4780 | |
---|
4781 | /* Let firmware taken over control of h/w */ |
---|
4782 | ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT); |
---|
4783 | E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT, |
---|
4784 | ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); |
---|
4785 | } |
---|
4786 | |
---|
4787 | static int |
---|
4788 | igb_is_valid_ether_addr(uint8_t *addr) |
---|
4789 | { |
---|
4790 | char zero_addr[6] = { 0, 0, 0, 0, 0, 0 }; |
---|
4791 | |
---|
4792 | if ((addr[0] & 1) || (!bcmp(addr, zero_addr, ETHER_ADDR_LEN))) { |
---|
4793 | return (FALSE); |
---|
4794 | } |
---|
4795 | |
---|
4796 | return (TRUE); |
---|
4797 | } |
---|
4798 | |
---|
4799 | |
---|
4800 | /* |
---|
4801 | * Enable PCI Wake On Lan capability |
---|
4802 | */ |
---|
4803 | static void |
---|
4804 | igb_enable_wakeup(device_t dev) |
---|
4805 | { |
---|
4806 | u16 cap, status; |
---|
4807 | u8 id; |
---|
4808 | |
---|
4809 | /* First find the capabilities pointer*/ |
---|
4810 | cap = pci_read_config(dev, PCIR_CAP_PTR, 2); |
---|
4811 | /* Read the PM Capabilities */ |
---|
4812 | id = pci_read_config(dev, cap, 1); |
---|
4813 | if (id != PCIY_PMG) /* Something wrong */ |
---|
4814 | return; |
---|
4815 | /* OK, we have the power capabilities, so |
---|
4816 | now get the status register */ |
---|
4817 | cap += PCIR_POWER_STATUS; |
---|
4818 | status = pci_read_config(dev, cap, 2); |
---|
4819 | status |= PCIM_PSTAT_PME | PCIM_PSTAT_PMEENABLE; |
---|
4820 | pci_write_config(dev, cap, status, 2); |
---|
4821 | return; |
---|
4822 | } |
---|
4823 | |
---|
4824 | static void |
---|
4825 | igb_led_func(void *arg, int onoff) |
---|
4826 | { |
---|
4827 | struct adapter *adapter = arg; |
---|
4828 | |
---|
4829 | IGB_CORE_LOCK(adapter); |
---|
4830 | if (onoff) { |
---|
4831 | e1000_setup_led(&adapter->hw); |
---|
4832 | e1000_led_on(&adapter->hw); |
---|
4833 | } else { |
---|
4834 | e1000_led_off(&adapter->hw); |
---|
4835 | e1000_cleanup_led(&adapter->hw); |
---|
4836 | } |
---|
4837 | IGB_CORE_UNLOCK(adapter); |
---|
4838 | } |
---|
4839 | |
---|
4840 | /********************************************************************** |
---|
4841 | * |
---|
4842 | * Update the board statistics counters. |
---|
4843 | * |
---|
4844 | **********************************************************************/ |
---|
4845 | static void |
---|
4846 | igb_update_stats_counters(struct adapter *adapter) |
---|
4847 | { |
---|
4848 | struct ifnet *ifp; |
---|
4849 | struct e1000_hw *hw = &adapter->hw; |
---|
4850 | struct e1000_hw_stats *stats; |
---|
4851 | |
---|
4852 | /* |
---|
4853 | ** The virtual function adapter has only a |
---|
4854 | ** small controlled set of stats, do only |
---|
4855 | ** those and return. |
---|
4856 | */ |
---|
4857 | if (adapter->hw.mac.type == e1000_vfadapt) { |
---|
4858 | igb_update_vf_stats_counters(adapter); |
---|
4859 | return; |
---|
4860 | } |
---|
4861 | |
---|
4862 | stats = (struct e1000_hw_stats *)adapter->stats; |
---|
4863 | |
---|
4864 | if(adapter->hw.phy.media_type == e1000_media_type_copper || |
---|
4865 | (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)) { |
---|
4866 | stats->symerrs += |
---|
4867 | E1000_READ_REG(hw,E1000_SYMERRS); |
---|
4868 | stats->sec += E1000_READ_REG(hw, E1000_SEC); |
---|
4869 | } |
---|
4870 | |
---|
4871 | stats->crcerrs += E1000_READ_REG(hw, E1000_CRCERRS); |
---|
4872 | stats->mpc += E1000_READ_REG(hw, E1000_MPC); |
---|
4873 | stats->scc += E1000_READ_REG(hw, E1000_SCC); |
---|
4874 | stats->ecol += E1000_READ_REG(hw, E1000_ECOL); |
---|
4875 | |
---|
4876 | stats->mcc += E1000_READ_REG(hw, E1000_MCC); |
---|
4877 | stats->latecol += E1000_READ_REG(hw, E1000_LATECOL); |
---|
4878 | stats->colc += E1000_READ_REG(hw, E1000_COLC); |
---|
4879 | stats->dc += E1000_READ_REG(hw, E1000_DC); |
---|
4880 | stats->rlec += E1000_READ_REG(hw, E1000_RLEC); |
---|
4881 | stats->xonrxc += E1000_READ_REG(hw, E1000_XONRXC); |
---|
4882 | stats->xontxc += E1000_READ_REG(hw, E1000_XONTXC); |
---|
4883 | /* |
---|
4884 | ** For watchdog management we need to know if we have been |
---|
4885 | ** paused during the last interval, so capture that here. |
---|
4886 | */ |
---|
4887 | adapter->pause_frames = E1000_READ_REG(&adapter->hw, E1000_XOFFRXC); |
---|
4888 | stats->xoffrxc += adapter->pause_frames; |
---|
4889 | stats->xofftxc += E1000_READ_REG(hw, E1000_XOFFTXC); |
---|
4890 | stats->fcruc += E1000_READ_REG(hw, E1000_FCRUC); |
---|
4891 | stats->prc64 += E1000_READ_REG(hw, E1000_PRC64); |
---|
4892 | stats->prc127 += E1000_READ_REG(hw, E1000_PRC127); |
---|
4893 | stats->prc255 += E1000_READ_REG(hw, E1000_PRC255); |
---|
4894 | stats->prc511 += E1000_READ_REG(hw, E1000_PRC511); |
---|
4895 | stats->prc1023 += E1000_READ_REG(hw, E1000_PRC1023); |
---|
4896 | stats->prc1522 += E1000_READ_REG(hw, E1000_PRC1522); |
---|
4897 | stats->gprc += E1000_READ_REG(hw, E1000_GPRC); |
---|
4898 | stats->bprc += E1000_READ_REG(hw, E1000_BPRC); |
---|
4899 | stats->mprc += E1000_READ_REG(hw, E1000_MPRC); |
---|
4900 | stats->gptc += E1000_READ_REG(hw, E1000_GPTC); |
---|
4901 | |
---|
4902 | /* For the 64-bit byte counters the low dword must be read first. */ |
---|
4903 | /* Both registers clear on the read of the high dword */ |
---|
4904 | |
---|
4905 | stats->gorc += E1000_READ_REG(hw, E1000_GORCL) + |
---|
4906 | ((u64)E1000_READ_REG(hw, E1000_GORCH) << 32); |
---|
4907 | stats->gotc += E1000_READ_REG(hw, E1000_GOTCL) + |
---|
4908 | ((u64)E1000_READ_REG(hw, E1000_GOTCH) << 32); |
---|
4909 | |
---|
4910 | stats->rnbc += E1000_READ_REG(hw, E1000_RNBC); |
---|
4911 | stats->ruc += E1000_READ_REG(hw, E1000_RUC); |
---|
4912 | stats->rfc += E1000_READ_REG(hw, E1000_RFC); |
---|
4913 | stats->roc += E1000_READ_REG(hw, E1000_ROC); |
---|
4914 | stats->rjc += E1000_READ_REG(hw, E1000_RJC); |
---|
4915 | |
---|
4916 | stats->tor += E1000_READ_REG(hw, E1000_TORH); |
---|
4917 | stats->tot += E1000_READ_REG(hw, E1000_TOTH); |
---|
4918 | |
---|
4919 | stats->tpr += E1000_READ_REG(hw, E1000_TPR); |
---|
4920 | stats->tpt += E1000_READ_REG(hw, E1000_TPT); |
---|
4921 | stats->ptc64 += E1000_READ_REG(hw, E1000_PTC64); |
---|
4922 | stats->ptc127 += E1000_READ_REG(hw, E1000_PTC127); |
---|
4923 | stats->ptc255 += E1000_READ_REG(hw, E1000_PTC255); |
---|
4924 | stats->ptc511 += E1000_READ_REG(hw, E1000_PTC511); |
---|
4925 | stats->ptc1023 += E1000_READ_REG(hw, E1000_PTC1023); |
---|
4926 | stats->ptc1522 += E1000_READ_REG(hw, E1000_PTC1522); |
---|
4927 | stats->mptc += E1000_READ_REG(hw, E1000_MPTC); |
---|
4928 | stats->bptc += E1000_READ_REG(hw, E1000_BPTC); |
---|
4929 | |
---|
4930 | /* Interrupt Counts */ |
---|
4931 | |
---|
4932 | stats->iac += E1000_READ_REG(hw, E1000_IAC); |
---|
4933 | stats->icrxptc += E1000_READ_REG(hw, E1000_ICRXPTC); |
---|
4934 | stats->icrxatc += E1000_READ_REG(hw, E1000_ICRXATC); |
---|
4935 | stats->ictxptc += E1000_READ_REG(hw, E1000_ICTXPTC); |
---|
4936 | stats->ictxatc += E1000_READ_REG(hw, E1000_ICTXATC); |
---|
4937 | stats->ictxqec += E1000_READ_REG(hw, E1000_ICTXQEC); |
---|
4938 | stats->ictxqmtc += E1000_READ_REG(hw, E1000_ICTXQMTC); |
---|
4939 | stats->icrxdmtc += E1000_READ_REG(hw, E1000_ICRXDMTC); |
---|
4940 | stats->icrxoc += E1000_READ_REG(hw, E1000_ICRXOC); |
---|
4941 | |
---|
4942 | /* Host to Card Statistics */ |
---|
4943 | |
---|
4944 | stats->cbtmpc += E1000_READ_REG(hw, E1000_CBTMPC); |
---|
4945 | stats->htdpmc += E1000_READ_REG(hw, E1000_HTDPMC); |
---|
4946 | stats->cbrdpc += E1000_READ_REG(hw, E1000_CBRDPC); |
---|
4947 | stats->cbrmpc += E1000_READ_REG(hw, E1000_CBRMPC); |
---|
4948 | stats->rpthc += E1000_READ_REG(hw, E1000_RPTHC); |
---|
4949 | stats->hgptc += E1000_READ_REG(hw, E1000_HGPTC); |
---|
4950 | stats->htcbdpc += E1000_READ_REG(hw, E1000_HTCBDPC); |
---|
4951 | stats->hgorc += (E1000_READ_REG(hw, E1000_HGORCL) + |
---|
4952 | ((u64)E1000_READ_REG(hw, E1000_HGORCH) << 32)); |
---|
4953 | stats->hgotc += (E1000_READ_REG(hw, E1000_HGOTCL) + |
---|
4954 | ((u64)E1000_READ_REG(hw, E1000_HGOTCH) << 32)); |
---|
4955 | stats->lenerrs += E1000_READ_REG(hw, E1000_LENERRS); |
---|
4956 | stats->scvpc += E1000_READ_REG(hw, E1000_SCVPC); |
---|
4957 | stats->hrmpc += E1000_READ_REG(hw, E1000_HRMPC); |
---|
4958 | |
---|
4959 | stats->algnerrc += E1000_READ_REG(hw, E1000_ALGNERRC); |
---|
4960 | stats->rxerrc += E1000_READ_REG(hw, E1000_RXERRC); |
---|
4961 | stats->tncrs += E1000_READ_REG(hw, E1000_TNCRS); |
---|
4962 | stats->cexterr += E1000_READ_REG(hw, E1000_CEXTERR); |
---|
4963 | stats->tsctc += E1000_READ_REG(hw, E1000_TSCTC); |
---|
4964 | stats->tsctfc += E1000_READ_REG(hw, E1000_TSCTFC); |
---|
4965 | |
---|
4966 | ifp = adapter->ifp; |
---|
4967 | ifp->if_collisions = stats->colc; |
---|
4968 | |
---|
4969 | /* Rx Errors */ |
---|
4970 | ifp->if_ierrors = adapter->dropped_pkts + stats->rxerrc + |
---|
4971 | stats->crcerrs + stats->algnerrc + |
---|
4972 | stats->ruc + stats->roc + stats->mpc + stats->cexterr; |
---|
4973 | |
---|
4974 | /* Tx Errors */ |
---|
4975 | ifp->if_oerrors = stats->ecol + |
---|
4976 | stats->latecol + adapter->watchdog_events; |
---|
4977 | |
---|
4978 | /* Driver specific counters */ |
---|
4979 | adapter->device_control = E1000_READ_REG(hw, E1000_CTRL); |
---|
4980 | adapter->rx_control = E1000_READ_REG(hw, E1000_RCTL); |
---|
4981 | adapter->int_mask = E1000_READ_REG(hw, E1000_IMS); |
---|
4982 | adapter->eint_mask = E1000_READ_REG(hw, E1000_EIMS); |
---|
4983 | adapter->packet_buf_alloc_tx = |
---|
4984 | ((E1000_READ_REG(hw, E1000_PBA) & 0xffff0000) >> 16); |
---|
4985 | adapter->packet_buf_alloc_rx = |
---|
4986 | (E1000_READ_REG(hw, E1000_PBA) & 0xffff); |
---|
4987 | } |
---|
4988 | |
---|
4989 | |
---|
4990 | /********************************************************************** |
---|
4991 | * |
---|
4992 | * Initialize the VF board statistics counters. |
---|
4993 | * |
---|
4994 | **********************************************************************/ |
---|
static void
igb_vf_init_stats(struct adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_vf_stats *stats;

	/* adapter->stats is allocated elsewhere; bail if absent */
	stats = (struct e1000_vf_stats *)adapter->stats;
	if (stats == NULL)
		return;
	/*
	 * Snapshot the current hardware counter values into the
	 * last_* fields so the first call to
	 * igb_update_vf_stats_counters() computes correct deltas.
	 */
	stats->last_gprc = E1000_READ_REG(hw, E1000_VFGPRC);
	stats->last_gorc = E1000_READ_REG(hw, E1000_VFGORC);
	stats->last_gptc = E1000_READ_REG(hw, E1000_VFGPTC);
	stats->last_gotc = E1000_READ_REG(hw, E1000_VFGOTC);
	stats->last_mprc = E1000_READ_REG(hw, E1000_VFMPRC);
}
---|
5010 | |
---|
5011 | /********************************************************************** |
---|
5012 | * |
---|
5013 | * Update the VF board statistics counters. |
---|
5014 | * |
---|
5015 | **********************************************************************/ |
---|
static void
igb_update_vf_stats_counters(struct adapter *adapter)
{
	/* 'hw' is referenced by the UPDATE_VF_REG() macro expansion */
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_vf_stats *stats;

	/* No link -> counters cannot have advanced; skip the reads */
	if (adapter->link_speed == 0)
		return;

	stats = (struct e1000_vf_stats *)adapter->stats;

	/*
	 * UPDATE_VF_REG(reg, last, cur) presumably reads 'reg',
	 * accumulates the delta since 'last' into 'cur', and
	 * refreshes 'last' — handling 32-bit counter wrap.
	 * NOTE(review): macro defined elsewhere; confirm semantics.
	 */
	UPDATE_VF_REG(E1000_VFGPRC,
	    stats->last_gprc, stats->gprc);
	UPDATE_VF_REG(E1000_VFGORC,
	    stats->last_gorc, stats->gorc);
	UPDATE_VF_REG(E1000_VFGPTC,
	    stats->last_gptc, stats->gptc);
	UPDATE_VF_REG(E1000_VFGOTC,
	    stats->last_gotc, stats->gotc);
	UPDATE_VF_REG(E1000_VFMPRC,
	    stats->last_mprc, stats->mprc);
}
---|
5038 | |
---|
5039 | /* Export a single 32-bit register via a read-only sysctl. */ |
---|
static int
igb_sysctl_reg_handler(SYSCTL_HANDLER_ARGS)
{
	struct adapter *adapter;
	u_int val;

	/* oid_arg1 = adapter, oid_arg2 = register offset to expose */
	adapter = oidp->oid_arg1;
	val = E1000_READ_REG(&adapter->hw, oidp->oid_arg2);
	/*
	 * 'val' is a local copy, so any value written via sysctl is
	 * silently discarded: the register itself is never written.
	 */
	return (sysctl_handle_int(oidp, &val, 0, req));
}
---|
5050 | |
---|
5051 | /* |
---|
5052 | ** Tuneable interrupt rate handler |
---|
5053 | */ |
---|
5054 | static int |
---|
5055 | igb_sysctl_interrupt_rate_handler(SYSCTL_HANDLER_ARGS) |
---|
5056 | { |
---|
5057 | struct igb_queue *que = ((struct igb_queue *)oidp->oid_arg1); |
---|
5058 | int error; |
---|
5059 | u32 reg, usec, rate; |
---|
5060 | |
---|
5061 | reg = E1000_READ_REG(&que->adapter->hw, E1000_EITR(que->msix)); |
---|
5062 | usec = ((reg & 0x7FFC) >> 2); |
---|
5063 | if (usec > 0) |
---|
5064 | rate = 1000000 / usec; |
---|
5065 | else |
---|
5066 | rate = 0; |
---|
5067 | error = sysctl_handle_int(oidp, &rate, 0, req); |
---|
5068 | if (error || !req->newptr) |
---|
5069 | return error; |
---|
5070 | return 0; |
---|
5071 | } |
---|
5072 | |
---|
5073 | /* |
---|
5074 | * Add sysctl variables, one per statistic, to the system. |
---|
5075 | */ |
---|
static void
igb_add_hw_stats(struct adapter *adapter)
{
	device_t dev = adapter->dev;

	struct tx_ring *txr = adapter->tx_rings;
	struct rx_ring *rxr = adapter->rx_rings;

	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
	struct sysctl_oid *tree = device_get_sysctl_tree(dev);
	struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
	struct e1000_hw_stats *stats = adapter->stats;

	struct sysctl_oid *stat_node, *queue_node, *int_node, *host_node;
	struct sysctl_oid_list *stat_list, *queue_list, *int_list, *host_list;

#define QUEUE_NAME_LEN 32
	char namebuf[QUEUE_NAME_LEN];

	/* Driver Statistics */
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "link_irq",
	    CTLFLAG_RD, &adapter->link_irq, 0,
	    "Link MSIX IRQ Handled");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "dropped",
	    CTLFLAG_RD, &adapter->dropped_pkts,
	    "Driver dropped packets");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_dma_fail",
	    CTLFLAG_RD, &adapter->no_tx_dma_setup,
	    "Driver tx dma failure in xmit");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_overruns",
	    CTLFLAG_RD, &adapter->rx_overruns,
	    "RX overruns");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_timeouts",
	    CTLFLAG_RD, &adapter->watchdog_events,
	    "Watchdog timeouts");

	/* Register/PBA snapshots refreshed by igb_update_stats_counters() */
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "device_control",
	    CTLFLAG_RD, &adapter->device_control,
	    "Device Control Register");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_control",
	    CTLFLAG_RD, &adapter->rx_control,
	    "Receiver Control Register");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "interrupt_mask",
	    CTLFLAG_RD, &adapter->int_mask,
	    "Interrupt Mask");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "extended_int_mask",
	    CTLFLAG_RD, &adapter->eint_mask,
	    "Extended Interrupt Mask");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_buf_alloc",
	    CTLFLAG_RD, &adapter->packet_buf_alloc_tx,
	    "Transmit Buffer Packet Allocation");
	SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "rx_buf_alloc",
	    CTLFLAG_RD, &adapter->packet_buf_alloc_rx,
	    "Receive Buffer Packet Allocation");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_high_water",
	    CTLFLAG_RD, &adapter->hw.fc.high_water, 0,
	    "Flow Control High Watermark");
	SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "fc_low_water",
	    CTLFLAG_RD, &adapter->hw.fc.low_water, 0,
	    "Flow Control Low Watermark");

	/* One "queueN" sub-node per rx/tx ring pair */
	for (int i = 0; i < adapter->num_queues; i++, rxr++, txr++) {
		struct lro_ctrl *lro = &rxr->lro;

		snprintf(namebuf, QUEUE_NAME_LEN, "queue%d", i);
		queue_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, namebuf,
		    CTLFLAG_RD, NULL, "Queue Name");
		queue_list = SYSCTL_CHILDREN(queue_node);

		/*
		 * NOTE(review): sizeof(&adapter->queues[i]) is the size of
		 * a pointer, not of the struct; the handler ignores the
		 * size argument, but confirm this is intentional.
		 */
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "interrupt_rate",
		    CTLFLAG_RD, &adapter->queues[i],
		    sizeof(&adapter->queues[i]),
		    igb_sysctl_interrupt_rate_handler,
		    "IU", "Interrupt Rate");

		/* Head/tail pointers read live via igb_sysctl_reg_handler */
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_head",
		    CTLFLAG_RD, adapter, E1000_TDH(txr->me),
		    igb_sysctl_reg_handler, "IU",
		    "Transmit Descriptor Head");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "txd_tail",
		    CTLFLAG_RD, adapter, E1000_TDT(txr->me),
		    igb_sysctl_reg_handler, "IU",
		    "Transmit Descriptor Tail");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
		    CTLFLAG_RD, &txr->no_desc_avail,
		    "Queue No Descriptor Available");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_packets",
		    CTLFLAG_RD, &txr->tx_packets,
		    "Queue Packets Transmitted");

		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_head",
		    CTLFLAG_RD, adapter, E1000_RDH(rxr->me),
		    igb_sysctl_reg_handler, "IU",
		    "Receive Descriptor Head");
		SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "rxd_tail",
		    CTLFLAG_RD, adapter, E1000_RDT(rxr->me),
		    igb_sysctl_reg_handler, "IU",
		    "Receive Descriptor Tail");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_packets",
		    CTLFLAG_RD, &rxr->rx_packets,
		    "Queue Packets Received");
		SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
		    CTLFLAG_RD, &rxr->rx_bytes,
		    "Queue Bytes Received");
		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_queued",
		    CTLFLAG_RD, &lro->lro_queued, 0,
		    "LRO Queued");
		SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "lro_flushed",
		    CTLFLAG_RD, &lro->lro_flushed, 0,
		    "LRO Flushed");
	}

	/* MAC stats get their own sub node */

	stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac_stats",
	    CTLFLAG_RD, NULL, "MAC Statistics");
	stat_list = SYSCTL_CHILDREN(stat_node);

	/*
	** VF adapter has a very limited set of stats
	** since its not managing the metal, so to speak.
	**
	** NOTE(review): 'stats' is typed e1000_hw_stats here, but for
	** VF adapters adapter->stats presumably points at an
	** e1000_vf_stats (see igb_vf_init_stats); the gprc/gptc/gorc/
	** gotc/mprc field offsets must match between the two structs
	** for these pointers to be correct — verify.
	*/
	if (adapter->hw.mac.type == e1000_vfadapt) {
		SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
		    CTLFLAG_RD, &stats->gprc,
		    "Good Packets Received");
		SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
		    CTLFLAG_RD, &stats->gptc,
		    "Good Packets Transmitted");
		SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
		    CTLFLAG_RD, &stats->gorc,
		    "Good Octets Received");
		SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
		    CTLFLAG_RD, &stats->gotc,
		    "Good Octets Transmitted");
		SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
		    CTLFLAG_RD, &stats->mprc,
		    "Multicast Packets Received");
		return;
	}

	/* Full (physical function) MAC statistics */
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "excess_coll",
	    CTLFLAG_RD, &stats->ecol,
	    "Excessive collisions");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "single_coll",
	    CTLFLAG_RD, &stats->scc,
	    "Single collisions");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "multiple_coll",
	    CTLFLAG_RD, &stats->mcc,
	    "Multiple collisions");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "late_coll",
	    CTLFLAG_RD, &stats->latecol,
	    "Late collisions");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "collision_count",
	    CTLFLAG_RD, &stats->colc,
	    "Collision Count");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "symbol_errors",
	    CTLFLAG_RD, &stats->symerrs,
	    "Symbol Errors");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "sequence_errors",
	    CTLFLAG_RD, &stats->sec,
	    "Sequence Errors");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "defer_count",
	    CTLFLAG_RD, &stats->dc,
	    "Defer Count");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "missed_packets",
	    CTLFLAG_RD, &stats->mpc,
	    "Missed Packets");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_no_buff",
	    CTLFLAG_RD, &stats->rnbc,
	    "Receive No Buffers");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_undersize",
	    CTLFLAG_RD, &stats->ruc,
	    "Receive Undersize");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_fragmented",
	    CTLFLAG_RD, &stats->rfc,
	    "Fragmented Packets Received ");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_oversize",
	    CTLFLAG_RD, &stats->roc,
	    "Oversized Packets Received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_jabber",
	    CTLFLAG_RD, &stats->rjc,
	    "Recevied Jabber");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "recv_errs",
	    CTLFLAG_RD, &stats->rxerrc,
	    "Receive Errors");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "crc_errs",
	    CTLFLAG_RD, &stats->crcerrs,
	    "CRC errors");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "alignment_errs",
	    CTLFLAG_RD, &stats->algnerrc,
	    "Alignment Errors");
	/* On 82575 these are collision counts */
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "coll_ext_errs",
	    CTLFLAG_RD, &stats->cexterr,
	    "Collision/Carrier extension errors");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_recvd",
	    CTLFLAG_RD, &stats->xonrxc,
	    "XON Received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xon_txd",
	    CTLFLAG_RD, &stats->xontxc,
	    "XON Transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_recvd",
	    CTLFLAG_RD, &stats->xoffrxc,
	    "XOFF Received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "xoff_txd",
	    CTLFLAG_RD, &stats->xofftxc,
	    "XOFF Transmitted");
	/* Packet Reception Stats */
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_recvd",
	    CTLFLAG_RD, &stats->tpr,
	    "Total Packets Received ");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_recvd",
	    CTLFLAG_RD, &stats->gprc,
	    "Good Packets Received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_recvd",
	    CTLFLAG_RD, &stats->bprc,
	    "Broadcast Packets Received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_recvd",
	    CTLFLAG_RD, &stats->mprc,
	    "Multicast Packets Received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_64",
	    CTLFLAG_RD, &stats->prc64,
	    "64 byte frames received ");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_65_127",
	    CTLFLAG_RD, &stats->prc127,
	    "65-127 byte frames received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_128_255",
	    CTLFLAG_RD, &stats->prc255,
	    "128-255 byte frames received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_256_511",
	    CTLFLAG_RD, &stats->prc511,
	    "256-511 byte frames received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_512_1023",
	    CTLFLAG_RD, &stats->prc1023,
	    "512-1023 byte frames received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "rx_frames_1024_1522",
	    CTLFLAG_RD, &stats->prc1522,
	    "1023-1522 byte frames received");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_recvd",
	    CTLFLAG_RD, &stats->gorc,
	    "Good Octets Received");

	/* Packet Transmission Stats */
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_octets_txd",
	    CTLFLAG_RD, &stats->gotc,
	    "Good Octets Transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "total_pkts_txd",
	    CTLFLAG_RD, &stats->tpt,
	    "Total Packets Transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "good_pkts_txd",
	    CTLFLAG_RD, &stats->gptc,
	    "Good Packets Transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "bcast_pkts_txd",
	    CTLFLAG_RD, &stats->bptc,
	    "Broadcast Packets Transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "mcast_pkts_txd",
	    CTLFLAG_RD, &stats->mptc,
	    "Multicast Packets Transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_64",
	    CTLFLAG_RD, &stats->ptc64,
	    "64 byte frames transmitted ");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_65_127",
	    CTLFLAG_RD, &stats->ptc127,
	    "65-127 byte frames transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_128_255",
	    CTLFLAG_RD, &stats->ptc255,
	    "128-255 byte frames transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_256_511",
	    CTLFLAG_RD, &stats->ptc511,
	    "256-511 byte frames transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_512_1023",
	    CTLFLAG_RD, &stats->ptc1023,
	    "512-1023 byte frames transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tx_frames_1024_1522",
	    CTLFLAG_RD, &stats->ptc1522,
	    "1024-1522 byte frames transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_txd",
	    CTLFLAG_RD, &stats->tsctc,
	    "TSO Contexts Transmitted");
	SYSCTL_ADD_QUAD(ctx, stat_list, OID_AUTO, "tso_ctx_fail",
	    CTLFLAG_RD, &stats->tsctfc,
	    "TSO Contexts Failed");


	/* Interrupt Stats */

	int_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "interrupts",
	    CTLFLAG_RD, NULL, "Interrupt Statistics");
	int_list = SYSCTL_CHILDREN(int_node);

	SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "asserts",
	    CTLFLAG_RD, &stats->iac,
	    "Interrupt Assertion Count");

	SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_pkt_timer",
	    CTLFLAG_RD, &stats->icrxptc,
	    "Interrupt Cause Rx Pkt Timer Expire Count");

	SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_abs_timer",
	    CTLFLAG_RD, &stats->icrxatc,
	    "Interrupt Cause Rx Abs Timer Expire Count");

	SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_pkt_timer",
	    CTLFLAG_RD, &stats->ictxptc,
	    "Interrupt Cause Tx Pkt Timer Expire Count");

	SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_abs_timer",
	    CTLFLAG_RD, &stats->ictxatc,
	    "Interrupt Cause Tx Abs Timer Expire Count");

	SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_empty",
	    CTLFLAG_RD, &stats->ictxqec,
	    "Interrupt Cause Tx Queue Empty Count");

	SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "tx_queue_min_thresh",
	    CTLFLAG_RD, &stats->ictxqmtc,
	    "Interrupt Cause Tx Queue Min Thresh Count");

	SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_desc_min_thresh",
	    CTLFLAG_RD, &stats->icrxdmtc,
	    "Interrupt Cause Rx Desc Min Thresh Count");

	SYSCTL_ADD_QUAD(ctx, int_list, OID_AUTO, "rx_overrun",
	    CTLFLAG_RD, &stats->icrxoc,
	    "Interrupt Cause Receiver Overrun Count");

	/* Host to Card Stats */

	host_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "host",
	    CTLFLAG_RD, NULL,
	    "Host to Card Statistics");

	host_list = SYSCTL_CHILDREN(host_node);

	SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt",
	    CTLFLAG_RD, &stats->cbtmpc,
	    "Circuit Breaker Tx Packet Count");

	SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "host_tx_pkt_discard",
	    CTLFLAG_RD, &stats->htdpmc,
	    "Host Transmit Discarded Packets");

	SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_pkt",
	    CTLFLAG_RD, &stats->rpthc,
	    "Rx Packets To Host");

	SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkts",
	    CTLFLAG_RD, &stats->cbrmpc,
	    "Circuit Breaker Rx Packet Count");

	SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_rx_pkt_drop",
	    CTLFLAG_RD, &stats->cbrdpc,
	    "Circuit Breaker Rx Dropped Count");

	SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_pkt",
	    CTLFLAG_RD, &stats->hgptc,
	    "Host Good Packets Tx Count");

	SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "breaker_tx_pkt_drop",
	    CTLFLAG_RD, &stats->htcbdpc,
	    "Host Tx Circuit Breaker Dropped Count");

	SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "rx_good_bytes",
	    CTLFLAG_RD, &stats->hgorc,
	    "Host Good Octets Received Count");

	SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "tx_good_bytes",
	    CTLFLAG_RD, &stats->hgotc,
	    "Host Good Octets Transmit Count");

	SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "length_errors",
	    CTLFLAG_RD, &stats->lenerrs,
	    "Length Errors");

	SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "serdes_violation_pkt",
	    CTLFLAG_RD, &stats->scvpc,
	    "SerDes/SGMII Code Violation Pkt Count");

	SYSCTL_ADD_QUAD(ctx, host_list, OID_AUTO, "header_redir_missed",
	    CTLFLAG_RD, &stats->hrmpc,
	    "Header Redirection Missed Packet Count");
}
---|
5459 | |
---|
5460 | |
---|
5461 | /********************************************************************** |
---|
5462 | * |
---|
5463 | * This routine provides a way to dump out the adapter eeprom, |
---|
5464 | * often a useful debug/service tool. This only dumps the first |
---|
5465 | * 32 words, stuff that matters is in that extent. |
---|
5466 | * |
---|
5467 | **********************************************************************/ |
---|
5468 | static int |
---|
5469 | igb_sysctl_nvm_info(SYSCTL_HANDLER_ARGS) |
---|
5470 | { |
---|
5471 | struct adapter *adapter; |
---|
5472 | int error; |
---|
5473 | int result; |
---|
5474 | |
---|
5475 | result = -1; |
---|
5476 | error = sysctl_handle_int(oidp, &result, 0, req); |
---|
5477 | |
---|
5478 | if (error || !req->newptr) |
---|
5479 | return (error); |
---|
5480 | |
---|
5481 | /* |
---|
5482 | * This value will cause a hex dump of the |
---|
5483 | * first 32 16-bit words of the EEPROM to |
---|
5484 | * the screen. |
---|
5485 | */ |
---|
5486 | if (result == 1) { |
---|
5487 | adapter = (struct adapter *)arg1; |
---|
5488 | igb_print_nvm_info(adapter); |
---|
5489 | } |
---|
5490 | |
---|
5491 | return (error); |
---|
5492 | } |
---|
5493 | |
---|
5494 | static void |
---|
5495 | igb_print_nvm_info(struct adapter *adapter) |
---|
5496 | { |
---|
5497 | u16 eeprom_data; |
---|
5498 | int i, j, row = 0; |
---|
5499 | |
---|
5500 | /* Its a bit crude, but it gets the job done */ |
---|
5501 | printf("\nInterface EEPROM Dump:\n"); |
---|
5502 | printf("Offset\n0x0000 "); |
---|
5503 | for (i = 0, j = 0; i < 32; i++, j++) { |
---|
5504 | if (j == 8) { /* Make the offset block */ |
---|
5505 | j = 0; ++row; |
---|
5506 | printf("\n0x00%x0 ",row); |
---|
5507 | } |
---|
5508 | e1000_read_nvm(&adapter->hw, i, 1, &eeprom_data); |
---|
5509 | printf("%04x ", eeprom_data); |
---|
5510 | } |
---|
5511 | printf("\n"); |
---|
5512 | } |
---|
5513 | |
---|
5514 | static void |
---|
5515 | igb_add_rx_process_limit(struct adapter *adapter, const char *name, |
---|
5516 | const char *description, int *limit, int value) |
---|
5517 | { |
---|
5518 | *limit = value; |
---|
5519 | SYSCTL_ADD_INT(device_get_sysctl_ctx(adapter->dev), |
---|
5520 | SYSCTL_CHILDREN(device_get_sysctl_tree(adapter->dev)), |
---|
5521 | OID_AUTO, name, CTLTYPE_INT|CTLFLAG_RW, limit, value, description); |
---|
5522 | } |
---|