1 | #include <machine/rtems-bsd-kernel-space.h> |
---|
2 | |
---|
3 | /*- |
---|
4 | * Copyright (c) 2012-2014 Thomas Skibo <thomasskibo@yahoo.com> |
---|
5 | * All rights reserved. |
---|
6 | * |
---|
7 | * Redistribution and use in source and binary forms, with or without |
---|
8 | * modification, are permitted provided that the following conditions |
---|
9 | * are met: |
---|
10 | * 1. Redistributions of source code must retain the above copyright |
---|
11 | * notice, this list of conditions and the following disclaimer. |
---|
12 | * 2. Redistributions in binary form must reproduce the above copyright |
---|
13 | * notice, this list of conditions and the following disclaimer in the |
---|
14 | * documentation and/or other materials provided with the distribution. |
---|
15 | * |
---|
16 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
---|
17 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
---|
18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
---|
19 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
---|
20 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
---|
21 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
---|
22 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
---|
23 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
---|
24 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
---|
25 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
---|
26 | * SUCH DAMAGE. |
---|
27 | */ |
---|
28 | |
---|
29 | /* |
---|
30 | * A network interface driver for Cadence GEM Gigabit Ethernet |
---|
31 | * interface such as the one used in Xilinx Zynq-7000 SoC. |
---|
32 | * |
---|
33 | * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual. |
---|
34 | * (v1.4) November 16, 2012. Xilinx doc UG585. GEM is covered in Ch. 16 |
---|
35 | * and register definitions are in appendix B.18. |
---|
36 | */ |
---|
37 | |
---|
38 | #include <sys/cdefs.h> |
---|
39 | __FBSDID("$FreeBSD$"); |
---|
40 | |
---|
41 | #include <rtems/bsd/sys/param.h> |
---|
42 | #include <sys/systm.h> |
---|
43 | #include <sys/bus.h> |
---|
44 | #include <sys/kernel.h> |
---|
45 | #include <sys/malloc.h> |
---|
46 | #include <sys/mbuf.h> |
---|
47 | #include <sys/module.h> |
---|
48 | #include <sys/rman.h> |
---|
49 | #include <sys/socket.h> |
---|
50 | #include <sys/sockio.h> |
---|
51 | #include <sys/sysctl.h> |
---|
52 | |
---|
53 | #include <machine/bus.h> |
---|
54 | |
---|
55 | #include <net/ethernet.h> |
---|
56 | #include <net/if.h> |
---|
57 | #include <net/if_var.h> |
---|
58 | #include <net/if_arp.h> |
---|
59 | #include <net/if_dl.h> |
---|
60 | #include <net/if_media.h> |
---|
61 | #include <net/if_mib.h> |
---|
62 | #include <net/if_types.h> |
---|
63 | |
---|
64 | #ifdef INET |
---|
65 | #include <netinet/in.h> |
---|
66 | #include <netinet/in_systm.h> |
---|
67 | #include <netinet/in_var.h> |
---|
68 | #include <netinet/ip.h> |
---|
69 | #endif |
---|
70 | |
---|
71 | #include <net/bpf.h> |
---|
72 | #include <net/bpfdesc.h> |
---|
73 | |
---|
74 | #ifndef __rtems__ |
---|
75 | #include <dev/fdt/fdt_common.h> |
---|
76 | #include <dev/ofw/ofw_bus.h> |
---|
77 | #include <dev/ofw/ofw_bus_subr.h> |
---|
78 | #endif /* __rtems__ */ |
---|
79 | |
---|
80 | #include <dev/mii/mii.h> |
---|
81 | #include <dev/mii/miivar.h> |
---|
82 | |
---|
83 | #include <dev/cadence/if_cgem_hw.h> |
---|
84 | |
---|
85 | #include <rtems/bsd/local/miibus_if.h> |
---|
86 | #ifdef __rtems__ |
---|
87 | #pragma GCC diagnostic ignored "-Wpointer-sign" |
---|
88 | #include <rtems/bsd/bsd.h> |
---|
89 | #endif /* __rtems__ */ |
---|
90 | |
---|
#define IF_CGEM_NAME "cgem"		/* interface name prefix */

#define CGEM_NUM_RX_DESCS 512	/* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS 512	/* size of transmit descriptor ring */

/* Larger of the two rings in bytes; used to size the descriptor DMA tag. */
#define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\
				CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc)))


/* Default for sysctl rxbufs. Must be < CGEM_NUM_RX_DESCS of course. */
#define DEFAULT_NUM_RX_BUFS	256	/* number of receive bufs to queue. */

#define TX_MAX_DMA_SEGS		8	/* maximum segs in a tx mbuf dma */

/* Checksum work the hardware can offload (used for if_hwassist). */
#define CGEM_CKSUM_ASSIST	(CSUM_IP | CSUM_TCP | CSUM_UDP | \
				 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
---|
107 | |
---|
/*
 * Per-device driver state.  Protected by sc_mtx except where noted.
 */
struct cgem_softc {
	struct ifnet		*ifp;		/* network interface */
	struct mtx		sc_mtx;		/* driver lock */
	device_t		dev;
	device_t		miibus;		/* attached MII bus */
	u_int			mii_media_active;	/* last active media */
	int			if_old_flags;	/* previous if_flags snapshot */
	struct resource		*mem_res;	/* register window */
	struct resource		*irq_res;	/* interrupt line */
	void			*intrhand;	/* interrupt handler cookie */
	struct callout		tick_ch;	/* periodic tick (cgem_tick) */
	/* Soft copy of the CGEM_NET_CTRL register; OR'd with one-shot
	 * command bits (e.g. START_TX) when written to hardware. */
	uint32_t		net_ctl_shadow;
	int			ref_clk_num;	/* arg for cgem_set_ref_clk() */
	u_char			eaddr[6];	/* MAC address in use */

	bus_dma_tag_t		desc_dma_tag;	/* tag for descriptor rings */
	bus_dma_tag_t		mbuf_dma_tag;	/* tag for rx/tx mbufs */

	/* receive descriptor ring */
	struct cgem_rx_desc volatile	*rxring;
	bus_addr_t		rxring_physaddr;
	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];
#ifndef __rtems__
	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS];
#endif /* __rtems__ */
	int			rxring_hd_ptr;	/* where to put rcv bufs */
	int			rxring_tl_ptr;	/* where to get receives */
	int			rxring_queued;	/* how many rcv bufs queued */
	bus_dmamap_t		rxring_dma_map;
	int			rxbufs;		/* tunable number rcv bufs */
	int			rxhangwar;	/* rx hang work-around */
	u_int			rxoverruns;	/* rx overruns */
	u_int			rxnobufs;	/* rx buf ring empty events */
	u_int			rxdmamapfails;	/* rx dmamap failures */
	uint32_t		rx_frames_prev;

	/* transmit descriptor ring */
	struct cgem_tx_desc volatile	*txring;
	bus_addr_t		txring_physaddr;
	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];
#ifndef __rtems__
	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS];
#endif /* __rtems__ */
	int			txring_hd_ptr;	/* where to put next xmits */
	int			txring_tl_ptr;	/* next xmit mbuf to free */
	int			txring_queued;	/* num xmits segs queued */
	bus_dmamap_t		txring_dma_map;
	u_int			txfull;		/* tx ring full events */
	u_int			txdefrags;	/* tx calls to m_defrag() */
	u_int			txdefragfails;	/* tx m_defrag() failures */
	u_int			txdmamapfails;	/* tx dmamap failures */

	/* hardware provided statistics (accumulated by cgem_poll_hw_stats) */
	struct cgem_hw_stats {
		uint64_t		tx_bytes;
		uint32_t		tx_frames;
		uint32_t		tx_frames_bcast;
		uint32_t		tx_frames_multi;
		uint32_t		tx_frames_pause;
		uint32_t		tx_frames_64b;
		uint32_t		tx_frames_65to127b;
		uint32_t		tx_frames_128to255b;
		uint32_t		tx_frames_256to511b;
		uint32_t		tx_frames_512to1023b;
		uint32_t		tx_frames_1024to1536b;
		uint32_t		tx_under_runs;
		uint32_t		tx_single_collisn;
		uint32_t		tx_multi_collisn;
		uint32_t		tx_excsv_collisn;
		uint32_t		tx_late_collisn;
		uint32_t		tx_deferred_frames;
		uint32_t		tx_carrier_sense_errs;

		uint64_t		rx_bytes;
		uint32_t		rx_frames;
		uint32_t		rx_frames_bcast;
		uint32_t		rx_frames_multi;
		uint32_t		rx_frames_pause;
		uint32_t		rx_frames_64b;
		uint32_t		rx_frames_65to127b;
		uint32_t		rx_frames_128to255b;
		uint32_t		rx_frames_256to511b;
		uint32_t		rx_frames_512to1023b;
		uint32_t		rx_frames_1024to1536b;
		uint32_t		rx_frames_undersize;
		uint32_t		rx_frames_oversize;
		uint32_t		rx_frames_jabber;
		uint32_t		rx_frames_fcs_errs;
		uint32_t		rx_frames_length_errs;
		uint32_t		rx_symbol_errs;
		uint32_t		rx_align_errs;
		uint32_t		rx_resource_errs;
		uint32_t		rx_overrun_errs;
		uint32_t		rx_ip_hdr_csum_errs;
		uint32_t		rx_tcp_csum_errs;
		uint32_t		rx_udp_csum_errs;
	} stats;
};
---|
206 | |
---|
/* Register access helpers. */
#define RD4(sc, off)		(bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val)	(bus_write_4((sc)->mem_res, (off), (val)))
/*
 * Fixed: the expansion was missing its closing parenthesis, which would
 * produce a syntax error at any use site of BARRIER().
 */
#define BARRIER(sc, off, len, flags) \
	(bus_barrier((sc)->mem_res, (off), (len), (flags)))

/* Driver lock helpers wrapping sc_mtx. */
#define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)		mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)	\
	mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
		 MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)
---|
219 | |
---|
220 | /* Allow platforms to optionally provide a way to set the reference clock. */ |
---|
221 | int cgem_set_ref_clk(int unit, int frequency); |
---|
222 | |
---|
223 | static devclass_t cgem_devclass; |
---|
224 | |
---|
225 | static int cgem_probe(device_t dev); |
---|
226 | static int cgem_attach(device_t dev); |
---|
227 | static int cgem_detach(device_t dev); |
---|
228 | static void cgem_tick(void *); |
---|
229 | static void cgem_intr(void *); |
---|
230 | |
---|
231 | static void cgem_mediachange(struct cgem_softc *, struct mii_data *); |
---|
232 | |
---|
/*
 * Determine the interface MAC address: use the first non-zero specific
 * address the boot loader left in the hardware registers; otherwise fall
 * back to a generated one.  The chosen address is written back into
 * specific-address slot 0 and the remaining slots are cleared.
 */
static void
cgem_get_mac(struct cgem_softc *sc, u_char eaddr[])
{
	int i;
#ifndef __rtems__
	uint32_t rnd;
#endif /* __rtems__ */

	/* See if boot loader gave us a MAC address already. */
	for (i = 0; i < 4; i++) {
		uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i));
		uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff;
		if (low != 0 || high != 0) {
			/* Registers hold the address in little-endian
			 * byte order: LOW has bytes 0-3, HI bytes 4-5. */
			eaddr[0] = low & 0xff;
			eaddr[1] = (low >> 8) & 0xff;
			eaddr[2] = (low >> 16) & 0xff;
			eaddr[3] = (low >> 24) & 0xff;
			eaddr[4] = high & 0xff;
			eaddr[5] = (high >> 8) & 0xff;
			break;
		}
	}

	/* No MAC from boot loader? Assign a random one. */
	if (i == 4) {
#ifndef __rtems__
		rnd = arc4random();

		/* 'b','s','d' prefix plus 24 random bits. */
		eaddr[0] = 'b';
		eaddr[1] = 's';
		eaddr[2] = 'd';
		eaddr[3] = (rnd >> 16) & 0xff;
		eaddr[4] = (rnd >> 8) & 0xff;
		eaddr[5] = rnd & 0xff;
#else /* __rtems__ */
		rtems_bsd_get_mac_address(device_get_name(sc->dev),
		    device_get_unit(sc->dev), eaddr);
#endif /* __rtems__ */

		/* NOTE(review): on RTEMS the address above comes from
		 * rtems_bsd_get_mac_address(), not a random source, so
		 * the "assigning random" wording below is misleading
		 * there -- confirm whether the message should differ. */
		device_printf(sc->dev, "no mac address found, assigning "
		    "random: %02x:%02x:%02x:%02x:%02x:%02x\n",
		    eaddr[0], eaddr[1], eaddr[2],
		    eaddr[3], eaddr[4], eaddr[5]);
	}

	/* Move address to first slot and zero out the rest. */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	for (i = 1; i < 4; i++) {
		WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0);
		WR4(sc, CGEM_SPEC_ADDR_HI(i), 0);
	}
}
---|
288 | |
---|
289 | /* cgem_mac_hash(): map 48-bit address to a 6-bit hash. |
---|
290 | * The 6-bit hash corresponds to a bit in a 64-bit hash |
---|
291 | * register. Setting that bit in the hash register enables |
---|
292 | * reception of all frames with a destination address that hashes |
---|
293 | * to that 6-bit value. |
---|
294 | * |
---|
295 | * The hash function is described in sec. 16.2.3 in the Zynq-7000 Tech |
---|
296 | * Reference Manual. Bits 0-5 in the hash are the exclusive-or of |
---|
297 | * every sixth bit in the destination address. |
---|
298 | */ |
---|
299 | static int |
---|
300 | cgem_mac_hash(u_char eaddr[]) |
---|
301 | { |
---|
302 | int hash; |
---|
303 | int i, j; |
---|
304 | |
---|
305 | hash = 0; |
---|
306 | for (i = 0; i < 6; i++) |
---|
307 | for (j = i; j < 48; j += 6) |
---|
308 | if ((eaddr[j >> 3] & (1 << (j & 7))) != 0) |
---|
309 | hash ^= (1 << i); |
---|
310 | |
---|
311 | return hash; |
---|
312 | } |
---|
313 | |
---|
314 | /* After any change in rx flags or multi-cast addresses, set up |
---|
315 | * hash registers and net config register bits. |
---|
316 | */ |
---|
317 | static void |
---|
318 | cgem_rx_filter(struct cgem_softc *sc) |
---|
319 | { |
---|
320 | struct ifnet *ifp = sc->ifp; |
---|
321 | struct ifmultiaddr *ifma; |
---|
322 | int index; |
---|
323 | uint32_t hash_hi, hash_lo; |
---|
324 | uint32_t net_cfg; |
---|
325 | |
---|
326 | hash_hi = 0; |
---|
327 | hash_lo = 0; |
---|
328 | |
---|
329 | net_cfg = RD4(sc, CGEM_NET_CFG); |
---|
330 | |
---|
331 | net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN | |
---|
332 | CGEM_NET_CFG_NO_BCAST | |
---|
333 | CGEM_NET_CFG_COPY_ALL); |
---|
334 | |
---|
335 | if ((ifp->if_flags & IFF_PROMISC) != 0) |
---|
336 | net_cfg |= CGEM_NET_CFG_COPY_ALL; |
---|
337 | else { |
---|
338 | if ((ifp->if_flags & IFF_BROADCAST) == 0) |
---|
339 | net_cfg |= CGEM_NET_CFG_NO_BCAST; |
---|
340 | if ((ifp->if_flags & IFF_ALLMULTI) != 0) { |
---|
341 | hash_hi = 0xffffffff; |
---|
342 | hash_lo = 0xffffffff; |
---|
343 | } else { |
---|
344 | if_maddr_rlock(ifp); |
---|
345 | TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { |
---|
346 | if (ifma->ifma_addr->sa_family != AF_LINK) |
---|
347 | continue; |
---|
348 | index = cgem_mac_hash( |
---|
349 | LLADDR((struct sockaddr_dl *) |
---|
350 | ifma->ifma_addr)); |
---|
351 | if (index > 31) |
---|
352 | hash_hi |= (1<<(index-32)); |
---|
353 | else |
---|
354 | hash_lo |= (1<<index); |
---|
355 | } |
---|
356 | if_maddr_runlock(ifp); |
---|
357 | } |
---|
358 | |
---|
359 | if (hash_hi != 0 || hash_lo != 0) |
---|
360 | net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN; |
---|
361 | } |
---|
362 | |
---|
363 | WR4(sc, CGEM_HASH_TOP, hash_hi); |
---|
364 | WR4(sc, CGEM_HASH_BOT, hash_lo); |
---|
365 | WR4(sc, CGEM_NET_CFG, net_cfg); |
---|
366 | } |
---|
367 | |
---|
368 | /* For bus_dmamap_load() callback. */ |
---|
369 | static void |
---|
370 | cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) |
---|
371 | { |
---|
372 | |
---|
373 | if (nsegs != 1 || error != 0) |
---|
374 | return; |
---|
375 | *(bus_addr_t *)arg = segs[0].ds_addr; |
---|
376 | } |
---|
377 | |
---|
/* Create DMA'able descriptor rings.
 *
 * Allocates the DMA tags, the RX and TX descriptor rings (in coherent
 * memory), loads them to obtain their bus addresses, and initializes
 * every descriptor to its idle state (RX: owned by software, TX: USED).
 * Returns 0 on success or a bus_dma error code.
 *
 * NOTE(review): error returns leave previously created tags/maps/memory
 * in place; presumably cgem_detach() releases them -- confirm.
 */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
	int i, err;

	sc->txring = NULL;
	sc->rxring = NULL;

	/* Allocate non-cached DMA space for RX and TX descriptors.
	 * Single segment, 32-bit addressable, guarded by sc_mtx.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
				 BUS_SPACE_MAXADDR_32BIT,
				 BUS_SPACE_MAXADDR,
				 NULL, NULL,
				 MAX_DESC_RING_SIZE,
				 1,
				 MAX_DESC_RING_SIZE,
				 0,
				 busdma_lock_mutex,
				 &sc->sc_mtx,
				 &sc->desc_dma_tag);
	if (err)
		return (err);

	/* Set up a bus_dma_tag for mbufs: up to TX_MAX_DMA_SEGS segments
	 * of at most one cluster each.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
				 BUS_SPACE_MAXADDR_32BIT,
				 BUS_SPACE_MAXADDR,
				 NULL, NULL,
				 MCLBYTES,
				 TX_MAX_DMA_SEGS,
				 MCLBYTES,
				 0,
				 busdma_lock_mutex,
				 &sc->sc_mtx,
				 &sc->mbuf_dma_tag);
	if (err)
		return (err);

	/* Allocate DMA memory in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
			       (void **)&sc->rxring,
			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			       &sc->rxring_dma_map);
	if (err)
		return (err);

	/* Load descriptor DMA memory to learn its bus address. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
			      (void *)sc->rxring,
			      CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
			      cgem_getaddr, &sc->rxring_physaddr,
			      BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize RX descriptors: OWN set means no buffer attached yet
	 * (software still owns the slot); cgem_fill_rqueue() populates them.
	 */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		sc->rxring_m[i] = NULL;
#ifndef __rtems__
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
					&sc->rxring_m_dmamap[i]);
		if (err)
			return (err);
#endif /* __rtems__ */
	}
	/* Last descriptor wraps the ring back to index 0. */
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Allocate DMA memory for TX descriptors in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
			       (void **)&sc->txring,
			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			       &sc->txring_dma_map);
	if (err)
		return (err);

	/* Load TX descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
			      (void *)sc->txring,
			      CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc),
			      cgem_getaddr, &sc->txring_physaddr,
			      BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize TX descriptor ring; USED marks a slot as free for
	 * software to fill.
	 */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].addr = 0;
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring_m[i] = NULL;
#ifndef __rtems__
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
					&sc->txring_m_dmamap[i]);
		if (err)
			return (err);
#endif /* __rtems__ */
	}
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	return (0);
}
---|
490 | |
---|
/* Fill receive descriptor ring with mbufs.
 *
 * Tops the RX ring up to sc->rxbufs cluster mbufs, loading each one for
 * DMA and handing its descriptor to the hardware.  Stops early if mbuf
 * or dmamap allocation fails.  Called with the driver lock held.
 */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
	struct mbuf *m = NULL;
#ifndef __rtems__
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	int nsegs;
#else /* __rtems__ */
	bus_dma_segment_t segs[1];
#endif /* __rtems__ */

	CGEM_ASSERT_LOCKED(sc);

	while (sc->rxring_queued < sc->rxbufs) {
		/* Get a cluster mbuf. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			break;

		/* Offer the whole cluster to the hardware. */
		m->m_len = MCLBYTES;
		m->m_pkthdr.len = MCLBYTES;
		m->m_pkthdr.rcvif = sc->ifp;

#ifndef __rtems__
		/* Load map and plug in physical address. */
		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			      sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
			      segs, &nsegs, BUS_DMA_NOWAIT)) {
			sc->rxdmamapfails++;
			m_free(m);
			break;
		}
#endif /* __rtems__ */
		sc->rxring_m[sc->rxring_hd_ptr] = m;

#ifndef __rtems__
		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->rxring_m_dmamap[sc->rxring_hd_ptr],
				BUS_DMASYNC_PREREAD);
#else /* __rtems__ */
		/* NOTE(review): the mbuf's virtual address is used directly
		 * as the DMA address -- assumes a 1:1 virtual/physical
		 * mapping on this RTEMS target; confirm. */
		rtems_cache_invalidate_multiple_data_lines(m->m_data, m->m_len);
		segs[0].ds_addr = mtod(m, bus_addr_t);
#endif /* __rtems__ */

		/* Write rx descriptor and increment head pointer.  Writing
		 * the addr word last hands the descriptor to the hardware
		 * (OWN bit clear); WRAP is preserved on the last slot. */
		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
				CGEM_RXDESC_WRAP;
			sc->rxring_hd_ptr = 0;
		} else
			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

		sc->rxring_queued++;
	}
}
---|
549 | |
---|
/* Pull received packets off of receive descriptor ring.
 *
 * Collects every descriptor whose OWN bit the hardware has set, builds a
 * private list chained through m_next, replenishes the ring, then drops
 * the driver lock while handing packets to if_input (which may re-enter
 * the driver).  Called with the driver lock held; returns with it held.
 */
static void
cgem_recv(struct cgem_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mbuf *m, *m_hd, **m_tl;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Pick up all packets in which the OWN bit is set. */
	m_hd = NULL;
	m_tl = &m_hd;
	while (sc->rxring_queued > 0 &&
	       (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {

		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

		/* Grab filled mbuf. */
		m = sc->rxring_m[sc->rxring_tl_ptr];
		sc->rxring_m[sc->rxring_tl_ptr] = NULL;

#ifndef __rtems__
		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->rxring_m_dmamap[sc->rxring_tl_ptr],
				BUS_DMASYNC_POSTREAD);

		/* Unload dmamap. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
			sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
#else /* __rtems__ */
		rtems_cache_invalidate_multiple_data_lines(m->m_data, m->m_len);
#endif /* __rtems__ */

		/* Increment tail pointer. */
		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
			sc->rxring_tl_ptr = 0;
		sc->rxring_queued--;

		/* Check FCS and make sure entire packet landed in one mbuf
		 * cluster (which is much bigger than the largest ethernet
		 * packet).
		 */
		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
			   (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
			/* discard. */
			m_free(m);
#ifndef __rtems__
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
#else /* __rtems__ */
			ifp->if_ierrors++;
#endif /* __rtems__ */
			continue;
		}

		/* Ready it to hand off to upper layers. */
		m->m_data += ETHER_ALIGN;
		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len;

		/* Are we using hardware checksumming?  Check the
		 * status in the receive descriptor.
		 */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* TCP or UDP checks out, IP checks out too. */
			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
				m->m_pkthdr.csum_flags |=
					CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
				   CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
				/* Only IP checks out. */
				m->m_pkthdr.csum_flags |=
					CSUM_IP_CHECKED | CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Queue it up for delivery below. */
		*m_tl = m;
		m_tl = &m->m_next;
	}

	/* Replenish receive buffers. */
	cgem_fill_rqueue(sc);

	/* Unlock and send up packets.  if_input may call back into the
	 * driver, so the lock must not be held across it. */
	CGEM_UNLOCK(sc);
	while (m_hd != NULL) {
		m = m_hd;
		m_hd = m_hd->m_next;
		m->m_next = NULL;
#ifndef __rtems__
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
#else /* __rtems__ */
		ifp->if_ipackets++;
#endif /* __rtems__ */
		(*ifp->if_input)(ifp, m);
	}
	CGEM_LOCK(sc);
}
---|
658 | |
---|
/* Find completed transmits and free their mbufs.
 *
 * Walks the TX ring from the tail pointer, reclaiming every descriptor
 * the hardware has marked USED, freeing the associated mbuf, counting
 * errors, and re-enabling the output queue.  Called with the driver
 * lock held.
 */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* free up finished transmits. */
	while (sc->txring_queued > 0 &&
	       ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
		CGEM_TXDESC_USED) != 0) {

#ifndef __rtems__
		/* Sync cache.  nop? */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->txring_m_dmamap[sc->txring_tl_ptr],
				BUS_DMASYNC_POSTWRITE);

		/* Unload DMA map. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
				  sc->txring_m_dmamap[sc->txring_tl_ptr]);
#endif /* __rtems__ */

		/* Free up the mbuf.  Only the first descriptor of a packet
		 * carries the mbuf pointer (see cgem_start_locked). */
		m = sc->txring_m[sc->txring_tl_ptr];
		sc->txring_m[sc->txring_tl_ptr] = NULL;
		m_freem(m);

		/* Check the status. */
		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
			/* Serious bus error. log to console. */
			device_printf(sc->dev, "cgem_clean_tx: Whoa! "
				      "AHB error, addr=0x%x\n",
				      sc->txring[sc->txring_tl_ptr].addr);
		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
				   CGEM_TXDESC_LATE_COLL)) != 0) {
#ifndef __rtems__
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
#else /* __rtems__ */
			sc->ifp->if_oerrors++;
#endif /* __rtems__ */
		} else
#ifndef __rtems__
			if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
#else /* __rtems__ */
			sc->ifp->if_opackets++;
#endif /* __rtems__ */

		/* If the packet spanned more than one tx descriptor,
		 * skip descriptors until we find the end so that only
		 * start-of-frame descriptors are processed.  The USED bit
		 * is set on each continuation descriptor as we pass so the
		 * slot is reusable.
		 */
		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
			if ((ctl & CGEM_TXDESC_WRAP) != 0)
				sc->txring_tl_ptr = 0;
			else
				sc->txring_tl_ptr++;
			sc->txring_queued--;

			ctl = sc->txring[sc->txring_tl_ptr].ctl;

			sc->txring[sc->txring_tl_ptr].ctl =
				ctl | CGEM_TXDESC_USED;
		}

		/* Next descriptor. */
		if ((ctl & CGEM_TXDESC_WRAP) != 0)
			sc->txring_tl_ptr = 0;
		else
			sc->txring_tl_ptr++;
		sc->txring_queued--;

		/* Room was just freed, so transmission can proceed again. */
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
}
---|
736 | |
---|
#ifdef __rtems__
/*
 * Build a DMA segment list for an outgoing mbuf chain (RTEMS substitute
 * for bus_dmamap_load_mbuf_sg()).  Each non-empty mbuf becomes one
 * segment; its data is flushed from the cache so the device sees it.
 *
 * Returns 0 and stores the segment count in *nsegs on success, or EFBIG
 * if the chain needs more than TX_MAX_DMA_SEGS segments (caller then
 * defragments and retries).
 *
 * Fixed: the original returned EFBIG whenever any mbuf -- even a
 * zero-length one -- remained after TX_MAX_DMA_SEGS segments were
 * filled, forcing a needless m_defrag().  Zero-length mbufs are now
 * always skipped, so EFBIG is returned only when a segment truly
 * doesn't fit.
 */
static int
cgem_get_segs_for_tx(struct mbuf *m, bus_dma_segment_t segs[TX_MAX_DMA_SEGS],
		     int *nsegs)
{
	int count = 0;

	for (; m != NULL; m = m->m_next) {
		if (m->m_len <= 0)
			continue;	/* empty mbufs consume no segment */
		if (count >= TX_MAX_DMA_SEGS)
			return (EFBIG);
		segs[count].ds_addr = mtod(m, bus_addr_t);
		segs[count].ds_len = m->m_len;
		/* Flush so the DMA engine reads current data. */
		rtems_cache_flush_multiple_data_lines(m->m_data, m->m_len);
		count++;
	}

	*nsegs = count;
	return (0);
}
#endif /* __rtems__ */
---|
/* Start transmits.
 *
 * Dequeues packets from if_snd, maps each into up to TX_MAX_DMA_SEGS
 * DMA segments (defragmenting once if necessary), fills the TX
 * descriptors, and kicks the transmitter.  Stops and sets
 * IFF_DRV_OACTIVE when the ring lacks headroom for a worst-case packet.
 * Called with the driver lock held.
 */
static void
cgem_start_locked(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mbuf *m;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	uint32_t ctl;
	int i, nsegs, wrap, err;

	CGEM_ASSERT_LOCKED(sc);

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0)
		return;

	for (;;) {
		/* Check that there is room in the descriptor ring: keep
		 * two worst-case packets' worth of headroom. */
		if (sc->txring_queued >=
		    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {

			/* Try to make room. */
			cgem_clean_tx(sc);

			/* Still no room? */
			if (sc->txring_queued >=
			    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				sc->txfull++;
				break;
			}
		}

		/* Grab next transmit packet. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

#ifndef __rtems__
		/* Load DMA map. */
		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
				      sc->txring_m_dmamap[sc->txring_hd_ptr],
				      m, segs, &nsegs, BUS_DMA_NOWAIT);
#else /* __rtems__ */
		err = cgem_get_segs_for_tx(m, segs, &nsegs);
#endif /* __rtems__ */
		if (err == EFBIG) {
			/* Too many segments!  defrag and try again. */
			struct mbuf *m2 = m_defrag(m, M_NOWAIT);

			if (m2 == NULL) {
				sc->txdefragfails++;
				m_freem(m);
				continue;
			}
			m = m2;
#ifndef __rtems__
			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
				      sc->txring_m_dmamap[sc->txring_hd_ptr],
				      m, segs, &nsegs, BUS_DMA_NOWAIT);
#else /* __rtems__ */
			err = cgem_get_segs_for_tx(m, segs, &nsegs);
#endif /* __rtems__ */
			sc->txdefrags++;
		}
		if (err) {
			/* Give up. */
			m_freem(m);
			sc->txdmamapfails++;
			continue;
		}
		/* Record the mbuf at the first descriptor of the packet;
		 * cgem_clean_tx() frees it from there. */
		sc->txring_m[sc->txring_hd_ptr] = m;

#ifndef __rtems__
		/* Sync tx buffer with cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->txring_m_dmamap[sc->txring_hd_ptr],
				BUS_DMASYNC_PREWRITE);
#endif /* __rtems__ */

		/* Set wrap flag if next packet might run off end of ring. */
		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
			CGEM_NUM_TX_DESCS;

		/* Fill in the TX descriptors back to front so that USED
		 * bit in first descriptor is cleared last: the hardware
		 * must not see a partially written packet.
		 */
		for (i = nsegs - 1; i >= 0; i--) {
			/* Descriptor address. */
			sc->txring[sc->txring_hd_ptr + i].addr =
				segs[i].ds_addr;

			/* Descriptor control word. */
			ctl = segs[i].ds_len;
			if (i == nsegs - 1) {
				ctl |= CGEM_TXDESC_LAST_BUF;
				if (wrap)
					ctl |= CGEM_TXDESC_WRAP;
			}
			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

			if (i != 0)
				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
		}

		if (wrap)
			sc->txring_hd_ptr = 0;
		else
			sc->txring_hd_ptr += nsegs;
		sc->txring_queued += nsegs;

		/* Kick the transmitter. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_START_TX);

		/* If there is a BPF listener, bounce a copy to to him. */
		ETHER_BPF_MTAP(ifp, m);
	}
}
---|
882 | |
---|
883 | static void |
---|
884 | cgem_start(struct ifnet *ifp) |
---|
885 | { |
---|
886 | struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc; |
---|
887 | |
---|
888 | CGEM_LOCK(sc); |
---|
889 | cgem_start_locked(ifp); |
---|
890 | CGEM_UNLOCK(sc); |
---|
891 | } |
---|
892 | |
---|
/*
 * Fold the hardware statistics counters into the software copies kept in
 * sc->stats.  NOTE(review): the += accumulation assumes the GEM statistics
 * registers are read-to-clear, so each RD4() returns the delta since the
 * previous poll -- confirm against the Zynq TRM (UG585, GEM chapter).
 * Caller must hold the driver lock.
 */
static void
cgem_poll_hw_stats(struct cgem_softc *sc)
{
	uint32_t n;

	CGEM_ASSERT_LOCKED(sc);

	/* The 64-bit octet counters are split across two 32-bit registers. */
	sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT);
	sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32;

	sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX);
	sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX);
	sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX);
	sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX);
	sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX);
	sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX);
	sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX);
	sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX);
	sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX);
	sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX);
	sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS);

	/*
	 * Each collision counter is read once and credited both to the
	 * detailed driver statistic and to the interface-level collision
	 * count (if_inc_counter on FreeBSD, if_collisions on RTEMS).
	 */
	n = RD4(sc, CGEM_SINGLE_COLL_FRAMES);
	sc->stats.tx_single_collisn += n;
#ifndef __rtems__
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
#else /* __rtems__ */
	sc->ifp->if_collisions += n;
#endif /* __rtems__ */
	n = RD4(sc, CGEM_MULTI_COLL_FRAMES);
	sc->stats.tx_multi_collisn += n;
#ifndef __rtems__
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
#else /* __rtems__ */
	sc->ifp->if_collisions += n;
#endif /* __rtems__ */
	n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES);
	sc->stats.tx_excsv_collisn += n;
#ifndef __rtems__
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
#else /* __rtems__ */
	sc->ifp->if_collisions += n;
#endif /* __rtems__ */
	n = RD4(sc, CGEM_LATE_COLL);
	sc->stats.tx_late_collisn += n;
#ifndef __rtems__
	if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n);
#else /* __rtems__ */
	sc->ifp->if_collisions += n;
#endif /* __rtems__ */

	sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES);
	sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS);

	/* Receive-side octet counters, also split low/high. */
	sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT);
	sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32;

	sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX);
	sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX);
	sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX);
	sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX);
	sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX);
	sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX);
	sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX);
	sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX);
	sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX);
	sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX);
	sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX);
	sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX);
	sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX);
	sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS);
	sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS);
	sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS);
	sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS);
	sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS);
	sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS);
	sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS);
	sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS);
	sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS);
}
---|
973 | |
---|
/*
 * Once-per-second housekeeping callout: tick the PHY state machine, poll
 * the hardware statistics registers, and apply the receiver-hang
 * work-around.  Re-arms itself; runs with the driver lock held (asserted
 * below), so the callout must have been initialized with the softc mutex.
 */
static void
cgem_tick(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	/* Poll the phy. */
	if (sc->miibus != NULL) {
		mii = device_get_softc(sc->miibus);
		mii_tick(mii);
	}

	/* Poll statistics registers. */
	cgem_poll_hw_stats(sc);

	/*
	 * Check for receiver hang: if the work-around is enabled and no
	 * frame has been received since the last tick, assume the receiver
	 * has stalled and reset it.
	 */
	if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) {
		/*
		 * Reset receiver logic by toggling RX_EN bit.  1usec
		 * delay is necessary especially when operating at 100mbps
		 * and 10mbps speeds.
		 */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow &
		    ~CGEM_NET_CTRL_RX_EN);
		DELAY(1);
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
	}
	/* Remember the frame count so the next tick can detect a stall. */
	sc->rx_frames_prev = sc->stats.rx_frames;

	/* Next callout in one second. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}
---|
1008 | |
---|
/*
 * Interrupt handler.  Acknowledges and services RX-complete, HRESP,
 * RX-overrun and RX-used-bit conditions, reaps completed TX descriptors,
 * and restarts transmission if packets are still queued.
 */
static void
cgem_intr(void *arg)
{
	struct cgem_softc *sc = (struct cgem_softc *)arg;
	uint32_t istatus;

	CGEM_LOCK(sc);

	/* Ignore stray interrupts while the interface is down. */
	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) {
		CGEM_UNLOCK(sc);
		return;
	}

	/* Read interrupt status and immediately clear the bits. */
	istatus = RD4(sc, CGEM_INTR_STAT);
	WR4(sc, CGEM_INTR_STAT, istatus);

	/* Packets received. */
	if ((istatus & CGEM_INTR_RX_COMPLETE) != 0)
		cgem_recv(sc);

	/* Free up any completed transmit buffers. */
	cgem_clean_tx(sc);

	/* Hresp not ok.  Something is very bad with DMA.  Try to clear. */
	if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) {
		device_printf(sc->dev, "cgem_intr: hresp not okay! "
		    "rx_status=0x%x\n", RD4(sc, CGEM_RX_STAT));
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK);
	}

	/* Receiver overrun. */
	if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) {
		/* Clear status bit. */
		WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN);
		sc->rxoverruns++;
	}

	/* Receiver ran out of bufs: flush the stuck packet and refill. */
	if ((istatus & CGEM_INTR_RX_USED_READ) != 0) {
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_FLUSH_DPRAM_PKT);
		cgem_fill_rqueue(sc);
		sc->rxnobufs++;
	}

	/* Restart transmitter if needed. */
	if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd))
		cgem_start_locked(sc->ifp);

	CGEM_UNLOCK(sc);
}
---|
1062 | |
---|
/*
 * Reset hardware to a quiescent, known state: controller disabled, all
 * status bits and interrupts cleared, hash and queue-base registers
 * zeroed.  Leaves only the MDIO management port enabled so PHY access
 * keeps working while the interface is down.  Caller holds the lock.
 */
static void
cgem_reset(struct cgem_softc *sc)
{

	CGEM_ASSERT_LOCKED(sc);

	WR4(sc, CGEM_NET_CTRL, 0);
	WR4(sc, CGEM_NET_CFG, 0);
	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
	WR4(sc, CGEM_HASH_BOT, 0);
	WR4(sc, CGEM_HASH_TOP, 0);
	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
	WR4(sc, CGEM_RX_QBAR, 0);

	/* Get management port running even if interface is down. */
	WR4(sc, CGEM_NET_CFG,
	    CGEM_NET_CFG_DBUS_WIDTH_32 |
	    CGEM_NET_CFG_MDC_CLK_DIV_64);

	/* net_ctl_shadow tracks the last value written to NET_CTRL. */
	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}
---|
1089 | |
---|
/*
 * Bring up the hardware: program network and DMA configuration (including
 * optional RX/TX checksum offload), point the controller at the descriptor
 * rings, enable RX/TX, set the station MAC address, and unmask the
 * interrupts this driver services.  Caller holds the lock.
 */
static void
cgem_config(struct cgem_softc *sc)
{
	uint32_t net_cfg;
	uint32_t dma_cfg;
	u_char *eaddr = IF_LLADDR(sc->ifp);

	CGEM_ASSERT_LOCKED(sc);

	/*
	 * Program Net Config Register.  Speed/duplex bits set here are a
	 * starting point; cgem_mediachange() reprograms them when the link
	 * settles.  FCS_REMOVE strips the CRC; RX_BUF_OFFSET(ETHER_ALIGN)
	 * keeps the IP header 4-byte aligned in the receive buffer.
	 */
	net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
	    CGEM_NET_CFG_MDC_CLK_DIV_64 |
	    CGEM_NET_CFG_FCS_REMOVE |
	    CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
	    CGEM_NET_CFG_GIGE_EN |
	    CGEM_NET_CFG_1536RXEN |
	    CGEM_NET_CFG_FULL_DUPLEX |
	    CGEM_NET_CFG_SPEED100;

	/* Enable receive checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_RXCSUM) != 0)
		net_cfg |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

	WR4(sc, CGEM_NET_CFG, net_cfg);

	/* Program DMA Config Register. */
	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
	    CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
	    CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
	    CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
	    CGEM_DMA_CFG_DISC_WHEN_NO_AHB;

	/* Enable transmit checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_TXCSUM) != 0)
		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

	WR4(sc, CGEM_DMA_CFG, dma_cfg);

	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
	WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
	WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);

	/* Enable rx and tx. */
	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

	/* Set receive address in case it changed (little-endian packing). */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	/* Set up interrupts. */
	WR4(sc, CGEM_INTR_EN,
	    CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
	    CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ |
	    CGEM_INTR_HRESP_NOT_OK);
}
---|
1148 | |
---|
/*
 * Turn on interface and load up receive ring with buffers.  No-op if the
 * interface is already running.  Also kicks off media selection and the
 * periodic tick callout.  Caller holds the driver lock.
 */
static void
cgem_init_locked(struct cgem_softc *sc)
{
	struct mii_data *mii;

	CGEM_ASSERT_LOCKED(sc);

	/* Already up: nothing to do. */
	if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0)
		return;

	cgem_config(sc);
	cgem_fill_rqueue(sc);

	/* Mark running before media change so TX paths are live. */
	sc->ifp->if_drv_flags |= IFF_DRV_RUNNING;
	sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

	mii = device_get_softc(sc->miibus);
	mii_mediachg(mii);

	/* Start the once-per-second housekeeping tick. */
	callout_reset(&sc->tick_ch, hz, cgem_tick, sc);
}
---|
1171 | |
---|
/* ifnet if_init entry point: bring the interface up under the lock. */
static void
cgem_init(void *arg)
{
	struct cgem_softc *sc = arg;

	CGEM_LOCK(sc);
	cgem_init_locked(sc);
	CGEM_UNLOCK(sc);
}
---|
1181 | |
---|
1182 | /* Turn off interface. Free up any buffers in transmit or receive queues. */ |
---|
1183 | static void |
---|
1184 | cgem_stop(struct cgem_softc *sc) |
---|
1185 | { |
---|
1186 | int i; |
---|
1187 | |
---|
1188 | CGEM_ASSERT_LOCKED(sc); |
---|
1189 | |
---|
1190 | callout_stop(&sc->tick_ch); |
---|
1191 | |
---|
1192 | /* Shut down hardware. */ |
---|
1193 | cgem_reset(sc); |
---|
1194 | |
---|
1195 | /* Clear out transmit queue. */ |
---|
1196 | for (i = 0; i < CGEM_NUM_TX_DESCS; i++) { |
---|
1197 | sc->txring[i].ctl = CGEM_TXDESC_USED; |
---|
1198 | sc->txring[i].addr = 0; |
---|
1199 | if (sc->txring_m[i]) { |
---|
1200 | #ifndef __rtems__ |
---|
1201 | bus_dmamap_unload(sc->mbuf_dma_tag, |
---|
1202 | sc->txring_m_dmamap[i]); |
---|
1203 | #endif /* __rtems__ */ |
---|
1204 | m_freem(sc->txring_m[i]); |
---|
1205 | sc->txring_m[i] = NULL; |
---|
1206 | } |
---|
1207 | } |
---|
1208 | sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP; |
---|
1209 | |
---|
1210 | sc->txring_hd_ptr = 0; |
---|
1211 | sc->txring_tl_ptr = 0; |
---|
1212 | sc->txring_queued = 0; |
---|
1213 | |
---|
1214 | /* Clear out receive queue. */ |
---|
1215 | for (i = 0; i < CGEM_NUM_RX_DESCS; i++) { |
---|
1216 | sc->rxring[i].addr = CGEM_RXDESC_OWN; |
---|
1217 | sc->rxring[i].ctl = 0; |
---|
1218 | if (sc->rxring_m[i]) { |
---|
1219 | #ifndef __rtems__ |
---|
1220 | /* Unload dmamap. */ |
---|
1221 | bus_dmamap_unload(sc->mbuf_dma_tag, |
---|
1222 | sc->rxring_m_dmamap[sc->rxring_tl_ptr]); |
---|
1223 | #endif /* __rtems__ */ |
---|
1224 | |
---|
1225 | m_freem(sc->rxring_m[i]); |
---|
1226 | sc->rxring_m[i] = NULL; |
---|
1227 | } |
---|
1228 | } |
---|
1229 | sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP; |
---|
1230 | |
---|
1231 | sc->rxring_hd_ptr = 0; |
---|
1232 | sc->rxring_tl_ptr = 0; |
---|
1233 | sc->rxring_queued = 0; |
---|
1234 | |
---|
1235 | /* Force next statchg or linkchg to program net config register. */ |
---|
1236 | sc->mii_media_active = 0; |
---|
1237 | } |
---|
1238 | |
---|
1239 | |
---|
/*
 * ioctl handler for the interface: up/down transitions, multicast filter
 * updates, media get/set (delegated to mii(4)), and RX/TX checksum
 * offload capability changes.  Anything else falls through to
 * ether_ioctl().  Returns 0 or an errno value.
 */
static int
cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cgem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFFLAGS:
		CGEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				/*
				 * Already running: only reprogram the RX
				 * filter if PROMISC/ALLMULTI changed.
				 */
				if (((ifp->if_flags ^ sc->if_old_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					cgem_rx_filter(sc);
				}
			} else {
				cgem_init_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			/* Interface going down: stop the hardware. */
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			cgem_stop(sc);
		}
		/* Remember flags for the change-detection test above. */
		sc->if_old_flags = ifp->if_flags;
		CGEM_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Set up multi-cast filters. */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			CGEM_LOCK(sc);
			cgem_rx_filter(sc);
			CGEM_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media handling is delegated to the mii(4) layer. */
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		CGEM_LOCK(sc);
		/* mask holds the capability bits being toggled. */
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;

		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
				/* Turn on TX checksumming. */
				ifp->if_capenable |= (IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist |= CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) |
				    CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			} else {
				/* Turn off TX checksumming. */
				ifp->if_capenable &= ~(IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist &= ~CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) &
				    ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
				/* Turn on RX checksumming. */
				ifp->if_capenable |= (IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) |
				    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			} else {
				/* Turn off RX checksumming. */
				ifp->if_capenable &= ~(IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) &
				    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			}
		}
		/* VLAN hw checksum is offered only when both RX+TX are on. */
		if ((ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
		    (IFCAP_RXCSUM | IFCAP_TXCSUM))
			ifp->if_capenable |= IFCAP_VLAN_HWCSUM;
		else
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;

		CGEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
---|
1341 | |
---|
1342 | /* MII bus support routines. |
---|
1343 | */ |
---|
1344 | static void |
---|
1345 | cgem_child_detached(device_t dev, device_t child) |
---|
1346 | { |
---|
1347 | struct cgem_softc *sc = device_get_softc(dev); |
---|
1348 | |
---|
1349 | if (child == sc->miibus) |
---|
1350 | sc->miibus = NULL; |
---|
1351 | } |
---|
1352 | |
---|
1353 | static int |
---|
1354 | cgem_ifmedia_upd(struct ifnet *ifp) |
---|
1355 | { |
---|
1356 | struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc; |
---|
1357 | struct mii_data *mii; |
---|
1358 | struct mii_softc *miisc; |
---|
1359 | int error = 0; |
---|
1360 | |
---|
1361 | mii = device_get_softc(sc->miibus); |
---|
1362 | CGEM_LOCK(sc); |
---|
1363 | if ((ifp->if_flags & IFF_UP) != 0) { |
---|
1364 | LIST_FOREACH(miisc, &mii->mii_phys, mii_list) |
---|
1365 | PHY_RESET(miisc); |
---|
1366 | error = mii_mediachg(mii); |
---|
1367 | } |
---|
1368 | CGEM_UNLOCK(sc); |
---|
1369 | |
---|
1370 | return (error); |
---|
1371 | } |
---|
1372 | |
---|
1373 | static void |
---|
1374 | cgem_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) |
---|
1375 | { |
---|
1376 | struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc; |
---|
1377 | struct mii_data *mii; |
---|
1378 | |
---|
1379 | mii = device_get_softc(sc->miibus); |
---|
1380 | CGEM_LOCK(sc); |
---|
1381 | mii_pollstat(mii); |
---|
1382 | ifmr->ifm_active = mii->mii_media_active; |
---|
1383 | ifmr->ifm_status = mii->mii_media_status; |
---|
1384 | CGEM_UNLOCK(sc); |
---|
1385 | } |
---|
1386 | |
---|
1387 | static int |
---|
1388 | cgem_miibus_readreg(device_t dev, int phy, int reg) |
---|
1389 | { |
---|
1390 | struct cgem_softc *sc = device_get_softc(dev); |
---|
1391 | int tries, val; |
---|
1392 | |
---|
1393 | WR4(sc, CGEM_PHY_MAINT, |
---|
1394 | CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 | |
---|
1395 | CGEM_PHY_MAINT_OP_READ | |
---|
1396 | (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) | |
---|
1397 | (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT)); |
---|
1398 | |
---|
1399 | /* Wait for completion. */ |
---|
1400 | tries=0; |
---|
1401 | while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) { |
---|
1402 | DELAY(5); |
---|
1403 | if (++tries > 200) { |
---|
1404 | device_printf(dev, "phy read timeout: %d\n", reg); |
---|
1405 | return (-1); |
---|
1406 | } |
---|
1407 | } |
---|
1408 | |
---|
1409 | val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK; |
---|
1410 | |
---|
1411 | if (reg == MII_EXTSR) |
---|
1412 | /* |
---|
1413 | * MAC does not support half-duplex at gig speeds. |
---|
1414 | * Let mii(4) exclude the capability. |
---|
1415 | */ |
---|
1416 | val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX); |
---|
1417 | |
---|
1418 | return (val); |
---|
1419 | } |
---|
1420 | |
---|
1421 | static int |
---|
1422 | cgem_miibus_writereg(device_t dev, int phy, int reg, int data) |
---|
1423 | { |
---|
1424 | struct cgem_softc *sc = device_get_softc(dev); |
---|
1425 | int tries; |
---|
1426 | |
---|
1427 | WR4(sc, CGEM_PHY_MAINT, |
---|
1428 | CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 | |
---|
1429 | CGEM_PHY_MAINT_OP_WRITE | |
---|
1430 | (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) | |
---|
1431 | (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) | |
---|
1432 | (data & CGEM_PHY_MAINT_DATA_MASK)); |
---|
1433 | |
---|
1434 | /* Wait for completion. */ |
---|
1435 | tries = 0; |
---|
1436 | while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) { |
---|
1437 | DELAY(5); |
---|
1438 | if (++tries > 200) { |
---|
1439 | device_printf(dev, "phy write timeout: %d\n", reg); |
---|
1440 | return (-1); |
---|
1441 | } |
---|
1442 | } |
---|
1443 | |
---|
1444 | return (0); |
---|
1445 | } |
---|
1446 | |
---|
1447 | static void |
---|
1448 | cgem_miibus_statchg(device_t dev) |
---|
1449 | { |
---|
1450 | struct cgem_softc *sc = device_get_softc(dev); |
---|
1451 | struct mii_data *mii = device_get_softc(sc->miibus); |
---|
1452 | |
---|
1453 | CGEM_ASSERT_LOCKED(sc); |
---|
1454 | |
---|
1455 | if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == |
---|
1456 | (IFM_ACTIVE | IFM_AVALID) && |
---|
1457 | sc->mii_media_active != mii->mii_media_active) |
---|
1458 | cgem_mediachange(sc, mii); |
---|
1459 | } |
---|
1460 | |
---|
1461 | static void |
---|
1462 | cgem_miibus_linkchg(device_t dev) |
---|
1463 | { |
---|
1464 | struct cgem_softc *sc = device_get_softc(dev); |
---|
1465 | struct mii_data *mii = device_get_softc(sc->miibus); |
---|
1466 | |
---|
1467 | CGEM_ASSERT_LOCKED(sc); |
---|
1468 | |
---|
1469 | if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == |
---|
1470 | (IFM_ACTIVE | IFM_AVALID) && |
---|
1471 | sc->mii_media_active != mii->mii_media_active) |
---|
1472 | cgem_mediachange(sc, mii); |
---|
1473 | } |
---|
1474 | |
---|
1475 | /* |
---|
1476 | * Overridable weak symbol cgem_set_ref_clk(). This allows platforms to |
---|
1477 | * provide a function to set the cgem's reference clock. |
---|
1478 | */ |
---|
1479 | static int __used |
---|
1480 | cgem_default_set_ref_clk(int unit, int frequency) |
---|
1481 | { |
---|
1482 | |
---|
1483 | return 0; |
---|
1484 | } |
---|
1485 | __weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk); |
---|
1486 | |
---|
1487 | /* Call to set reference clock and network config bits according to media. */ |
---|
1488 | static void |
---|
1489 | cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii) |
---|
1490 | { |
---|
1491 | uint32_t net_cfg; |
---|
1492 | int ref_clk_freq; |
---|
1493 | |
---|
1494 | CGEM_ASSERT_LOCKED(sc); |
---|
1495 | |
---|
1496 | /* Update hardware to reflect media. */ |
---|
1497 | net_cfg = RD4(sc, CGEM_NET_CFG); |
---|
1498 | net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN | |
---|
1499 | CGEM_NET_CFG_FULL_DUPLEX); |
---|
1500 | |
---|
1501 | switch (IFM_SUBTYPE(mii->mii_media_active)) { |
---|
1502 | case IFM_1000_T: |
---|
1503 | net_cfg |= (CGEM_NET_CFG_SPEED100 | |
---|
1504 | CGEM_NET_CFG_GIGE_EN); |
---|
1505 | ref_clk_freq = 125000000; |
---|
1506 | break; |
---|
1507 | case IFM_100_TX: |
---|
1508 | net_cfg |= CGEM_NET_CFG_SPEED100; |
---|
1509 | ref_clk_freq = 25000000; |
---|
1510 | break; |
---|
1511 | default: |
---|
1512 | ref_clk_freq = 2500000; |
---|
1513 | } |
---|
1514 | |
---|
1515 | if ((mii->mii_media_active & IFM_FDX) != 0) |
---|
1516 | net_cfg |= CGEM_NET_CFG_FULL_DUPLEX; |
---|
1517 | |
---|
1518 | WR4(sc, CGEM_NET_CFG, net_cfg); |
---|
1519 | |
---|
1520 | /* Set the reference clock if necessary. */ |
---|
1521 | if (cgem_set_ref_clk(sc->ref_clk_num, ref_clk_freq)) |
---|
1522 | device_printf(sc->dev, "cgem_mediachange: " |
---|
1523 | "could not set ref clk%d to %d.\n", |
---|
1524 | sc->ref_clk_num, ref_clk_freq); |
---|
1525 | |
---|
1526 | sc->mii_media_active = mii->mii_media_active; |
---|
1527 | } |
---|
1528 | |
---|
1529 | static void |
---|
1530 | cgem_add_sysctls(device_t dev) |
---|
1531 | { |
---|
1532 | #ifndef __rtems__ |
---|
1533 | struct cgem_softc *sc = device_get_softc(dev); |
---|
1534 | struct sysctl_ctx_list *ctx; |
---|
1535 | struct sysctl_oid_list *child; |
---|
1536 | struct sysctl_oid *tree; |
---|
1537 | |
---|
1538 | ctx = device_get_sysctl_ctx(dev); |
---|
1539 | child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); |
---|
1540 | |
---|
1541 | SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW, |
---|
1542 | &sc->rxbufs, 0, |
---|
1543 | "Number receive buffers to provide"); |
---|
1544 | |
---|
1545 | SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW, |
---|
1546 | &sc->rxhangwar, 0, |
---|
1547 | "Enable receive hang work-around"); |
---|
1548 | |
---|
1549 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD, |
---|
1550 | &sc->rxoverruns, 0, |
---|
1551 | "Receive overrun events"); |
---|
1552 | |
---|
1553 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD, |
---|
1554 | &sc->rxnobufs, 0, |
---|
1555 | "Receive buf queue empty events"); |
---|
1556 | |
---|
1557 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD, |
---|
1558 | &sc->rxdmamapfails, 0, |
---|
1559 | "Receive DMA map failures"); |
---|
1560 | |
---|
1561 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD, |
---|
1562 | &sc->txfull, 0, |
---|
1563 | "Transmit ring full events"); |
---|
1564 | |
---|
1565 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD, |
---|
1566 | &sc->txdmamapfails, 0, |
---|
1567 | "Transmit DMA map failures"); |
---|
1568 | |
---|
1569 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD, |
---|
1570 | &sc->txdefrags, 0, |
---|
1571 | "Transmit m_defrag() calls"); |
---|
1572 | |
---|
1573 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD, |
---|
1574 | &sc->txdefragfails, 0, |
---|
1575 | "Transmit m_defrag() failures"); |
---|
1576 | |
---|
1577 | tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, |
---|
1578 | NULL, "GEM statistics"); |
---|
1579 | child = SYSCTL_CHILDREN(tree); |
---|
1580 | |
---|
1581 | SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD, |
---|
1582 | &sc->stats.tx_bytes, "Total bytes transmitted"); |
---|
1583 | |
---|
1584 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD, |
---|
1585 | &sc->stats.tx_frames, 0, "Total frames transmitted"); |
---|
1586 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD, |
---|
1587 | &sc->stats.tx_frames_bcast, 0, |
---|
1588 | "Number broadcast frames transmitted"); |
---|
1589 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD, |
---|
1590 | &sc->stats.tx_frames_multi, 0, |
---|
1591 | "Number multicast frames transmitted"); |
---|
1592 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause", |
---|
1593 | CTLFLAG_RD, &sc->stats.tx_frames_pause, 0, |
---|
1594 | "Number pause frames transmitted"); |
---|
1595 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD, |
---|
1596 | &sc->stats.tx_frames_64b, 0, |
---|
1597 | "Number frames transmitted of size 64 bytes or less"); |
---|
1598 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD, |
---|
1599 | &sc->stats.tx_frames_65to127b, 0, |
---|
1600 | "Number frames transmitted of size 65-127 bytes"); |
---|
1601 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b", |
---|
1602 | CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0, |
---|
1603 | "Number frames transmitted of size 128-255 bytes"); |
---|
1604 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b", |
---|
1605 | CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0, |
---|
1606 | "Number frames transmitted of size 256-511 bytes"); |
---|
1607 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b", |
---|
1608 | CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0, |
---|
1609 | "Number frames transmitted of size 512-1023 bytes"); |
---|
1610 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b", |
---|
1611 | CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0, |
---|
1612 | "Number frames transmitted of size 1024-1536 bytes"); |
---|
1613 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs", |
---|
1614 | CTLFLAG_RD, &sc->stats.tx_under_runs, 0, |
---|
1615 | "Number transmit under-run events"); |
---|
1616 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn", |
---|
1617 | CTLFLAG_RD, &sc->stats.tx_single_collisn, 0, |
---|
1618 | "Number single-collision transmit frames"); |
---|
1619 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn", |
---|
1620 | CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0, |
---|
1621 | "Number multi-collision transmit frames"); |
---|
1622 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn", |
---|
1623 | CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0, |
---|
1624 | "Number excessive collision transmit frames"); |
---|
1625 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn", |
---|
1626 | CTLFLAG_RD, &sc->stats.tx_late_collisn, 0, |
---|
1627 | "Number late-collision transmit frames"); |
---|
1628 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames", |
---|
1629 | CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0, |
---|
1630 | "Number deferred transmit frames"); |
---|
1631 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs", |
---|
1632 | CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0, |
---|
1633 | "Number carrier sense errors on transmit"); |
---|
1634 | |
---|
1635 | SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD, |
---|
1636 | &sc->stats.rx_bytes, "Total bytes received"); |
---|
1637 | |
---|
1638 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD, |
---|
1639 | &sc->stats.rx_frames, 0, "Total frames received"); |
---|
1640 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast", |
---|
1641 | CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0, |
---|
1642 | "Number broadcast frames received"); |
---|
1643 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi", |
---|
1644 | CTLFLAG_RD, &sc->stats.rx_frames_multi, 0, |
---|
1645 | "Number multicast frames received"); |
---|
1646 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause", |
---|
1647 | CTLFLAG_RD, &sc->stats.rx_frames_pause, 0, |
---|
1648 | "Number pause frames received"); |
---|
1649 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b", |
---|
1650 | CTLFLAG_RD, &sc->stats.rx_frames_64b, 0, |
---|
1651 | "Number frames received of size 64 bytes or less"); |
---|
1652 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b", |
---|
1653 | CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0, |
---|
1654 | "Number frames received of size 65-127 bytes"); |
---|
1655 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b", |
---|
1656 | CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0, |
---|
1657 | "Number frames received of size 128-255 bytes"); |
---|
1658 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b", |
---|
1659 | CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0, |
---|
1660 | "Number frames received of size 256-511 bytes"); |
---|
1661 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b", |
---|
1662 | CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0, |
---|
1663 | "Number frames received of size 512-1023 bytes"); |
---|
1664 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b", |
---|
1665 | CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0, |
---|
1666 | "Number frames received of size 1024-1536 bytes"); |
---|
1667 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize", |
---|
1668 | CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0, |
---|
1669 | "Number undersize frames received"); |
---|
1670 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize", |
---|
1671 | CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0, |
---|
1672 | "Number oversize frames received"); |
---|
1673 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber", |
---|
1674 | CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0, |
---|
1675 | "Number jabber frames received"); |
---|
1676 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs", |
---|
1677 | CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0, |
---|
1678 | "Number frames received with FCS errors"); |
---|
1679 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs", |
---|
1680 | CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0, |
---|
1681 | "Number frames received with length errors"); |
---|
1682 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs", |
---|
1683 | CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0, |
---|
1684 | "Number receive symbol errors"); |
---|
1685 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs", |
---|
1686 | CTLFLAG_RD, &sc->stats.rx_align_errs, 0, |
---|
1687 | "Number receive alignment errors"); |
---|
1688 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs", |
---|
1689 | CTLFLAG_RD, &sc->stats.rx_resource_errs, 0, |
---|
1690 | "Number frames received when no rx buffer available"); |
---|
1691 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs", |
---|
1692 | CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0, |
---|
1693 | "Number frames received but not copied due to " |
---|
1694 | "receive overrun"); |
---|
1695 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs", |
---|
1696 | CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0, |
---|
1697 | "Number frames received with IP header checksum " |
---|
1698 | "errors"); |
---|
1699 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs", |
---|
1700 | CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0, |
---|
1701 | "Number frames received with TCP checksum errors"); |
---|
1702 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs", |
---|
1703 | CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0, |
---|
1704 | "Number frames received with UDP checksum errors"); |
---|
1705 | #endif /* __rtems__ */ |
---|
1706 | } |
---|
1707 | |
---|
1708 | |
---|
1709 | static int |
---|
1710 | cgem_probe(device_t dev) |
---|
1711 | { |
---|
1712 | |
---|
1713 | #ifndef __rtems__ |
---|
1714 | if (!ofw_bus_is_compatible(dev, "cadence,gem")) |
---|
1715 | return (ENXIO); |
---|
1716 | #endif /* __rtems__ */ |
---|
1717 | |
---|
1718 | device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface"); |
---|
1719 | return (0); |
---|
1720 | } |
---|
1721 | |
---|
1722 | static int |
---|
1723 | cgem_attach(device_t dev) |
---|
1724 | { |
---|
1725 | struct cgem_softc *sc = device_get_softc(dev); |
---|
1726 | struct ifnet *ifp = NULL; |
---|
1727 | #ifndef __rtems__ |
---|
1728 | phandle_t node; |
---|
1729 | pcell_t cell; |
---|
1730 | #endif /* __rtems__ */ |
---|
1731 | int rid, err; |
---|
1732 | u_char eaddr[ETHER_ADDR_LEN]; |
---|
1733 | |
---|
1734 | sc->dev = dev; |
---|
1735 | CGEM_LOCK_INIT(sc); |
---|
1736 | |
---|
1737 | #ifndef __rtems__ |
---|
1738 | /* Get reference clock number and base divider from fdt. */ |
---|
1739 | node = ofw_bus_get_node(dev); |
---|
1740 | sc->ref_clk_num = 0; |
---|
1741 | if (OF_getprop(node, "ref-clock-num", &cell, sizeof(cell)) > 0) |
---|
1742 | sc->ref_clk_num = fdt32_to_cpu(cell); |
---|
1743 | #else /* __rtems__ */ |
---|
1744 | sc->ref_clk_num = device_get_unit(dev); |
---|
1745 | #endif /* __rtems__ */ |
---|
1746 | |
---|
1747 | /* Get memory resource. */ |
---|
1748 | rid = 0; |
---|
1749 | sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, |
---|
1750 | RF_ACTIVE); |
---|
1751 | if (sc->mem_res == NULL) { |
---|
1752 | device_printf(dev, "could not allocate memory resources.\n"); |
---|
1753 | return (ENOMEM); |
---|
1754 | } |
---|
1755 | |
---|
1756 | /* Get IRQ resource. */ |
---|
1757 | rid = 0; |
---|
1758 | sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, |
---|
1759 | RF_ACTIVE); |
---|
1760 | if (sc->irq_res == NULL) { |
---|
1761 | device_printf(dev, "could not allocate interrupt resource.\n"); |
---|
1762 | cgem_detach(dev); |
---|
1763 | return (ENOMEM); |
---|
1764 | } |
---|
1765 | |
---|
1766 | /* Set up ifnet structure. */ |
---|
1767 | ifp = sc->ifp = if_alloc(IFT_ETHER); |
---|
1768 | if (ifp == NULL) { |
---|
1769 | device_printf(dev, "could not allocate ifnet structure\n"); |
---|
1770 | cgem_detach(dev); |
---|
1771 | return (ENOMEM); |
---|
1772 | } |
---|
1773 | ifp->if_softc = sc; |
---|
1774 | if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev)); |
---|
1775 | ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; |
---|
1776 | ifp->if_start = cgem_start; |
---|
1777 | ifp->if_ioctl = cgem_ioctl; |
---|
1778 | ifp->if_init = cgem_init; |
---|
1779 | ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | |
---|
1780 | IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM; |
---|
1781 | #ifndef __rtems__ |
---|
1782 | /* Disable hardware checksumming by default. */ |
---|
1783 | ifp->if_hwassist = 0; |
---|
1784 | ifp->if_capenable = ifp->if_capabilities & |
---|
1785 | ~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM); |
---|
1786 | #else /* __rtems__ */ |
---|
1787 | ifp->if_hwassist = CGEM_CKSUM_ASSIST; |
---|
1788 | ifp->if_capenable = ifp->if_capabilities; |
---|
1789 | #endif /* __rtems__ */ |
---|
1790 | ifp->if_snd.ifq_drv_maxlen = CGEM_NUM_TX_DESCS; |
---|
1791 | IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen); |
---|
1792 | IFQ_SET_READY(&ifp->if_snd); |
---|
1793 | |
---|
1794 | sc->if_old_flags = ifp->if_flags; |
---|
1795 | sc->rxbufs = DEFAULT_NUM_RX_BUFS; |
---|
1796 | sc->rxhangwar = 1; |
---|
1797 | |
---|
1798 | /* Reset hardware. */ |
---|
1799 | CGEM_LOCK(sc); |
---|
1800 | cgem_reset(sc); |
---|
1801 | CGEM_UNLOCK(sc); |
---|
1802 | |
---|
1803 | /* Attach phy to mii bus. */ |
---|
1804 | err = mii_attach(dev, &sc->miibus, ifp, |
---|
1805 | cgem_ifmedia_upd, cgem_ifmedia_sts, |
---|
1806 | BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0); |
---|
1807 | if (err) { |
---|
1808 | device_printf(dev, "attaching PHYs failed\n"); |
---|
1809 | cgem_detach(dev); |
---|
1810 | return (err); |
---|
1811 | } |
---|
1812 | |
---|
1813 | /* Set up TX and RX descriptor area. */ |
---|
1814 | err = cgem_setup_descs(sc); |
---|
1815 | if (err) { |
---|
1816 | device_printf(dev, "could not set up dma mem for descs.\n"); |
---|
1817 | cgem_detach(dev); |
---|
1818 | return (ENOMEM); |
---|
1819 | } |
---|
1820 | |
---|
1821 | /* Get a MAC address. */ |
---|
1822 | cgem_get_mac(sc, eaddr); |
---|
1823 | |
---|
1824 | /* Start ticks. */ |
---|
1825 | callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0); |
---|
1826 | |
---|
1827 | ether_ifattach(ifp, eaddr); |
---|
1828 | |
---|
1829 | err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE | |
---|
1830 | INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand); |
---|
1831 | if (err) { |
---|
1832 | device_printf(dev, "could not set interrupt handler.\n"); |
---|
1833 | ether_ifdetach(ifp); |
---|
1834 | cgem_detach(dev); |
---|
1835 | return (err); |
---|
1836 | } |
---|
1837 | |
---|
1838 | cgem_add_sysctls(dev); |
---|
1839 | |
---|
1840 | return (0); |
---|
1841 | } |
---|
1842 | |
---|
1843 | static int |
---|
1844 | cgem_detach(device_t dev) |
---|
1845 | { |
---|
1846 | struct cgem_softc *sc = device_get_softc(dev); |
---|
1847 | #ifndef __rtems__ |
---|
1848 | int i; |
---|
1849 | #endif /* __rtems__ */ |
---|
1850 | |
---|
1851 | if (sc == NULL) |
---|
1852 | return (ENODEV); |
---|
1853 | |
---|
1854 | if (device_is_attached(dev)) { |
---|
1855 | CGEM_LOCK(sc); |
---|
1856 | cgem_stop(sc); |
---|
1857 | CGEM_UNLOCK(sc); |
---|
1858 | callout_drain(&sc->tick_ch); |
---|
1859 | sc->ifp->if_flags &= ~IFF_UP; |
---|
1860 | ether_ifdetach(sc->ifp); |
---|
1861 | } |
---|
1862 | |
---|
1863 | if (sc->miibus != NULL) { |
---|
1864 | device_delete_child(dev, sc->miibus); |
---|
1865 | sc->miibus = NULL; |
---|
1866 | } |
---|
1867 | |
---|
1868 | /* Release resources. */ |
---|
1869 | if (sc->mem_res != NULL) { |
---|
1870 | bus_release_resource(dev, SYS_RES_MEMORY, |
---|
1871 | rman_get_rid(sc->mem_res), sc->mem_res); |
---|
1872 | sc->mem_res = NULL; |
---|
1873 | } |
---|
1874 | if (sc->irq_res != NULL) { |
---|
1875 | if (sc->intrhand) |
---|
1876 | bus_teardown_intr(dev, sc->irq_res, sc->intrhand); |
---|
1877 | bus_release_resource(dev, SYS_RES_IRQ, |
---|
1878 | rman_get_rid(sc->irq_res), sc->irq_res); |
---|
1879 | sc->irq_res = NULL; |
---|
1880 | } |
---|
1881 | |
---|
1882 | /* Release DMA resources. */ |
---|
1883 | if (sc->rxring != NULL) { |
---|
1884 | if (sc->rxring_physaddr != 0) { |
---|
1885 | bus_dmamap_unload(sc->desc_dma_tag, sc->rxring_dma_map); |
---|
1886 | sc->rxring_physaddr = 0; |
---|
1887 | } |
---|
1888 | bus_dmamem_free(sc->desc_dma_tag, __DEVOLATILE(void *, sc->rxring), |
---|
1889 | sc->rxring_dma_map); |
---|
1890 | sc->rxring = NULL; |
---|
1891 | #ifndef __rtems__ |
---|
1892 | for (i = 0; i < CGEM_NUM_RX_DESCS; i++) |
---|
1893 | if (sc->rxring_m_dmamap[i] != NULL) { |
---|
1894 | bus_dmamap_destroy(sc->mbuf_dma_tag, |
---|
1895 | sc->rxring_m_dmamap[i]); |
---|
1896 | sc->rxring_m_dmamap[i] = NULL; |
---|
1897 | } |
---|
1898 | #endif /* __rtems__ */ |
---|
1899 | } |
---|
1900 | if (sc->txring != NULL) { |
---|
1901 | if (sc->txring_physaddr != 0) { |
---|
1902 | bus_dmamap_unload(sc->desc_dma_tag, sc->txring_dma_map); |
---|
1903 | sc->txring_physaddr = 0; |
---|
1904 | } |
---|
1905 | bus_dmamem_free(sc->desc_dma_tag, __DEVOLATILE(void *, sc->txring), |
---|
1906 | sc->txring_dma_map); |
---|
1907 | sc->txring = NULL; |
---|
1908 | #ifndef __rtems__ |
---|
1909 | for (i = 0; i < CGEM_NUM_TX_DESCS; i++) |
---|
1910 | if (sc->txring_m_dmamap[i] != NULL) { |
---|
1911 | bus_dmamap_destroy(sc->mbuf_dma_tag, |
---|
1912 | sc->txring_m_dmamap[i]); |
---|
1913 | sc->txring_m_dmamap[i] = NULL; |
---|
1914 | } |
---|
1915 | #endif /* __rtems__ */ |
---|
1916 | } |
---|
1917 | if (sc->desc_dma_tag != NULL) { |
---|
1918 | bus_dma_tag_destroy(sc->desc_dma_tag); |
---|
1919 | sc->desc_dma_tag = NULL; |
---|
1920 | } |
---|
1921 | if (sc->mbuf_dma_tag != NULL) { |
---|
1922 | bus_dma_tag_destroy(sc->mbuf_dma_tag); |
---|
1923 | sc->mbuf_dma_tag = NULL; |
---|
1924 | } |
---|
1925 | |
---|
1926 | bus_generic_detach(dev); |
---|
1927 | |
---|
1928 | CGEM_LOCK_DESTROY(sc); |
---|
1929 | |
---|
1930 | return (0); |
---|
1931 | } |
---|
1932 | |
---|
/*
 * Newbus method table wiring the driver entry points above into the
 * device, bus, and MII interfaces.
 */
static device_method_t cgem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, cgem_probe),
	DEVMETHOD(device_attach, cgem_attach),
	DEVMETHOD(device_detach, cgem_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached, cgem_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg, cgem_miibus_readreg),
	DEVMETHOD(miibus_writereg, cgem_miibus_writereg),
	DEVMETHOD(miibus_statchg, cgem_miibus_statchg),
	DEVMETHOD(miibus_linkchg, cgem_miibus_linkchg),

	DEVMETHOD_END
};
---|
1950 | |
---|
/* Driver declaration: name, method table, and per-instance softc size. */
static driver_t cgem_driver = {
	"cgem",
	cgem_methods,
	sizeof(struct cgem_softc),
};
---|
1956 | |
---|
/*
 * Driver registration: under FreeBSD the GEM hangs off simplebus (FDT);
 * under RTEMS it is attached directly to the nexus bus.  The miibus
 * instance managing the PHY is registered as a child of cgem.
 */
#ifndef __rtems__
DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
#else /* __rtems__ */
DRIVER_MODULE(cgem, nexus, cgem_driver, cgem_devclass, NULL, NULL);
#endif /* __rtems__ */
DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(cgem, miibus, 1, 1, 1);
MODULE_DEPEND(cgem, ether, 1, 1, 1);
---|