1 | #include <machine/rtems-bsd-kernel-space.h> |
---|
2 | |
---|
3 | /*- |
---|
4 | * Copyright (c) 2012-2014 Thomas Skibo <thomasskibo@yahoo.com> |
---|
5 | * All rights reserved. |
---|
6 | * |
---|
7 | * Redistribution and use in source and binary forms, with or without |
---|
8 | * modification, are permitted provided that the following conditions |
---|
9 | * are met: |
---|
10 | * 1. Redistributions of source code must retain the above copyright |
---|
11 | * notice, this list of conditions and the following disclaimer. |
---|
12 | * 2. Redistributions in binary form must reproduce the above copyright |
---|
13 | * notice, this list of conditions and the following disclaimer in the |
---|
14 | * documentation and/or other materials provided with the distribution. |
---|
15 | * |
---|
16 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
---|
17 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
---|
18 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
---|
19 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
---|
20 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
---|
21 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
---|
22 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
---|
23 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
---|
24 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
---|
25 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
---|
26 | * SUCH DAMAGE. |
---|
27 | */ |
---|
28 | |
---|
29 | /* |
---|
30 | * A network interface driver for Cadence GEM Gigabit Ethernet |
---|
31 | * interface such as the one used in Xilinx Zynq-7000 SoC. |
---|
32 | * |
---|
33 | * Reference: Zynq-7000 All Programmable SoC Technical Reference Manual. |
---|
34 | * (v1.4) November 16, 2012. Xilinx doc UG585. GEM is covered in Ch. 16 |
---|
35 | * and register definitions are in appendix B.18. |
---|
36 | */ |
---|
37 | |
---|
38 | #include <sys/cdefs.h> |
---|
39 | __FBSDID("$FreeBSD$"); |
---|
40 | |
---|
41 | #include <rtems/bsd/sys/param.h> |
---|
42 | #include <sys/systm.h> |
---|
43 | #include <sys/bus.h> |
---|
44 | #include <sys/kernel.h> |
---|
45 | #include <sys/malloc.h> |
---|
46 | #include <sys/mbuf.h> |
---|
47 | #include <sys/module.h> |
---|
48 | #include <sys/rman.h> |
---|
49 | #include <sys/socket.h> |
---|
50 | #include <sys/sockio.h> |
---|
51 | #include <sys/sysctl.h> |
---|
52 | |
---|
53 | #include <machine/bus.h> |
---|
54 | |
---|
55 | #include <net/ethernet.h> |
---|
56 | #include <net/if.h> |
---|
57 | #include <net/if_var.h> |
---|
58 | #include <net/if_arp.h> |
---|
59 | #include <net/if_dl.h> |
---|
60 | #include <net/if_media.h> |
---|
61 | #include <net/if_mib.h> |
---|
62 | #include <net/if_types.h> |
---|
63 | |
---|
64 | #ifdef INET |
---|
65 | #include <netinet/in.h> |
---|
66 | #include <netinet/in_systm.h> |
---|
67 | #include <netinet/in_var.h> |
---|
68 | #include <netinet/ip.h> |
---|
69 | #endif |
---|
70 | |
---|
71 | #include <net/bpf.h> |
---|
72 | #include <net/bpfdesc.h> |
---|
73 | |
---|
74 | #ifndef __rtems__ |
---|
75 | #include <dev/fdt/fdt_common.h> |
---|
76 | #include <dev/ofw/ofw_bus.h> |
---|
77 | #include <dev/ofw/ofw_bus_subr.h> |
---|
78 | #endif /* __rtems__ */ |
---|
79 | |
---|
80 | #include <dev/mii/mii.h> |
---|
81 | #include <dev/mii/miivar.h> |
---|
82 | |
---|
83 | #include <dev/cadence/if_cgem_hw.h> |
---|
84 | |
---|
85 | #include <rtems/bsd/local/miibus_if.h> |
---|
86 | #ifdef __rtems__ |
---|
87 | #pragma GCC diagnostic ignored "-Wpointer-sign" |
---|
88 | #endif /* __rtems__ */ |
---|
89 | |
---|
#define IF_CGEM_NAME "cgem"

#define CGEM_NUM_RX_DESCS	512	/* size of receive descriptor ring */
#define CGEM_NUM_TX_DESCS	512	/* size of transmit descriptor ring */

/* Bytes needed for the larger of the two descriptor rings; each ring is
 * allocated from a single DMA tag of this maximum size.
 */
#define MAX_DESC_RING_SIZE (MAX(CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),\
				CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc)))


/* Default for sysctl rxbufs. Must be < CGEM_NUM_RX_DESCS of course. */
#define DEFAULT_NUM_RX_BUFS	256	/* number of receive bufs to queue. */

#define TX_MAX_DMA_SEGS		8	/* maximum segs in a tx mbuf dma */

/* Checksum-offload assistance flags handled by the hardware (IPv4 header
 * plus TCP/UDP over v4 and v6).
 */
#define CGEM_CKSUM_ASSIST	(CSUM_IP | CSUM_TCP | CSUM_UDP | \
				 CSUM_TCP_IPV6 | CSUM_UDP_IPV6)
---|
106 | |
---|
/*
 * Per-controller driver state.  One instance per GEM device; mutable
 * fields are protected by sc_mtx (see CGEM_LOCK/CGEM_ASSERT_LOCKED).
 */
struct cgem_softc {
	struct ifnet		*ifp;		/* network interface */
	struct mtx		sc_mtx;		/* serializes driver state */
	device_t		dev;		/* newbus device handle */
	device_t		miibus;		/* attached MII bus */
	u_int			mii_media_active;	/* last active media */
	int			if_old_flags;	/* if_flags at last ioctl */
	struct resource		*mem_res;	/* register bank */
	struct resource		*irq_res;	/* interrupt line */
	void			*intrhand;	/* interrupt handler cookie */
	struct callout		tick_ch;	/* periodic timer (cgem_tick) */
	uint32_t		net_ctl_shadow;	/* cached CGEM_NET_CTRL value */
	int			ref_clk_num;	/* unit for cgem_set_ref_clk() */
	u_char			eaddr[6];	/* MAC address in use */

	bus_dma_tag_t		desc_dma_tag;	/* tag for descriptor rings */
	bus_dma_tag_t		mbuf_dma_tag;	/* tag for rx/tx mbufs */

	/* receive descriptor ring */
	struct cgem_rx_desc volatile	*rxring;
	bus_addr_t		rxring_physaddr;
	struct mbuf		*rxring_m[CGEM_NUM_RX_DESCS];
#ifndef __rtems__
	bus_dmamap_t		rxring_m_dmamap[CGEM_NUM_RX_DESCS];
#endif /* __rtems__ */
	int			rxring_hd_ptr;	/* where to put rcv bufs */
	int			rxring_tl_ptr;	/* where to get receives */
	int			rxring_queued;	/* how many rcv bufs queued */
	bus_dmamap_t		rxring_dma_map;
	int			rxbufs;		/* tunable number rcv bufs */
	int			rxhangwar;	/* rx hang work-around */
	u_int			rxoverruns;	/* rx overruns */
	u_int			rxnobufs;	/* rx buf ring empty events */
	u_int			rxdmamapfails;	/* rx dmamap failures */
	uint32_t		rx_frames_prev;	/* rx frame count at last tick */

	/* transmit descriptor ring */
	struct cgem_tx_desc volatile	*txring;
	bus_addr_t		txring_physaddr;
	struct mbuf		*txring_m[CGEM_NUM_TX_DESCS];
#ifndef __rtems__
	bus_dmamap_t		txring_m_dmamap[CGEM_NUM_TX_DESCS];
#endif /* __rtems__ */
	int			txring_hd_ptr;	/* where to put next xmits */
	int			txring_tl_ptr;	/* next xmit mbuf to free */
	int			txring_queued;	/* num xmits segs queued */
	bus_dmamap_t		txring_dma_map;
	u_int			txfull;		/* tx ring full events */
	u_int			txdefrags;	/* tx calls to m_defrag() */
	u_int			txdefragfails;	/* tx m_defrag() failures */
	u_int			txdmamapfails;	/* tx dmamap failures */

	/* hardware provided statistics, accumulated from the (clear-on-read
	 * style) hardware counter registers by cgem_poll_hw_stats().
	 */
	struct cgem_hw_stats {
		uint64_t		tx_bytes;
		uint32_t		tx_frames;
		uint32_t		tx_frames_bcast;
		uint32_t		tx_frames_multi;
		uint32_t		tx_frames_pause;
		uint32_t		tx_frames_64b;
		uint32_t		tx_frames_65to127b;
		uint32_t		tx_frames_128to255b;
		uint32_t		tx_frames_256to511b;
		uint32_t		tx_frames_512to1023b;
		uint32_t		tx_frames_1024to1536b;
		uint32_t		tx_under_runs;
		uint32_t		tx_single_collisn;
		uint32_t		tx_multi_collisn;
		uint32_t		tx_excsv_collisn;
		uint32_t		tx_late_collisn;
		uint32_t		tx_deferred_frames;
		uint32_t		tx_carrier_sense_errs;

		uint64_t		rx_bytes;
		uint32_t		rx_frames;
		uint32_t		rx_frames_bcast;
		uint32_t		rx_frames_multi;
		uint32_t		rx_frames_pause;
		uint32_t		rx_frames_64b;
		uint32_t		rx_frames_65to127b;
		uint32_t		rx_frames_128to255b;
		uint32_t		rx_frames_256to511b;
		uint32_t		rx_frames_512to1023b;
		uint32_t		rx_frames_1024to1536b;
		uint32_t		rx_frames_undersize;
		uint32_t		rx_frames_oversize;
		uint32_t		rx_frames_jabber;
		uint32_t		rx_frames_fcs_errs;
		uint32_t		rx_frames_length_errs;
		uint32_t		rx_symbol_errs;
		uint32_t		rx_align_errs;
		uint32_t		rx_resource_errs;
		uint32_t		rx_overrun_errs;
		uint32_t		rx_ip_hdr_csum_errs;
		uint32_t		rx_tcp_csum_errs;
		uint32_t		rx_udp_csum_errs;
	} stats;
};
---|
205 | |
---|
/* Accessors for the memory-mapped GEM register bank. */
#define RD4(sc, off)		(bus_read_4((sc)->mem_res, (off)))
#define WR4(sc, off, val)	(bus_write_4((sc)->mem_res, (off), (val)))
/*
 * Bus-space barrier over a register range.  The original expansion was
 * missing its closing parenthesis, so any use of BARRIER() would fail
 * to compile; the parentheses are balanced here.
 */
#define BARRIER(sc, off, len, flags) \
	(bus_barrier((sc)->mem_res, (off), (len), (flags)))

/* Driver-state lock helpers. */
#define CGEM_LOCK(sc)		mtx_lock(&(sc)->sc_mtx)
#define CGEM_UNLOCK(sc)		mtx_unlock(&(sc)->sc_mtx)
#define CGEM_LOCK_INIT(sc)	\
	mtx_init(&(sc)->sc_mtx, device_get_nameunit((sc)->dev), \
		 MTX_NETWORK_LOCK, MTX_DEF)
#define CGEM_LOCK_DESTROY(sc)	mtx_destroy(&(sc)->sc_mtx)
#define CGEM_ASSERT_LOCKED(sc)	mtx_assert(&(sc)->sc_mtx, MA_OWNED)
---|
218 | |
---|
219 | /* Allow platforms to optionally provide a way to set the reference clock. */ |
---|
220 | int cgem_set_ref_clk(int unit, int frequency); |
---|
221 | |
---|
222 | static devclass_t cgem_devclass; |
---|
223 | |
---|
224 | static int cgem_probe(device_t dev); |
---|
225 | static int cgem_attach(device_t dev); |
---|
226 | static int cgem_detach(device_t dev); |
---|
227 | static void cgem_tick(void *); |
---|
228 | static void cgem_intr(void *); |
---|
229 | |
---|
230 | static void cgem_mediachange(struct cgem_softc *, struct mii_data *); |
---|
231 | |
---|
232 | static void |
---|
233 | cgem_get_mac(struct cgem_softc *sc, u_char eaddr[]) |
---|
234 | { |
---|
235 | int i; |
---|
236 | uint32_t rnd; |
---|
237 | |
---|
238 | /* See if boot loader gave us a MAC address already. */ |
---|
239 | for (i = 0; i < 4; i++) { |
---|
240 | uint32_t low = RD4(sc, CGEM_SPEC_ADDR_LOW(i)); |
---|
241 | uint32_t high = RD4(sc, CGEM_SPEC_ADDR_HI(i)) & 0xffff; |
---|
242 | if (low != 0 || high != 0) { |
---|
243 | eaddr[0] = low & 0xff; |
---|
244 | eaddr[1] = (low >> 8) & 0xff; |
---|
245 | eaddr[2] = (low >> 16) & 0xff; |
---|
246 | eaddr[3] = (low >> 24) & 0xff; |
---|
247 | eaddr[4] = high & 0xff; |
---|
248 | eaddr[5] = (high >> 8) & 0xff; |
---|
249 | break; |
---|
250 | } |
---|
251 | } |
---|
252 | |
---|
253 | /* No MAC from boot loader? Assign a random one. */ |
---|
254 | if (i == 4) { |
---|
255 | rnd = arc4random(); |
---|
256 | |
---|
257 | eaddr[0] = 'b'; |
---|
258 | eaddr[1] = 's'; |
---|
259 | eaddr[2] = 'd'; |
---|
260 | eaddr[3] = (rnd >> 16) & 0xff; |
---|
261 | eaddr[4] = (rnd >> 8) & 0xff; |
---|
262 | eaddr[5] = rnd & 0xff; |
---|
263 | |
---|
264 | device_printf(sc->dev, "no mac address found, assigning " |
---|
265 | "random: %02x:%02x:%02x:%02x:%02x:%02x\n", |
---|
266 | eaddr[0], eaddr[1], eaddr[2], |
---|
267 | eaddr[3], eaddr[4], eaddr[5]); |
---|
268 | } |
---|
269 | |
---|
270 | /* Move address to first slot and zero out the rest. */ |
---|
271 | WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) | |
---|
272 | (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]); |
---|
273 | WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]); |
---|
274 | |
---|
275 | for (i = 1; i < 4; i++) { |
---|
276 | WR4(sc, CGEM_SPEC_ADDR_LOW(i), 0); |
---|
277 | WR4(sc, CGEM_SPEC_ADDR_HI(i), 0); |
---|
278 | } |
---|
279 | } |
---|
280 | |
---|
/* cgem_mac_hash(): map a 48-bit MAC address to a 6-bit hash value.
 * The 6-bit hash selects one bit of the controller's 64-bit hash
 * register; setting that bit enables reception of all frames whose
 * destination address hashes to the same value.
 *
 * Per sec. 16.2.3 of the Zynq-7000 Technical Reference Manual, hash
 * bit i is the exclusive-or of address bits i, i+6, i+12, ... i.e.
 * every sixth bit of the destination address folds into the same
 * hash bit.
 */
static int
cgem_mac_hash(u_char eaddr[])
{
	int bitpos;
	int result = 0;

	/* Fold address bit `bitpos` into hash bit (bitpos mod 6). */
	for (bitpos = 0; bitpos < 48; bitpos++)
		if ((eaddr[bitpos >> 3] & (1 << (bitpos & 7))) != 0)
			result ^= (1 << (bitpos % 6));

	return result;
}
---|
305 | |
---|
306 | /* After any change in rx flags or multi-cast addresses, set up |
---|
307 | * hash registers and net config register bits. |
---|
308 | */ |
---|
309 | static void |
---|
310 | cgem_rx_filter(struct cgem_softc *sc) |
---|
311 | { |
---|
312 | struct ifnet *ifp = sc->ifp; |
---|
313 | struct ifmultiaddr *ifma; |
---|
314 | int index; |
---|
315 | uint32_t hash_hi, hash_lo; |
---|
316 | uint32_t net_cfg; |
---|
317 | |
---|
318 | hash_hi = 0; |
---|
319 | hash_lo = 0; |
---|
320 | |
---|
321 | net_cfg = RD4(sc, CGEM_NET_CFG); |
---|
322 | |
---|
323 | net_cfg &= ~(CGEM_NET_CFG_MULTI_HASH_EN | |
---|
324 | CGEM_NET_CFG_NO_BCAST | |
---|
325 | CGEM_NET_CFG_COPY_ALL); |
---|
326 | |
---|
327 | if ((ifp->if_flags & IFF_PROMISC) != 0) |
---|
328 | net_cfg |= CGEM_NET_CFG_COPY_ALL; |
---|
329 | else { |
---|
330 | if ((ifp->if_flags & IFF_BROADCAST) == 0) |
---|
331 | net_cfg |= CGEM_NET_CFG_NO_BCAST; |
---|
332 | if ((ifp->if_flags & IFF_ALLMULTI) != 0) { |
---|
333 | hash_hi = 0xffffffff; |
---|
334 | hash_lo = 0xffffffff; |
---|
335 | } else { |
---|
336 | if_maddr_rlock(ifp); |
---|
337 | TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { |
---|
338 | if (ifma->ifma_addr->sa_family != AF_LINK) |
---|
339 | continue; |
---|
340 | index = cgem_mac_hash( |
---|
341 | LLADDR((struct sockaddr_dl *) |
---|
342 | ifma->ifma_addr)); |
---|
343 | if (index > 31) |
---|
344 | hash_hi |= (1<<(index-32)); |
---|
345 | else |
---|
346 | hash_lo |= (1<<index); |
---|
347 | } |
---|
348 | if_maddr_runlock(ifp); |
---|
349 | } |
---|
350 | |
---|
351 | if (hash_hi != 0 || hash_lo != 0) |
---|
352 | net_cfg |= CGEM_NET_CFG_MULTI_HASH_EN; |
---|
353 | } |
---|
354 | |
---|
355 | WR4(sc, CGEM_HASH_TOP, hash_hi); |
---|
356 | WR4(sc, CGEM_HASH_BOT, hash_lo); |
---|
357 | WR4(sc, CGEM_NET_CFG, net_cfg); |
---|
358 | } |
---|
359 | |
---|
360 | /* For bus_dmamap_load() callback. */ |
---|
361 | static void |
---|
362 | cgem_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) |
---|
363 | { |
---|
364 | |
---|
365 | if (nsegs != 1 || error != 0) |
---|
366 | return; |
---|
367 | *(bus_addr_t *)arg = segs[0].ds_addr; |
---|
368 | } |
---|
369 | |
---|
/* Create DMA'able descriptor rings.
 *
 * Creates the descriptor and mbuf DMA tags, allocates and loads
 * coherent (non-cached) memory for the RX and TX descriptor rings, and
 * initializes every descriptor to its idle state (RX: owned, no
 * buffer; TX: used).  Returns 0 or a bus_dma errno.
 *
 * NOTE(review): on a mid-function failure the earlier allocations are
 * not unwound here — presumably the attach failure path / detach frees
 * them; verify against the caller.
 */
static int
cgem_setup_descs(struct cgem_softc *sc)
{
	int i, err;

	sc->txring = NULL;
	sc->rxring = NULL;

	/* Allocate non-cached DMA space for RX and TX descriptors.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
				 BUS_SPACE_MAXADDR_32BIT,
				 BUS_SPACE_MAXADDR,
				 NULL, NULL,
				 MAX_DESC_RING_SIZE,
				 1,
				 MAX_DESC_RING_SIZE,
				 0,
				 busdma_lock_mutex,
				 &sc->sc_mtx,
				 &sc->desc_dma_tag);
	if (err)
		return (err);

	/* Set up a bus_dma_tag for mbufs. */
	err = bus_dma_tag_create(bus_get_dma_tag(sc->dev), 1, 0,
				 BUS_SPACE_MAXADDR_32BIT,
				 BUS_SPACE_MAXADDR,
				 NULL, NULL,
				 MCLBYTES,
				 TX_MAX_DMA_SEGS,
				 MCLBYTES,
				 0,
				 busdma_lock_mutex,
				 &sc->sc_mtx,
				 &sc->mbuf_dma_tag);
	if (err)
		return (err);

	/* Allocate DMA memory in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
			       (void **)&sc->rxring,
			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			       &sc->rxring_dma_map);
	if (err)
		return (err);

	/* Load descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->rxring_dma_map,
			      (void *)sc->rxring,
			      CGEM_NUM_RX_DESCS*sizeof(struct cgem_rx_desc),
			      cgem_getaddr, &sc->rxring_physaddr,
			      BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize RX descriptors: mark each owned (no buffer attached
	 * yet) and pre-create a dmamap per slot for later mbuf loads.
	 */
	for (i = 0; i < CGEM_NUM_RX_DESCS; i++) {
		sc->rxring[i].addr = CGEM_RXDESC_OWN;
		sc->rxring[i].ctl = 0;
		sc->rxring_m[i] = NULL;
#ifndef __rtems__
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
					&sc->rxring_m_dmamap[i]);
		if (err)
			return (err);
#endif /* __rtems__ */
	}
	/* Last descriptor carries the ring-wrap flag. */
	sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP;

	sc->rxring_hd_ptr = 0;
	sc->rxring_tl_ptr = 0;
	sc->rxring_queued = 0;

	/* Allocate DMA memory for TX descriptors in non-cacheable space. */
	err = bus_dmamem_alloc(sc->desc_dma_tag,
			       (void **)&sc->txring,
			       BUS_DMA_NOWAIT | BUS_DMA_COHERENT,
			       &sc->txring_dma_map);
	if (err)
		return (err);

	/* Load TX descriptor DMA memory. */
	err = bus_dmamap_load(sc->desc_dma_tag, sc->txring_dma_map,
			      (void *)sc->txring,
			      CGEM_NUM_TX_DESCS*sizeof(struct cgem_tx_desc),
			      cgem_getaddr, &sc->txring_physaddr,
			      BUS_DMA_NOWAIT);
	if (err)
		return (err);

	/* Initialize TX descriptor ring: every descriptor starts in the
	 * USED (available-to-software) state.
	 */
	for (i = 0; i < CGEM_NUM_TX_DESCS; i++) {
		sc->txring[i].addr = 0;
		sc->txring[i].ctl = CGEM_TXDESC_USED;
		sc->txring_m[i] = NULL;
#ifndef __rtems__
		err = bus_dmamap_create(sc->mbuf_dma_tag, 0,
					&sc->txring_m_dmamap[i]);
		if (err)
			return (err);
#endif /* __rtems__ */
	}
	/* Last descriptor carries the ring-wrap flag. */
	sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP;

	sc->txring_hd_ptr = 0;
	sc->txring_tl_ptr = 0;
	sc->txring_queued = 0;

	return (0);
}
---|
482 | |
---|
/* Fill receive descriptor ring with mbufs.
 *
 * Allocates cluster mbufs and attaches them to free RX descriptors
 * until rxring_queued reaches the rxbufs tunable or allocation/mapping
 * fails.  Called with the driver lock held.
 */
static void
cgem_fill_rqueue(struct cgem_softc *sc)
{
	struct mbuf *m = NULL;
#ifndef __rtems__
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	int nsegs;
#else /* __rtems__ */
	/* RTEMS path maps each cluster as exactly one segment. */
	bus_dma_segment_t segs[1];
#endif /* __rtems__ */

	CGEM_ASSERT_LOCKED(sc);

	while (sc->rxring_queued < sc->rxbufs) {
		/* Get a cluster mbuf. */
		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL)
			break;

		m->m_len = MCLBYTES;
		m->m_pkthdr.len = MCLBYTES;
		m->m_pkthdr.rcvif = sc->ifp;

#ifndef __rtems__
		/* Load map and plug in physical address. */
		if (bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
			      sc->rxring_m_dmamap[sc->rxring_hd_ptr], m,
			      segs, &nsegs, BUS_DMA_NOWAIT)) {
			sc->rxdmamapfails++;
			m_free(m);
			break;
		}
#endif /* __rtems__ */
		sc->rxring_m[sc->rxring_hd_ptr] = m;

#ifndef __rtems__
		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->rxring_m_dmamap[sc->rxring_hd_ptr],
				BUS_DMASYNC_PREREAD);
#else /* __rtems__ */
		/* Invalidate cache lines so the CPU reads DMA'd data. */
		rtems_cache_invalidate_multiple_data_lines(m->m_data, m->m_len);
		segs[0].ds_addr = mtod(m, bus_addr_t);
#endif /* __rtems__ */

		/* Write rx descriptor and increment head pointer.  The last
		 * slot keeps the WRAP flag so the controller returns to the
		 * start of the ring.
		 */
		sc->rxring[sc->rxring_hd_ptr].ctl = 0;
		if (sc->rxring_hd_ptr == CGEM_NUM_RX_DESCS - 1) {
			sc->rxring[sc->rxring_hd_ptr].addr = segs[0].ds_addr |
				CGEM_RXDESC_WRAP;
			sc->rxring_hd_ptr = 0;
		} else
			sc->rxring[sc->rxring_hd_ptr++].addr = segs[0].ds_addr;

		sc->rxring_queued++;
	}
}
---|
541 | |
---|
/* Pull received packets off of receive descriptor ring.
 *
 * Harvests every descriptor whose OWN bit the hardware has set, builds
 * a chain of good packets, replenishes the ring, then drops the driver
 * lock while handing the chain to if_input (re-acquiring it before
 * returning).  Called with the driver lock held.
 */
static void
cgem_recv(struct cgem_softc *sc)
{
	struct ifnet *ifp = sc->ifp;
	struct mbuf *m, *m_hd, **m_tl;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* Pick up all packets in which the OWN bit is set. */
	m_hd = NULL;
	m_tl = &m_hd;
	while (sc->rxring_queued > 0 &&
	       (sc->rxring[sc->rxring_tl_ptr].addr & CGEM_RXDESC_OWN) != 0) {

		ctl = sc->rxring[sc->rxring_tl_ptr].ctl;

		/* Grab filled mbuf. */
		m = sc->rxring_m[sc->rxring_tl_ptr];
		sc->rxring_m[sc->rxring_tl_ptr] = NULL;

#ifndef __rtems__
		/* Sync cache with receive buffer. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->rxring_m_dmamap[sc->rxring_tl_ptr],
				BUS_DMASYNC_POSTREAD);

		/* Unload dmamap. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
			sc->rxring_m_dmamap[sc->rxring_tl_ptr]);
#else /* __rtems__ */
		/* Invalidate cache before the CPU reads the DMA'd frame. */
		rtems_cache_invalidate_multiple_data_lines(m->m_data, m->m_len);
#endif /* __rtems__ */

		/* Increment tail pointer. */
		if (++sc->rxring_tl_ptr == CGEM_NUM_RX_DESCS)
			sc->rxring_tl_ptr = 0;
		sc->rxring_queued--;

		/* Check FCS and make sure entire packet landed in one mbuf
		 * cluster (which is much bigger than the largest ethernet
		 * packet).
		 */
		if ((ctl & CGEM_RXDESC_BAD_FCS) != 0 ||
		    (ctl & (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) !=
		    (CGEM_RXDESC_SOF | CGEM_RXDESC_EOF)) {
			/* discard. */
			m_free(m);
#ifndef __rtems__
			if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
#else /* __rtems__ */
			ifp->if_ierrors++;
#endif /* __rtems__ */
			continue;
		}

		/* Ready it to hand off to upper layers.
		 * NOTE(review): skipping ETHER_ALIGN bytes assumes the
		 * controller was configured to prepend two pad bytes to
		 * each frame — verify against the init code.
		 */
		m->m_data += ETHER_ALIGN;
		m->m_len = (ctl & CGEM_RXDESC_LENGTH_MASK);
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len;

		/* Are we using hardware checksumming?  Check the
		 * status in the receive descriptor.
		 */
		if ((ifp->if_capenable & IFCAP_RXCSUM) != 0) {
			/* TCP or UDP checks out, IP checks out too. */
			if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_TCP_GOOD ||
			    (ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
			    CGEM_RXDESC_CKSUM_STAT_UDP_GOOD) {
				m->m_pkthdr.csum_flags |=
					CSUM_IP_CHECKED | CSUM_IP_VALID |
					CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
				m->m_pkthdr.csum_data = 0xffff;
			} else if ((ctl & CGEM_RXDESC_CKSUM_STAT_MASK) ==
				   CGEM_RXDESC_CKSUM_STAT_IP_GOOD) {
				/* Only IP checks out. */
				m->m_pkthdr.csum_flags |=
					CSUM_IP_CHECKED | CSUM_IP_VALID;
				m->m_pkthdr.csum_data = 0xffff;
			}
		}

		/* Queue it up for delivery below. */
		*m_tl = m;
		m_tl = &m->m_next;
	}

	/* Replenish receive buffers. */
	cgem_fill_rqueue(sc);

	/* Unlock and send up packets.  The chain uses m_next links, which
	 * are cleared one at a time before each packet is handed up.
	 */
	CGEM_UNLOCK(sc);
	while (m_hd != NULL) {
		m = m_hd;
		m_hd = m_hd->m_next;
		m->m_next = NULL;
#ifndef __rtems__
		if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
#else /* __rtems__ */
		ifp->if_ipackets++;
#endif /* __rtems__ */
		(*ifp->if_input)(ifp, m);
	}
	CGEM_LOCK(sc);
}
---|
650 | |
---|
/* Find completed transmits and free their mbufs.
 *
 * Walks the tail of the TX ring while the USED bit indicates the
 * hardware is done with a frame's first descriptor, frees the attached
 * mbuf, accounts errors, and reclaims all descriptors of the frame.
 * Clears IFF_DRV_OACTIVE once anything is reclaimed.  Called with the
 * driver lock held.
 */
static void
cgem_clean_tx(struct cgem_softc *sc)
{
	struct mbuf *m;
	uint32_t ctl;

	CGEM_ASSERT_LOCKED(sc);

	/* free up finished transmits. */
	while (sc->txring_queued > 0 &&
	       ((ctl = sc->txring[sc->txring_tl_ptr].ctl) &
		CGEM_TXDESC_USED) != 0) {

#ifndef __rtems__
		/* Sync cache.  nop? */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->txring_m_dmamap[sc->txring_tl_ptr],
				BUS_DMASYNC_POSTWRITE);

		/* Unload DMA map. */
		bus_dmamap_unload(sc->mbuf_dma_tag,
				  sc->txring_m_dmamap[sc->txring_tl_ptr]);
#endif /* __rtems__ */

		/* Free up the mbuf. */
		m = sc->txring_m[sc->txring_tl_ptr];
		sc->txring_m[sc->txring_tl_ptr] = NULL;
		m_freem(m);

		/* Check the status. */
		if ((ctl & CGEM_TXDESC_AHB_ERR) != 0) {
			/* Serious bus error. log to console. */
			device_printf(sc->dev, "cgem_clean_tx: Whoa! "
				      "AHB error, addr=0x%x\n",
				      sc->txring[sc->txring_tl_ptr].addr);
		} else if ((ctl & (CGEM_TXDESC_RETRY_ERR |
				   CGEM_TXDESC_LATE_COLL)) != 0) {
#ifndef __rtems__
			if_inc_counter(sc->ifp, IFCOUNTER_OERRORS, 1);
#else /* __rtems__ */
			sc->ifp->if_oerrors++;
#endif /* __rtems__ */
		} else
#ifndef __rtems__
			if_inc_counter(sc->ifp, IFCOUNTER_OPACKETS, 1);
#else /* __rtems__ */
			sc->ifp->if_opackets++;
#endif /* __rtems__ */

		/* If the packet spanned more than one tx descriptor,
		 * skip descriptors until we find the end so that only
		 * start-of-frame descriptors are processed.  The USED
		 * bit is set on each continuation descriptor as we go
		 * since the hardware only sets it on the first one.
		 */
		while ((ctl & CGEM_TXDESC_LAST_BUF) == 0) {
			if ((ctl & CGEM_TXDESC_WRAP) != 0)
				sc->txring_tl_ptr = 0;
			else
				sc->txring_tl_ptr++;
			sc->txring_queued--;

			ctl = sc->txring[sc->txring_tl_ptr].ctl;

			sc->txring[sc->txring_tl_ptr].ctl =
				ctl | CGEM_TXDESC_USED;
		}

		/* Next descriptor. */
		if ((ctl & CGEM_TXDESC_WRAP) != 0)
			sc->txring_tl_ptr = 0;
		else
			sc->txring_tl_ptr++;
		sc->txring_queued--;

		/* Ring space was reclaimed; allow transmits again. */
		sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	}
}
---|
728 | |
---|
#ifdef __rtems__
/*
 * Flatten an mbuf chain into up to TX_MAX_DMA_SEGS DMA segments,
 * flushing each buffer's cache lines so the controller sees the data.
 * Zero-length mbufs are skipped.  On success *nsegs holds the segment
 * count and 0 is returned; EFBIG is returned if any mbufs remain once
 * the segment array is full.
 */
static int
cgem_get_segs_for_tx(struct mbuf *m, bus_dma_segment_t segs[TX_MAX_DMA_SEGS],
    int *nsegs)
{
	int count = 0;

	while (m != NULL) {
		if (count == TX_MAX_DMA_SEGS)
			return (EFBIG);

		if (m->m_len > 0) {
			segs[count].ds_addr = mtod(m, bus_addr_t);
			segs[count].ds_len = m->m_len;
			rtems_cache_flush_multiple_data_lines(m->m_data,
			    m->m_len);
			count++;
		}

		m = m->m_next;
	}

	*nsegs = count;

	return (0);
}
#endif /* __rtems__ */
---|
/* Start transmits.
 *
 * Drains the interface send queue into the TX descriptor ring:
 * each packet is DMA-mapped (defragmenting once on EFBIG), written
 * into consecutive descriptors back-to-front, and the transmitter is
 * kicked.  Sets IFF_DRV_OACTIVE and stops when the ring cannot hold
 * another worst-case packet.  Called with the driver lock held.
 */
static void
cgem_start_locked(struct ifnet *ifp)
{
	struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc;
	struct mbuf *m;
	bus_dma_segment_t segs[TX_MAX_DMA_SEGS];
	uint32_t ctl;
	int i, nsegs, wrap, err;

	CGEM_ASSERT_LOCKED(sc);

	if ((ifp->if_drv_flags & IFF_DRV_OACTIVE) != 0)
		return;

	for (;;) {
		/* Check that there is room in the descriptor ring. */
		if (sc->txring_queued >=
		    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {

			/* Try to make room. */
			cgem_clean_tx(sc);

			/* Still no room? */
			if (sc->txring_queued >=
			    CGEM_NUM_TX_DESCS - TX_MAX_DMA_SEGS * 2) {
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				sc->txfull++;
				break;
			}
		}

		/* Grab next transmit packet. */
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

#ifndef __rtems__
		/* Load DMA map. */
		err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
				      sc->txring_m_dmamap[sc->txring_hd_ptr],
				      m, segs, &nsegs, BUS_DMA_NOWAIT);
#else /* __rtems__ */
		err = cgem_get_segs_for_tx(m, segs, &nsegs);
#endif /* __rtems__ */
		if (err == EFBIG) {
			/* Too many segments!  defrag and try again. */
			struct mbuf *m2 = m_defrag(m, M_NOWAIT);

			if (m2 == NULL) {
				sc->txdefragfails++;
				m_freem(m);
				continue;
			}
			m = m2;
#ifndef __rtems__
			err = bus_dmamap_load_mbuf_sg(sc->mbuf_dma_tag,
				      sc->txring_m_dmamap[sc->txring_hd_ptr],
				      m, segs, &nsegs, BUS_DMA_NOWAIT);
#else /* __rtems__ */
			err = cgem_get_segs_for_tx(m, segs, &nsegs);
#endif /* __rtems__ */
			sc->txdefrags++;
		}
		if (err) {
			/* Give up. */
			m_freem(m);
			sc->txdmamapfails++;
			continue;
		}
		sc->txring_m[sc->txring_hd_ptr] = m;

#ifndef __rtems__
		/* Sync tx buffer with cache. */
		bus_dmamap_sync(sc->mbuf_dma_tag,
				sc->txring_m_dmamap[sc->txring_hd_ptr],
				BUS_DMASYNC_PREWRITE);
#endif /* __rtems__ */

		/* Set wrap flag if next packet might run off end of ring. */
		wrap = sc->txring_hd_ptr + nsegs + TX_MAX_DMA_SEGS >=
			CGEM_NUM_TX_DESCS;

		/* Fill in the TX descriptors back to front so that USED
		 * bit in first descriptor is cleared last.
		 */
		for (i = nsegs - 1; i >= 0; i--) {
			/* Descriptor address. */
			sc->txring[sc->txring_hd_ptr + i].addr =
				segs[i].ds_addr;

			/* Descriptor control word. */
			ctl = segs[i].ds_len;
			if (i == nsegs - 1) {
				ctl |= CGEM_TXDESC_LAST_BUF;
				if (wrap)
					ctl |= CGEM_TXDESC_WRAP;
			}
			sc->txring[sc->txring_hd_ptr + i].ctl = ctl;

			/* Only the first descriptor keeps the mbuf ref. */
			if (i != 0)
				sc->txring_m[sc->txring_hd_ptr + i] = NULL;
		}

		if (wrap)
			sc->txring_hd_ptr = 0;
		else
			sc->txring_hd_ptr += nsegs;
		sc->txring_queued += nsegs;

		/* Kick the transmitter. */
		WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow |
		    CGEM_NET_CTRL_START_TX);

		/* If there is a BPF listener, bounce a copy to to him. */
		ETHER_BPF_MTAP(ifp, m);
	}
}
---|
874 | |
---|
875 | static void |
---|
876 | cgem_start(struct ifnet *ifp) |
---|
877 | { |
---|
878 | struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc; |
---|
879 | |
---|
880 | CGEM_LOCK(sc); |
---|
881 | cgem_start_locked(ifp); |
---|
882 | CGEM_UNLOCK(sc); |
---|
883 | } |
---|
884 | |
---|
885 | static void |
---|
886 | cgem_poll_hw_stats(struct cgem_softc *sc) |
---|
887 | { |
---|
888 | uint32_t n; |
---|
889 | |
---|
890 | CGEM_ASSERT_LOCKED(sc); |
---|
891 | |
---|
892 | sc->stats.tx_bytes += RD4(sc, CGEM_OCTETS_TX_BOT); |
---|
893 | sc->stats.tx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_TX_TOP) << 32; |
---|
894 | |
---|
895 | sc->stats.tx_frames += RD4(sc, CGEM_FRAMES_TX); |
---|
896 | sc->stats.tx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_TX); |
---|
897 | sc->stats.tx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_TX); |
---|
898 | sc->stats.tx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_TX); |
---|
899 | sc->stats.tx_frames_64b += RD4(sc, CGEM_FRAMES_64B_TX); |
---|
900 | sc->stats.tx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_TX); |
---|
901 | sc->stats.tx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_TX); |
---|
902 | sc->stats.tx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_TX); |
---|
903 | sc->stats.tx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_TX); |
---|
904 | sc->stats.tx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_TX); |
---|
905 | sc->stats.tx_under_runs += RD4(sc, CGEM_TX_UNDERRUNS); |
---|
906 | |
---|
907 | n = RD4(sc, CGEM_SINGLE_COLL_FRAMES); |
---|
908 | sc->stats.tx_single_collisn += n; |
---|
909 | #ifndef __rtems__ |
---|
910 | if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n); |
---|
911 | #else /* __rtems__ */ |
---|
912 | sc->ifp->if_collisions += n; |
---|
913 | #endif /* __rtems__ */ |
---|
914 | n = RD4(sc, CGEM_MULTI_COLL_FRAMES); |
---|
915 | sc->stats.tx_multi_collisn += n; |
---|
916 | #ifndef __rtems__ |
---|
917 | if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n); |
---|
918 | #else /* __rtems__ */ |
---|
919 | sc->ifp->if_collisions += n; |
---|
920 | #endif /* __rtems__ */ |
---|
921 | n = RD4(sc, CGEM_EXCESSIVE_COLL_FRAMES); |
---|
922 | sc->stats.tx_excsv_collisn += n; |
---|
923 | #ifndef __rtems__ |
---|
924 | if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n); |
---|
925 | #else /* __rtems__ */ |
---|
926 | sc->ifp->if_collisions += n; |
---|
927 | #endif /* __rtems__ */ |
---|
928 | n = RD4(sc, CGEM_LATE_COLL); |
---|
929 | sc->stats.tx_late_collisn += n; |
---|
930 | #ifndef __rtems__ |
---|
931 | if_inc_counter(sc->ifp, IFCOUNTER_COLLISIONS, n); |
---|
932 | #else /* __rtems__ */ |
---|
933 | sc->ifp->if_collisions += n; |
---|
934 | #endif /* __rtems__ */ |
---|
935 | |
---|
936 | sc->stats.tx_deferred_frames += RD4(sc, CGEM_DEFERRED_TX_FRAMES); |
---|
937 | sc->stats.tx_carrier_sense_errs += RD4(sc, CGEM_CARRIER_SENSE_ERRS); |
---|
938 | |
---|
939 | sc->stats.rx_bytes += RD4(sc, CGEM_OCTETS_RX_BOT); |
---|
940 | sc->stats.rx_bytes += (uint64_t)RD4(sc, CGEM_OCTETS_RX_TOP) << 32; |
---|
941 | |
---|
942 | sc->stats.rx_frames += RD4(sc, CGEM_FRAMES_RX); |
---|
943 | sc->stats.rx_frames_bcast += RD4(sc, CGEM_BCAST_FRAMES_RX); |
---|
944 | sc->stats.rx_frames_multi += RD4(sc, CGEM_MULTI_FRAMES_RX); |
---|
945 | sc->stats.rx_frames_pause += RD4(sc, CGEM_PAUSE_FRAMES_RX); |
---|
946 | sc->stats.rx_frames_64b += RD4(sc, CGEM_FRAMES_64B_RX); |
---|
947 | sc->stats.rx_frames_65to127b += RD4(sc, CGEM_FRAMES_65_127B_RX); |
---|
948 | sc->stats.rx_frames_128to255b += RD4(sc, CGEM_FRAMES_128_255B_RX); |
---|
949 | sc->stats.rx_frames_256to511b += RD4(sc, CGEM_FRAMES_256_511B_RX); |
---|
950 | sc->stats.rx_frames_512to1023b += RD4(sc, CGEM_FRAMES_512_1023B_RX); |
---|
951 | sc->stats.rx_frames_1024to1536b += RD4(sc, CGEM_FRAMES_1024_1518B_RX); |
---|
952 | sc->stats.rx_frames_undersize += RD4(sc, CGEM_UNDERSZ_RX); |
---|
953 | sc->stats.rx_frames_oversize += RD4(sc, CGEM_OVERSZ_RX); |
---|
954 | sc->stats.rx_frames_jabber += RD4(sc, CGEM_JABBERS_RX); |
---|
955 | sc->stats.rx_frames_fcs_errs += RD4(sc, CGEM_FCS_ERRS); |
---|
956 | sc->stats.rx_frames_length_errs += RD4(sc, CGEM_LENGTH_FIELD_ERRS); |
---|
957 | sc->stats.rx_symbol_errs += RD4(sc, CGEM_RX_SYMBOL_ERRS); |
---|
958 | sc->stats.rx_align_errs += RD4(sc, CGEM_ALIGN_ERRS); |
---|
959 | sc->stats.rx_resource_errs += RD4(sc, CGEM_RX_RESOURCE_ERRS); |
---|
960 | sc->stats.rx_overrun_errs += RD4(sc, CGEM_RX_OVERRUN_ERRS); |
---|
961 | sc->stats.rx_ip_hdr_csum_errs += RD4(sc, CGEM_IP_HDR_CKSUM_ERRS); |
---|
962 | sc->stats.rx_tcp_csum_errs += RD4(sc, CGEM_TCP_CKSUM_ERRS); |
---|
963 | sc->stats.rx_udp_csum_errs += RD4(sc, CGEM_UDP_CKSUM_ERRS); |
---|
964 | } |
---|
965 | |
---|
966 | static void |
---|
967 | cgem_tick(void *arg) |
---|
968 | { |
---|
969 | struct cgem_softc *sc = (struct cgem_softc *)arg; |
---|
970 | struct mii_data *mii; |
---|
971 | |
---|
972 | CGEM_ASSERT_LOCKED(sc); |
---|
973 | |
---|
974 | /* Poll the phy. */ |
---|
975 | if (sc->miibus != NULL) { |
---|
976 | mii = device_get_softc(sc->miibus); |
---|
977 | mii_tick(mii); |
---|
978 | } |
---|
979 | |
---|
980 | /* Poll statistics registers. */ |
---|
981 | cgem_poll_hw_stats(sc); |
---|
982 | |
---|
983 | /* Check for receiver hang. */ |
---|
984 | if (sc->rxhangwar && sc->rx_frames_prev == sc->stats.rx_frames) { |
---|
985 | /* |
---|
986 | * Reset receiver logic by toggling RX_EN bit. 1usec |
---|
987 | * delay is necessary especially when operating at 100mbps |
---|
988 | * and 10mbps speeds. |
---|
989 | */ |
---|
990 | WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow & |
---|
991 | ~CGEM_NET_CTRL_RX_EN); |
---|
992 | DELAY(1); |
---|
993 | WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow); |
---|
994 | } |
---|
995 | sc->rx_frames_prev = sc->stats.rx_frames; |
---|
996 | |
---|
997 | /* Next callout in one second. */ |
---|
998 | callout_reset(&sc->tick_ch, hz, cgem_tick, sc); |
---|
999 | } |
---|
1000 | |
---|
1001 | /* Interrupt handler. */ |
---|
1002 | static void |
---|
1003 | cgem_intr(void *arg) |
---|
1004 | { |
---|
1005 | struct cgem_softc *sc = (struct cgem_softc *)arg; |
---|
1006 | uint32_t istatus; |
---|
1007 | |
---|
1008 | CGEM_LOCK(sc); |
---|
1009 | |
---|
1010 | if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) == 0) { |
---|
1011 | CGEM_UNLOCK(sc); |
---|
1012 | return; |
---|
1013 | } |
---|
1014 | |
---|
1015 | /* Read interrupt status and immediately clear the bits. */ |
---|
1016 | istatus = RD4(sc, CGEM_INTR_STAT); |
---|
1017 | WR4(sc, CGEM_INTR_STAT, istatus); |
---|
1018 | |
---|
1019 | /* Packets received. */ |
---|
1020 | if ((istatus & CGEM_INTR_RX_COMPLETE) != 0) |
---|
1021 | cgem_recv(sc); |
---|
1022 | |
---|
1023 | /* Free up any completed transmit buffers. */ |
---|
1024 | cgem_clean_tx(sc); |
---|
1025 | |
---|
1026 | /* Hresp not ok. Something is very bad with DMA. Try to clear. */ |
---|
1027 | if ((istatus & CGEM_INTR_HRESP_NOT_OK) != 0) { |
---|
1028 | device_printf(sc->dev, "cgem_intr: hresp not okay! " |
---|
1029 | "rx_status=0x%x\n", RD4(sc, CGEM_RX_STAT)); |
---|
1030 | WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_HRESP_NOT_OK); |
---|
1031 | } |
---|
1032 | |
---|
1033 | /* Receiver overrun. */ |
---|
1034 | if ((istatus & CGEM_INTR_RX_OVERRUN) != 0) { |
---|
1035 | /* Clear status bit. */ |
---|
1036 | WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_OVERRUN); |
---|
1037 | sc->rxoverruns++; |
---|
1038 | } |
---|
1039 | |
---|
1040 | /* Receiver ran out of bufs. */ |
---|
1041 | if ((istatus & CGEM_INTR_RX_USED_READ) != 0) { |
---|
1042 | WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow | |
---|
1043 | CGEM_NET_CTRL_FLUSH_DPRAM_PKT); |
---|
1044 | cgem_fill_rqueue(sc); |
---|
1045 | sc->rxnobufs++; |
---|
1046 | } |
---|
1047 | |
---|
1048 | /* Restart transmitter if needed. */ |
---|
1049 | if (!IFQ_DRV_IS_EMPTY(&sc->ifp->if_snd)) |
---|
1050 | cgem_start_locked(sc->ifp); |
---|
1051 | |
---|
1052 | CGEM_UNLOCK(sc); |
---|
1053 | } |
---|
1054 | |
---|
/*
 * Reset hardware.  Disables the controller, clears all configuration,
 * statistics, status and interrupt state, then re-enables only the MDIO
 * management port so PHY access keeps working while the interface is
 * down.  The write order follows the TRM reset sequence; caller must
 * hold the driver lock.
 */
static void
cgem_reset(struct cgem_softc *sc)
{

	CGEM_ASSERT_LOCKED(sc);

	/* Disable everything, then wipe config and counters. */
	WR4(sc, CGEM_NET_CTRL, 0);
	WR4(sc, CGEM_NET_CFG, 0);
	WR4(sc, CGEM_NET_CTRL, CGEM_NET_CTRL_CLR_STAT_REGS);
	/* Writing the status registers back clears their sticky bits. */
	WR4(sc, CGEM_TX_STAT, CGEM_TX_STAT_ALL);
	WR4(sc, CGEM_RX_STAT, CGEM_RX_STAT_ALL);
	WR4(sc, CGEM_INTR_DIS, CGEM_INTR_ALL);
	WR4(sc, CGEM_HASH_BOT, 0);
	WR4(sc, CGEM_HASH_TOP, 0);
	WR4(sc, CGEM_TX_QBAR, 0);	/* manual says do this. */
	WR4(sc, CGEM_RX_QBAR, 0);

	/* Get management port running even if interface is down. */
	WR4(sc, CGEM_NET_CFG,
	    CGEM_NET_CFG_DBUS_WIDTH_32 |
	    CGEM_NET_CFG_MDC_CLK_DIV_64);

	/* Shadow tracks NET_CTRL so later writes can OR in bits safely. */
	sc->net_ctl_shadow = CGEM_NET_CTRL_MGMT_PORT_EN;
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);
}
---|
1081 | |
---|
/*
 * Bring up the hardware: program the network and DMA configuration
 * registers, point the controller at the descriptor rings, enable the
 * transmitter and receiver, set the station MAC address, and unmask the
 * interrupts the driver services.  Caller must hold the driver lock.
 */
static void
cgem_config(struct cgem_softc *sc)
{
	uint32_t net_cfg;
	uint32_t dma_cfg;
	u_char *eaddr = IF_LLADDR(sc->ifp);

	CGEM_ASSERT_LOCKED(sc);

	/* Program Net Config Register. */
	net_cfg = CGEM_NET_CFG_DBUS_WIDTH_32 |
	    CGEM_NET_CFG_MDC_CLK_DIV_64 |
	    CGEM_NET_CFG_FCS_REMOVE |
	    CGEM_NET_CFG_RX_BUF_OFFSET(ETHER_ALIGN) |
	    CGEM_NET_CFG_GIGE_EN |
	    CGEM_NET_CFG_1536RXEN |
	    CGEM_NET_CFG_FULL_DUPLEX |
	    CGEM_NET_CFG_SPEED100;

	/* Enable receive checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_RXCSUM) != 0)
		net_cfg |= CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN;

	WR4(sc, CGEM_NET_CFG, net_cfg);

	/* Program DMA Config Register. */
	dma_cfg = CGEM_DMA_CFG_RX_BUF_SIZE(MCLBYTES) |
	    CGEM_DMA_CFG_RX_PKTBUF_MEMSZ_SEL_8K |
	    CGEM_DMA_CFG_TX_PKTBUF_MEMSZ_SEL |
	    CGEM_DMA_CFG_AHB_FIXED_BURST_LEN_16 |
	    CGEM_DMA_CFG_DISC_WHEN_NO_AHB;

	/* Enable transmit checksum offloading? */
	if ((sc->ifp->if_capenable & IFCAP_TXCSUM) != 0)
		dma_cfg |= CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN;

	WR4(sc, CGEM_DMA_CFG, dma_cfg);

	/* Write the rx and tx descriptor ring addresses to the QBAR regs. */
	WR4(sc, CGEM_RX_QBAR, (uint32_t) sc->rxring_physaddr);
	WR4(sc, CGEM_TX_QBAR, (uint32_t) sc->txring_physaddr);

	/* Enable rx and tx. */
	sc->net_ctl_shadow |= (CGEM_NET_CTRL_TX_EN | CGEM_NET_CTRL_RX_EN);
	WR4(sc, CGEM_NET_CTRL, sc->net_ctl_shadow);

	/*
	 * Set receive address in case it changed.  The MAC address is
	 * stored little-endian across the LOW/HI register pair.
	 */
	WR4(sc, CGEM_SPEC_ADDR_LOW(0), (eaddr[3] << 24) |
	    (eaddr[2] << 16) | (eaddr[1] << 8) | eaddr[0]);
	WR4(sc, CGEM_SPEC_ADDR_HI(0), (eaddr[5] << 8) | eaddr[4]);

	/* Set up interrupts. */
	WR4(sc, CGEM_INTR_EN,
	    CGEM_INTR_RX_COMPLETE | CGEM_INTR_RX_OVERRUN |
	    CGEM_INTR_TX_USED_READ | CGEM_INTR_RX_USED_READ |
	    CGEM_INTR_HRESP_NOT_OK);
}
---|
1140 | |
---|
1141 | /* Turn on interface and load up receive ring with buffers. */ |
---|
1142 | static void |
---|
1143 | cgem_init_locked(struct cgem_softc *sc) |
---|
1144 | { |
---|
1145 | struct mii_data *mii; |
---|
1146 | |
---|
1147 | CGEM_ASSERT_LOCKED(sc); |
---|
1148 | |
---|
1149 | if ((sc->ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) |
---|
1150 | return; |
---|
1151 | |
---|
1152 | cgem_config(sc); |
---|
1153 | cgem_fill_rqueue(sc); |
---|
1154 | |
---|
1155 | sc->ifp->if_drv_flags |= IFF_DRV_RUNNING; |
---|
1156 | sc->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE; |
---|
1157 | |
---|
1158 | mii = device_get_softc(sc->miibus); |
---|
1159 | mii_mediachg(mii); |
---|
1160 | |
---|
1161 | callout_reset(&sc->tick_ch, hz, cgem_tick, sc); |
---|
1162 | } |
---|
1163 | |
---|
/* ifnet if_init entry point: locked wrapper around cgem_init_locked(). */
static void
cgem_init(void *arg)
{
	struct cgem_softc *softc = arg;

	CGEM_LOCK(softc);
	cgem_init_locked(softc);
	CGEM_UNLOCK(softc);
}
---|
1173 | |
---|
1174 | /* Turn off interface. Free up any buffers in transmit or receive queues. */ |
---|
1175 | static void |
---|
1176 | cgem_stop(struct cgem_softc *sc) |
---|
1177 | { |
---|
1178 | int i; |
---|
1179 | |
---|
1180 | CGEM_ASSERT_LOCKED(sc); |
---|
1181 | |
---|
1182 | callout_stop(&sc->tick_ch); |
---|
1183 | |
---|
1184 | /* Shut down hardware. */ |
---|
1185 | cgem_reset(sc); |
---|
1186 | |
---|
1187 | /* Clear out transmit queue. */ |
---|
1188 | for (i = 0; i < CGEM_NUM_TX_DESCS; i++) { |
---|
1189 | sc->txring[i].ctl = CGEM_TXDESC_USED; |
---|
1190 | sc->txring[i].addr = 0; |
---|
1191 | if (sc->txring_m[i]) { |
---|
1192 | #ifndef __rtems__ |
---|
1193 | bus_dmamap_unload(sc->mbuf_dma_tag, |
---|
1194 | sc->txring_m_dmamap[i]); |
---|
1195 | #endif /* __rtems__ */ |
---|
1196 | m_freem(sc->txring_m[i]); |
---|
1197 | sc->txring_m[i] = NULL; |
---|
1198 | } |
---|
1199 | } |
---|
1200 | sc->txring[CGEM_NUM_TX_DESCS - 1].ctl |= CGEM_TXDESC_WRAP; |
---|
1201 | |
---|
1202 | sc->txring_hd_ptr = 0; |
---|
1203 | sc->txring_tl_ptr = 0; |
---|
1204 | sc->txring_queued = 0; |
---|
1205 | |
---|
1206 | /* Clear out receive queue. */ |
---|
1207 | for (i = 0; i < CGEM_NUM_RX_DESCS; i++) { |
---|
1208 | sc->rxring[i].addr = CGEM_RXDESC_OWN; |
---|
1209 | sc->rxring[i].ctl = 0; |
---|
1210 | if (sc->rxring_m[i]) { |
---|
1211 | #ifndef __rtems__ |
---|
1212 | /* Unload dmamap. */ |
---|
1213 | bus_dmamap_unload(sc->mbuf_dma_tag, |
---|
1214 | sc->rxring_m_dmamap[sc->rxring_tl_ptr]); |
---|
1215 | #endif /* __rtems__ */ |
---|
1216 | |
---|
1217 | m_freem(sc->rxring_m[i]); |
---|
1218 | sc->rxring_m[i] = NULL; |
---|
1219 | } |
---|
1220 | } |
---|
1221 | sc->rxring[CGEM_NUM_RX_DESCS - 1].addr |= CGEM_RXDESC_WRAP; |
---|
1222 | |
---|
1223 | sc->rxring_hd_ptr = 0; |
---|
1224 | sc->rxring_tl_ptr = 0; |
---|
1225 | sc->rxring_queued = 0; |
---|
1226 | |
---|
1227 | /* Force next statchg or linkchg to program net config register. */ |
---|
1228 | sc->mii_media_active = 0; |
---|
1229 | } |
---|
1230 | |
---|
1231 | |
---|
/*
 * ifnet ioctl entry point.  Handles interface up/down transitions,
 * multicast filter reprogramming, media selection (delegated to mii),
 * and checksum-offload capability toggling; everything else falls
 * through to ether_ioctl().  Returns 0 or an errno value.
 */
static int
cgem_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct cgem_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct mii_data *mii;
	int error = 0, mask;

	switch (cmd) {
	case SIOCSIFFLAGS:
		CGEM_LOCK(sc);
		if ((ifp->if_flags & IFF_UP) != 0) {
			if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
				/*
				 * Already running: only reprogram the rx
				 * filter if promisc/allmulti changed.
				 */
				if (((ifp->if_flags ^ sc->if_old_flags) &
				    (IFF_PROMISC | IFF_ALLMULTI)) != 0) {
					cgem_rx_filter(sc);
				}
			} else {
				cgem_init_locked(sc);
			}
		} else if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			/* Going down: stop the hardware. */
			ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
			cgem_stop(sc);
		}
		/* Remember flags so the next call can detect changes. */
		sc->if_old_flags = ifp->if_flags;
		CGEM_UNLOCK(sc);
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		/* Set up multi-cast filters. */
		if ((ifp->if_drv_flags & IFF_DRV_RUNNING) != 0) {
			CGEM_LOCK(sc);
			cgem_rx_filter(sc);
			CGEM_UNLOCK(sc);
		}
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		/* Media get/set is handled by the mii layer. */
		mii = device_get_softc(sc->miibus);
		error = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
		break;

	case SIOCSIFCAP:
		CGEM_LOCK(sc);
		/* mask holds the capability bits being toggled. */
		mask = ifp->if_capenable ^ ifr->ifr_reqcap;

		if ((mask & IFCAP_TXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_TXCSUM) != 0) {
				/* Turn on TX checksumming. */
				ifp->if_capenable |= (IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist |= CGEM_CKSUM_ASSIST;

				/* Enable checksum generation in hardware. */
				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) |
				    CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			} else {
				/* Turn off TX checksumming. */
				ifp->if_capenable &= ~(IFCAP_TXCSUM |
				    IFCAP_TXCSUM_IPV6);
				ifp->if_hwassist &= ~CGEM_CKSUM_ASSIST;

				WR4(sc, CGEM_DMA_CFG,
				    RD4(sc, CGEM_DMA_CFG) &
				    ~CGEM_DMA_CFG_CHKSUM_GEN_OFFLOAD_EN);
			}
		}
		if ((mask & IFCAP_RXCSUM) != 0) {
			if ((ifr->ifr_reqcap & IFCAP_RXCSUM) != 0) {
				/* Turn on RX checksumming. */
				ifp->if_capenable |= (IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) |
				    CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			} else {
				/* Turn off RX checksumming. */
				ifp->if_capenable &= ~(IFCAP_RXCSUM |
				    IFCAP_RXCSUM_IPV6);
				WR4(sc, CGEM_NET_CFG,
				    RD4(sc, CGEM_NET_CFG) &
				    ~CGEM_NET_CFG_RX_CHKSUM_OFFLD_EN);
			}
		}
		/* VLAN hw csum is advertised only when both sides are on. */
		if ((ifp->if_capenable & (IFCAP_RXCSUM | IFCAP_TXCSUM)) ==
		    (IFCAP_RXCSUM | IFCAP_TXCSUM))
			ifp->if_capenable |= IFCAP_VLAN_HWCSUM;
		else
			ifp->if_capenable &= ~IFCAP_VLAN_HWCSUM;

		CGEM_UNLOCK(sc);
		break;
	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	return (error);
}
---|
1333 | |
---|
1334 | /* MII bus support routines. |
---|
1335 | */ |
---|
1336 | static void |
---|
1337 | cgem_child_detached(device_t dev, device_t child) |
---|
1338 | { |
---|
1339 | struct cgem_softc *sc = device_get_softc(dev); |
---|
1340 | |
---|
1341 | if (child == sc->miibus) |
---|
1342 | sc->miibus = NULL; |
---|
1343 | } |
---|
1344 | |
---|
1345 | static int |
---|
1346 | cgem_ifmedia_upd(struct ifnet *ifp) |
---|
1347 | { |
---|
1348 | struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc; |
---|
1349 | struct mii_data *mii; |
---|
1350 | struct mii_softc *miisc; |
---|
1351 | int error = 0; |
---|
1352 | |
---|
1353 | mii = device_get_softc(sc->miibus); |
---|
1354 | CGEM_LOCK(sc); |
---|
1355 | if ((ifp->if_flags & IFF_UP) != 0) { |
---|
1356 | LIST_FOREACH(miisc, &mii->mii_phys, mii_list) |
---|
1357 | PHY_RESET(miisc); |
---|
1358 | error = mii_mediachg(mii); |
---|
1359 | } |
---|
1360 | CGEM_UNLOCK(sc); |
---|
1361 | |
---|
1362 | return (error); |
---|
1363 | } |
---|
1364 | |
---|
1365 | static void |
---|
1366 | cgem_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) |
---|
1367 | { |
---|
1368 | struct cgem_softc *sc = (struct cgem_softc *) ifp->if_softc; |
---|
1369 | struct mii_data *mii; |
---|
1370 | |
---|
1371 | mii = device_get_softc(sc->miibus); |
---|
1372 | CGEM_LOCK(sc); |
---|
1373 | mii_pollstat(mii); |
---|
1374 | ifmr->ifm_active = mii->mii_media_active; |
---|
1375 | ifmr->ifm_status = mii->mii_media_status; |
---|
1376 | CGEM_UNLOCK(sc); |
---|
1377 | } |
---|
1378 | |
---|
1379 | static int |
---|
1380 | cgem_miibus_readreg(device_t dev, int phy, int reg) |
---|
1381 | { |
---|
1382 | struct cgem_softc *sc = device_get_softc(dev); |
---|
1383 | int tries, val; |
---|
1384 | |
---|
1385 | WR4(sc, CGEM_PHY_MAINT, |
---|
1386 | CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 | |
---|
1387 | CGEM_PHY_MAINT_OP_READ | |
---|
1388 | (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) | |
---|
1389 | (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT)); |
---|
1390 | |
---|
1391 | /* Wait for completion. */ |
---|
1392 | tries=0; |
---|
1393 | while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) { |
---|
1394 | DELAY(5); |
---|
1395 | if (++tries > 200) { |
---|
1396 | device_printf(dev, "phy read timeout: %d\n", reg); |
---|
1397 | return (-1); |
---|
1398 | } |
---|
1399 | } |
---|
1400 | |
---|
1401 | val = RD4(sc, CGEM_PHY_MAINT) & CGEM_PHY_MAINT_DATA_MASK; |
---|
1402 | |
---|
1403 | if (reg == MII_EXTSR) |
---|
1404 | /* |
---|
1405 | * MAC does not support half-duplex at gig speeds. |
---|
1406 | * Let mii(4) exclude the capability. |
---|
1407 | */ |
---|
1408 | val &= ~(EXTSR_1000XHDX | EXTSR_1000THDX); |
---|
1409 | |
---|
1410 | return (val); |
---|
1411 | } |
---|
1412 | |
---|
1413 | static int |
---|
1414 | cgem_miibus_writereg(device_t dev, int phy, int reg, int data) |
---|
1415 | { |
---|
1416 | struct cgem_softc *sc = device_get_softc(dev); |
---|
1417 | int tries; |
---|
1418 | |
---|
1419 | WR4(sc, CGEM_PHY_MAINT, |
---|
1420 | CGEM_PHY_MAINT_CLAUSE_22 | CGEM_PHY_MAINT_MUST_10 | |
---|
1421 | CGEM_PHY_MAINT_OP_WRITE | |
---|
1422 | (phy << CGEM_PHY_MAINT_PHY_ADDR_SHIFT) | |
---|
1423 | (reg << CGEM_PHY_MAINT_REG_ADDR_SHIFT) | |
---|
1424 | (data & CGEM_PHY_MAINT_DATA_MASK)); |
---|
1425 | |
---|
1426 | /* Wait for completion. */ |
---|
1427 | tries = 0; |
---|
1428 | while ((RD4(sc, CGEM_NET_STAT) & CGEM_NET_STAT_PHY_MGMT_IDLE) == 0) { |
---|
1429 | DELAY(5); |
---|
1430 | if (++tries > 200) { |
---|
1431 | device_printf(dev, "phy write timeout: %d\n", reg); |
---|
1432 | return (-1); |
---|
1433 | } |
---|
1434 | } |
---|
1435 | |
---|
1436 | return (0); |
---|
1437 | } |
---|
1438 | |
---|
1439 | static void |
---|
1440 | cgem_miibus_statchg(device_t dev) |
---|
1441 | { |
---|
1442 | struct cgem_softc *sc = device_get_softc(dev); |
---|
1443 | struct mii_data *mii = device_get_softc(sc->miibus); |
---|
1444 | |
---|
1445 | CGEM_ASSERT_LOCKED(sc); |
---|
1446 | |
---|
1447 | if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == |
---|
1448 | (IFM_ACTIVE | IFM_AVALID) && |
---|
1449 | sc->mii_media_active != mii->mii_media_active) |
---|
1450 | cgem_mediachange(sc, mii); |
---|
1451 | } |
---|
1452 | |
---|
1453 | static void |
---|
1454 | cgem_miibus_linkchg(device_t dev) |
---|
1455 | { |
---|
1456 | struct cgem_softc *sc = device_get_softc(dev); |
---|
1457 | struct mii_data *mii = device_get_softc(sc->miibus); |
---|
1458 | |
---|
1459 | CGEM_ASSERT_LOCKED(sc); |
---|
1460 | |
---|
1461 | if ((mii->mii_media_status & (IFM_ACTIVE | IFM_AVALID)) == |
---|
1462 | (IFM_ACTIVE | IFM_AVALID) && |
---|
1463 | sc->mii_media_active != mii->mii_media_active) |
---|
1464 | cgem_mediachange(sc, mii); |
---|
1465 | } |
---|
1466 | |
---|
1467 | /* |
---|
1468 | * Overridable weak symbol cgem_set_ref_clk(). This allows platforms to |
---|
1469 | * provide a function to set the cgem's reference clock. |
---|
1470 | */ |
---|
1471 | static int __used |
---|
1472 | cgem_default_set_ref_clk(int unit, int frequency) |
---|
1473 | { |
---|
1474 | |
---|
1475 | return 0; |
---|
1476 | } |
---|
1477 | __weak_reference(cgem_default_set_ref_clk, cgem_set_ref_clk); |
---|
1478 | |
---|
1479 | /* Call to set reference clock and network config bits according to media. */ |
---|
1480 | static void |
---|
1481 | cgem_mediachange(struct cgem_softc *sc, struct mii_data *mii) |
---|
1482 | { |
---|
1483 | uint32_t net_cfg; |
---|
1484 | int ref_clk_freq; |
---|
1485 | |
---|
1486 | CGEM_ASSERT_LOCKED(sc); |
---|
1487 | |
---|
1488 | /* Update hardware to reflect media. */ |
---|
1489 | net_cfg = RD4(sc, CGEM_NET_CFG); |
---|
1490 | net_cfg &= ~(CGEM_NET_CFG_SPEED100 | CGEM_NET_CFG_GIGE_EN | |
---|
1491 | CGEM_NET_CFG_FULL_DUPLEX); |
---|
1492 | |
---|
1493 | switch (IFM_SUBTYPE(mii->mii_media_active)) { |
---|
1494 | case IFM_1000_T: |
---|
1495 | net_cfg |= (CGEM_NET_CFG_SPEED100 | |
---|
1496 | CGEM_NET_CFG_GIGE_EN); |
---|
1497 | ref_clk_freq = 125000000; |
---|
1498 | break; |
---|
1499 | case IFM_100_TX: |
---|
1500 | net_cfg |= CGEM_NET_CFG_SPEED100; |
---|
1501 | ref_clk_freq = 25000000; |
---|
1502 | break; |
---|
1503 | default: |
---|
1504 | ref_clk_freq = 2500000; |
---|
1505 | } |
---|
1506 | |
---|
1507 | if ((mii->mii_media_active & IFM_FDX) != 0) |
---|
1508 | net_cfg |= CGEM_NET_CFG_FULL_DUPLEX; |
---|
1509 | |
---|
1510 | WR4(sc, CGEM_NET_CFG, net_cfg); |
---|
1511 | |
---|
1512 | /* Set the reference clock if necessary. */ |
---|
1513 | if (cgem_set_ref_clk(sc->ref_clk_num, ref_clk_freq)) |
---|
1514 | device_printf(sc->dev, "cgem_mediachange: " |
---|
1515 | "could not set ref clk%d to %d.\n", |
---|
1516 | sc->ref_clk_num, ref_clk_freq); |
---|
1517 | |
---|
1518 | sc->mii_media_active = mii->mii_media_active; |
---|
1519 | } |
---|
1520 | |
---|
1521 | static void |
---|
1522 | cgem_add_sysctls(device_t dev) |
---|
1523 | { |
---|
1524 | #ifndef __rtems__ |
---|
1525 | struct cgem_softc *sc = device_get_softc(dev); |
---|
1526 | struct sysctl_ctx_list *ctx; |
---|
1527 | struct sysctl_oid_list *child; |
---|
1528 | struct sysctl_oid *tree; |
---|
1529 | |
---|
1530 | ctx = device_get_sysctl_ctx(dev); |
---|
1531 | child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); |
---|
1532 | |
---|
1533 | SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxbufs", CTLFLAG_RW, |
---|
1534 | &sc->rxbufs, 0, |
---|
1535 | "Number receive buffers to provide"); |
---|
1536 | |
---|
1537 | SYSCTL_ADD_INT(ctx, child, OID_AUTO, "rxhangwar", CTLFLAG_RW, |
---|
1538 | &sc->rxhangwar, 0, |
---|
1539 | "Enable receive hang work-around"); |
---|
1540 | |
---|
1541 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxoverruns", CTLFLAG_RD, |
---|
1542 | &sc->rxoverruns, 0, |
---|
1543 | "Receive overrun events"); |
---|
1544 | |
---|
1545 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxnobufs", CTLFLAG_RD, |
---|
1546 | &sc->rxnobufs, 0, |
---|
1547 | "Receive buf queue empty events"); |
---|
1548 | |
---|
1549 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_rxdmamapfails", CTLFLAG_RD, |
---|
1550 | &sc->rxdmamapfails, 0, |
---|
1551 | "Receive DMA map failures"); |
---|
1552 | |
---|
1553 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txfull", CTLFLAG_RD, |
---|
1554 | &sc->txfull, 0, |
---|
1555 | "Transmit ring full events"); |
---|
1556 | |
---|
1557 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdmamapfails", CTLFLAG_RD, |
---|
1558 | &sc->txdmamapfails, 0, |
---|
1559 | "Transmit DMA map failures"); |
---|
1560 | |
---|
1561 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefrags", CTLFLAG_RD, |
---|
1562 | &sc->txdefrags, 0, |
---|
1563 | "Transmit m_defrag() calls"); |
---|
1564 | |
---|
1565 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "_txdefragfails", CTLFLAG_RD, |
---|
1566 | &sc->txdefragfails, 0, |
---|
1567 | "Transmit m_defrag() failures"); |
---|
1568 | |
---|
1569 | tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "stats", CTLFLAG_RD, |
---|
1570 | NULL, "GEM statistics"); |
---|
1571 | child = SYSCTL_CHILDREN(tree); |
---|
1572 | |
---|
1573 | SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "tx_bytes", CTLFLAG_RD, |
---|
1574 | &sc->stats.tx_bytes, "Total bytes transmitted"); |
---|
1575 | |
---|
1576 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames", CTLFLAG_RD, |
---|
1577 | &sc->stats.tx_frames, 0, "Total frames transmitted"); |
---|
1578 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_bcast", CTLFLAG_RD, |
---|
1579 | &sc->stats.tx_frames_bcast, 0, |
---|
1580 | "Number broadcast frames transmitted"); |
---|
1581 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_multi", CTLFLAG_RD, |
---|
1582 | &sc->stats.tx_frames_multi, 0, |
---|
1583 | "Number multicast frames transmitted"); |
---|
1584 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_pause", |
---|
1585 | CTLFLAG_RD, &sc->stats.tx_frames_pause, 0, |
---|
1586 | "Number pause frames transmitted"); |
---|
1587 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_64b", CTLFLAG_RD, |
---|
1588 | &sc->stats.tx_frames_64b, 0, |
---|
1589 | "Number frames transmitted of size 64 bytes or less"); |
---|
1590 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_65to127b", CTLFLAG_RD, |
---|
1591 | &sc->stats.tx_frames_65to127b, 0, |
---|
1592 | "Number frames transmitted of size 65-127 bytes"); |
---|
1593 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_128to255b", |
---|
1594 | CTLFLAG_RD, &sc->stats.tx_frames_128to255b, 0, |
---|
1595 | "Number frames transmitted of size 128-255 bytes"); |
---|
1596 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_256to511b", |
---|
1597 | CTLFLAG_RD, &sc->stats.tx_frames_256to511b, 0, |
---|
1598 | "Number frames transmitted of size 256-511 bytes"); |
---|
1599 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_512to1023b", |
---|
1600 | CTLFLAG_RD, &sc->stats.tx_frames_512to1023b, 0, |
---|
1601 | "Number frames transmitted of size 512-1023 bytes"); |
---|
1602 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_frames_1024to1536b", |
---|
1603 | CTLFLAG_RD, &sc->stats.tx_frames_1024to1536b, 0, |
---|
1604 | "Number frames transmitted of size 1024-1536 bytes"); |
---|
1605 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_under_runs", |
---|
1606 | CTLFLAG_RD, &sc->stats.tx_under_runs, 0, |
---|
1607 | "Number transmit under-run events"); |
---|
1608 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_single_collisn", |
---|
1609 | CTLFLAG_RD, &sc->stats.tx_single_collisn, 0, |
---|
1610 | "Number single-collision transmit frames"); |
---|
1611 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_multi_collisn", |
---|
1612 | CTLFLAG_RD, &sc->stats.tx_multi_collisn, 0, |
---|
1613 | "Number multi-collision transmit frames"); |
---|
1614 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_excsv_collisn", |
---|
1615 | CTLFLAG_RD, &sc->stats.tx_excsv_collisn, 0, |
---|
1616 | "Number excessive collision transmit frames"); |
---|
1617 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_late_collisn", |
---|
1618 | CTLFLAG_RD, &sc->stats.tx_late_collisn, 0, |
---|
1619 | "Number late-collision transmit frames"); |
---|
1620 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_deferred_frames", |
---|
1621 | CTLFLAG_RD, &sc->stats.tx_deferred_frames, 0, |
---|
1622 | "Number deferred transmit frames"); |
---|
1623 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "tx_carrier_sense_errs", |
---|
1624 | CTLFLAG_RD, &sc->stats.tx_carrier_sense_errs, 0, |
---|
1625 | "Number carrier sense errors on transmit"); |
---|
1626 | |
---|
1627 | SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "rx_bytes", CTLFLAG_RD, |
---|
1628 | &sc->stats.rx_bytes, "Total bytes received"); |
---|
1629 | |
---|
1630 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames", CTLFLAG_RD, |
---|
1631 | &sc->stats.rx_frames, 0, "Total frames received"); |
---|
1632 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_bcast", |
---|
1633 | CTLFLAG_RD, &sc->stats.rx_frames_bcast, 0, |
---|
1634 | "Number broadcast frames received"); |
---|
1635 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_multi", |
---|
1636 | CTLFLAG_RD, &sc->stats.rx_frames_multi, 0, |
---|
1637 | "Number multicast frames received"); |
---|
1638 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_pause", |
---|
1639 | CTLFLAG_RD, &sc->stats.rx_frames_pause, 0, |
---|
1640 | "Number pause frames received"); |
---|
1641 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_64b", |
---|
1642 | CTLFLAG_RD, &sc->stats.rx_frames_64b, 0, |
---|
1643 | "Number frames received of size 64 bytes or less"); |
---|
1644 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_65to127b", |
---|
1645 | CTLFLAG_RD, &sc->stats.rx_frames_65to127b, 0, |
---|
1646 | "Number frames received of size 65-127 bytes"); |
---|
1647 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_128to255b", |
---|
1648 | CTLFLAG_RD, &sc->stats.rx_frames_128to255b, 0, |
---|
1649 | "Number frames received of size 128-255 bytes"); |
---|
1650 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_256to511b", |
---|
1651 | CTLFLAG_RD, &sc->stats.rx_frames_256to511b, 0, |
---|
1652 | "Number frames received of size 256-511 bytes"); |
---|
1653 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_512to1023b", |
---|
1654 | CTLFLAG_RD, &sc->stats.rx_frames_512to1023b, 0, |
---|
1655 | "Number frames received of size 512-1023 bytes"); |
---|
1656 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_1024to1536b", |
---|
1657 | CTLFLAG_RD, &sc->stats.rx_frames_1024to1536b, 0, |
---|
1658 | "Number frames received of size 1024-1536 bytes"); |
---|
1659 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_undersize", |
---|
1660 | CTLFLAG_RD, &sc->stats.rx_frames_undersize, 0, |
---|
1661 | "Number undersize frames received"); |
---|
1662 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_oversize", |
---|
1663 | CTLFLAG_RD, &sc->stats.rx_frames_oversize, 0, |
---|
1664 | "Number oversize frames received"); |
---|
1665 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_jabber", |
---|
1666 | CTLFLAG_RD, &sc->stats.rx_frames_jabber, 0, |
---|
1667 | "Number jabber frames received"); |
---|
1668 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_fcs_errs", |
---|
1669 | CTLFLAG_RD, &sc->stats.rx_frames_fcs_errs, 0, |
---|
1670 | "Number frames received with FCS errors"); |
---|
1671 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_length_errs", |
---|
1672 | CTLFLAG_RD, &sc->stats.rx_frames_length_errs, 0, |
---|
1673 | "Number frames received with length errors"); |
---|
1674 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_symbol_errs", |
---|
1675 | CTLFLAG_RD, &sc->stats.rx_symbol_errs, 0, |
---|
1676 | "Number receive symbol errors"); |
---|
1677 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_align_errs", |
---|
1678 | CTLFLAG_RD, &sc->stats.rx_align_errs, 0, |
---|
1679 | "Number receive alignment errors"); |
---|
1680 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_resource_errs", |
---|
1681 | CTLFLAG_RD, &sc->stats.rx_resource_errs, 0, |
---|
1682 | "Number frames received when no rx buffer available"); |
---|
1683 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_overrun_errs", |
---|
1684 | CTLFLAG_RD, &sc->stats.rx_overrun_errs, 0, |
---|
1685 | "Number frames received but not copied due to " |
---|
1686 | "receive overrun"); |
---|
1687 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_ip_hdr_csum_errs", |
---|
1688 | CTLFLAG_RD, &sc->stats.rx_ip_hdr_csum_errs, 0, |
---|
1689 | "Number frames received with IP header checksum " |
---|
1690 | "errors"); |
---|
1691 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_tcp_csum_errs", |
---|
1692 | CTLFLAG_RD, &sc->stats.rx_tcp_csum_errs, 0, |
---|
1693 | "Number frames received with TCP checksum errors"); |
---|
1694 | SYSCTL_ADD_UINT(ctx, child, OID_AUTO, "rx_frames_udp_csum_errs", |
---|
1695 | CTLFLAG_RD, &sc->stats.rx_udp_csum_errs, 0, |
---|
1696 | "Number frames received with UDP checksum errors"); |
---|
1697 | #endif /* __rtems__ */ |
---|
1698 | } |
---|
1699 | |
---|
1700 | |
---|
1701 | static int |
---|
1702 | cgem_probe(device_t dev) |
---|
1703 | { |
---|
1704 | |
---|
1705 | #ifndef __rtems__ |
---|
1706 | if (!ofw_bus_is_compatible(dev, "cadence,gem")) |
---|
1707 | return (ENXIO); |
---|
1708 | #endif /* __rtems__ */ |
---|
1709 | |
---|
1710 | device_set_desc(dev, "Cadence CGEM Gigabit Ethernet Interface"); |
---|
1711 | return (0); |
---|
1712 | } |
---|
1713 | |
---|
/*
 * Device attach.  Allocates the register and interrupt resources, sets
 * up the ifnet, resets the controller, attaches the PHY through miibus,
 * allocates the TX/RX descriptor rings, attaches the Ethernet layer,
 * and finally installs the interrupt handler.  On failure, previously
 * acquired resources are released via cgem_detach(), which tolerates
 * partially initialized state (it null-checks each resource).
 *
 * Returns 0 on success or an errno value on failure.
 */
static int
cgem_attach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = NULL;
#ifndef __rtems__
	phandle_t node;
	pcell_t cell;
#endif /* __rtems__ */
	int rid, err;
	u_char eaddr[ETHER_ADDR_LEN];

	sc->dev = dev;
	CGEM_LOCK_INIT(sc);

#ifndef __rtems__
	/* Get reference clock number and base divider from fdt. */
	node = ofw_bus_get_node(dev);
	sc->ref_clk_num = 0;
	if (OF_getprop(node, "ref-clock-num", &cell, sizeof(cell)) > 0)
		sc->ref_clk_num = fdt32_to_cpu(cell);
#else /* __rtems__ */
	/* No FDT on RTEMS: use the device unit number as the clock index. */
	sc->ref_clk_num = device_get_unit(dev);
#endif /* __rtems__ */

	/* Get memory resource. */
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
					     RF_ACTIVE);
	if (sc->mem_res == NULL) {
		device_printf(dev, "could not allocate memory resources.\n");
		/* Nothing acquired yet besides the mutex; just fail. */
		return (ENOMEM);
	}

	/* Get IRQ resource. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
					     RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "could not allocate interrupt resource.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Set up ifnet structure. */
	ifp = sc->ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "could not allocate ifnet structure\n");
		cgem_detach(dev);
		return (ENOMEM);
	}
	ifp->if_softc = sc;
	if_initname(ifp, IF_CGEM_NAME, device_get_unit(dev));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_start = cgem_start;
	ifp->if_ioctl = cgem_ioctl;
	ifp->if_init = cgem_init;
	ifp->if_capabilities |= IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 |
		IFCAP_VLAN_MTU | IFCAP_VLAN_HWCSUM;
	/* Disable hardware checksumming by default. */
	ifp->if_hwassist = 0;
	/* Advertise csum capabilities but leave them off until enabled. */
	ifp->if_capenable = ifp->if_capabilities &
		~(IFCAP_HWCSUM | IFCAP_HWCSUM_IPV6 | IFCAP_VLAN_HWCSUM);
	ifp->if_snd.ifq_drv_maxlen = CGEM_NUM_TX_DESCS;
	IFQ_SET_MAXLEN(&ifp->if_snd, ifp->if_snd.ifq_drv_maxlen);
	IFQ_SET_READY(&ifp->if_snd);

	/* Remember initial flags so ioctl can detect changes later. */
	sc->if_old_flags = ifp->if_flags;
	sc->rxbufs = DEFAULT_NUM_RX_BUFS;
	/* Receive-hang workaround enabled by default. */
	sc->rxhangwar = 1;

	/* Reset hardware. */
	CGEM_LOCK(sc);
	cgem_reset(sc);
	CGEM_UNLOCK(sc);

	/* Attach phy to mii bus. */
	err = mii_attach(dev, &sc->miibus, ifp,
			 cgem_ifmedia_upd, cgem_ifmedia_sts,
			 BMSR_DEFCAPMASK, MII_PHY_ANY, MII_OFFSET_ANY, 0);
	if (err) {
		device_printf(dev, "attaching PHYs failed\n");
		cgem_detach(dev);
		return (err);
	}

	/* Set up TX and RX descriptor area. */
	err = cgem_setup_descs(sc);
	if (err) {
		device_printf(dev, "could not set up dma mem for descs.\n");
		cgem_detach(dev);
		return (ENOMEM);
	}

	/* Get a MAC address. */
	cgem_get_mac(sc, eaddr);

	/* Start ticks. */
	callout_init_mtx(&sc->tick_ch, &sc->sc_mtx, 0);

	ether_ifattach(ifp, eaddr);

	err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_NET | INTR_MPSAFE |
			     INTR_EXCL, NULL, cgem_intr, sc, &sc->intrhand);
	if (err) {
		device_printf(dev, "could not set interrupt handler.\n");
		/*
		 * ether_ifdetach() is needed here because cgem_detach()
		 * only detaches the ifnet once the device is fully
		 * attached (it checks device_is_attached()).
		 */
		ether_ifdetach(ifp);
		cgem_detach(dev);
		return (err);
	}

	cgem_add_sysctls(dev);

	return (0);
}
---|
1829 | |
---|
/*
 * Device detach.  Also used by cgem_attach() to unwind a partial
 * attach: every resource is null-checked before release, so this is
 * safe to call with partially initialized softc state.  Teardown order
 * is: stop the interface and detach the ifnet, delete the miibus
 * child, release bus resources, free the DMA rings and maps, destroy
 * the DMA tags, then destroy the mutex.
 *
 * Returns 0, or ENODEV if there is no softc.
 */
static int
cgem_detach(device_t dev)
{
	struct cgem_softc *sc = device_get_softc(dev);
#ifndef __rtems__
	int i;
#endif /* __rtems__ */

	if (sc == NULL)
		return (ENODEV);

	/* Only a fully attached device has a running ifnet to tear down. */
	if (device_is_attached(dev)) {
		CGEM_LOCK(sc);
		cgem_stop(sc);
		CGEM_UNLOCK(sc);
		/* callout_drain() must be called without the lock held. */
		callout_drain(&sc->tick_ch);
		sc->ifp->if_flags &= ~IFF_UP;
		ether_ifdetach(sc->ifp);
	}

	if (sc->miibus != NULL) {
		device_delete_child(dev, sc->miibus);
		sc->miibus = NULL;
	}

	/* Release resources. */
	if (sc->mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
				     rman_get_rid(sc->mem_res), sc->mem_res);
		sc->mem_res = NULL;
	}
	if (sc->irq_res != NULL) {
		/* Tear down the handler before releasing the IRQ itself. */
		if (sc->intrhand)
			bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
		bus_release_resource(dev, SYS_RES_IRQ,
				     rman_get_rid(sc->irq_res), sc->irq_res);
		sc->irq_res = NULL;
	}

	/* Release DMA resources. */
	if (sc->rxring != NULL) {
		/* A non-zero physaddr means the map is still loaded. */
		if (sc->rxring_physaddr != 0) {
			bus_dmamap_unload(sc->desc_dma_tag, sc->rxring_dma_map);
			sc->rxring_physaddr = 0;
		}
		bus_dmamem_free(sc->desc_dma_tag, __DEVOLATILE(void *, sc->rxring),
				sc->rxring_dma_map);
		sc->rxring = NULL;
#ifndef __rtems__
		/* Destroy any per-mbuf RX maps that were created. */
		for (i = 0; i < CGEM_NUM_RX_DESCS; i++)
			if (sc->rxring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
						   sc->rxring_m_dmamap[i]);
				sc->rxring_m_dmamap[i] = NULL;
			}
#endif /* __rtems__ */
	}
	if (sc->txring != NULL) {
		if (sc->txring_physaddr != 0) {
			bus_dmamap_unload(sc->desc_dma_tag, sc->txring_dma_map);
			sc->txring_physaddr = 0;
		}
		bus_dmamem_free(sc->desc_dma_tag, __DEVOLATILE(void *, sc->txring),
				sc->txring_dma_map);
		sc->txring = NULL;
#ifndef __rtems__
		/* Destroy any per-mbuf TX maps that were created. */
		for (i = 0; i < CGEM_NUM_TX_DESCS; i++)
			if (sc->txring_m_dmamap[i] != NULL) {
				bus_dmamap_destroy(sc->mbuf_dma_tag,
						   sc->txring_m_dmamap[i]);
				sc->txring_m_dmamap[i] = NULL;
			}
#endif /* __rtems__ */
	}
	/* Tags are destroyed last, after all maps and memory are gone. */
	if (sc->desc_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->desc_dma_tag);
		sc->desc_dma_tag = NULL;
	}
	if (sc->mbuf_dma_tag != NULL) {
		bus_dma_tag_destroy(sc->mbuf_dma_tag);
		sc->mbuf_dma_tag = NULL;
	}

	bus_generic_detach(dev);

	CGEM_LOCK_DESTROY(sc);

	return (0);
}
---|
1919 | |
---|
/*
 * Newbus method table: device lifecycle entry points, bus child
 * notification, and the MII callbacks used by miibus for PHY register
 * access and link/media state changes.
 */
static device_method_t cgem_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, cgem_probe),
	DEVMETHOD(device_attach, cgem_attach),
	DEVMETHOD(device_detach, cgem_detach),

	/* Bus interface */
	DEVMETHOD(bus_child_detached, cgem_child_detached),

	/* MII interface */
	DEVMETHOD(miibus_readreg, cgem_miibus_readreg),
	DEVMETHOD(miibus_writereg, cgem_miibus_writereg),
	DEVMETHOD(miibus_statchg, cgem_miibus_statchg),
	DEVMETHOD(miibus_linkchg, cgem_miibus_linkchg),

	DEVMETHOD_END
};
---|
1937 | |
---|
/* Driver declaration: name, method table, and per-instance softc size. */
static driver_t cgem_driver = {
	"cgem",
	cgem_methods,
	sizeof(struct cgem_softc),
};
---|
1943 | |
---|
#ifndef __rtems__
/* On FreeBSD the GEM is enumerated from the FDT via simplebus. */
DRIVER_MODULE(cgem, simplebus, cgem_driver, cgem_devclass, NULL, NULL);
#else /* __rtems__ */
/* RTEMS has no FDT bus here; attach directly to the nexus. */
DRIVER_MODULE(cgem, nexus, cgem_driver, cgem_devclass, NULL, NULL);
#endif /* __rtems__ */
/* The PHY bus (miibus) is instantiated as a child of this driver. */
DRIVER_MODULE(miibus, cgem, miibus_driver, miibus_devclass, NULL, NULL);
MODULE_DEPEND(cgem, miibus, 1, 1, 1);
MODULE_DEPEND(cgem, ether, 1, 1, 1);
---|