1 | #include <machine/rtems-bsd-kernel-space.h> |
---|
2 | |
---|
3 | /*- |
---|
4 | * Copyright (C) 2007-2008 Semihalf, Rafal Jaworowski |
---|
5 | * Copyright (C) 2006-2007 Semihalf, Piotr Kruszynski |
---|
6 | * All rights reserved. |
---|
7 | * |
---|
8 | * Redistribution and use in source and binary forms, with or without |
---|
9 | * modification, are permitted provided that the following conditions |
---|
10 | * are met: |
---|
11 | * 1. Redistributions of source code must retain the above copyright |
---|
12 | * notice, this list of conditions and the following disclaimer. |
---|
13 | * 2. Redistributions in binary form must reproduce the above copyright |
---|
14 | * notice, this list of conditions and the following disclaimer in the |
---|
15 | * documentation and/or other materials provided with the distribution. |
---|
16 | * |
---|
17 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR |
---|
18 | * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES |
---|
19 | * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN |
---|
20 | * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
---|
21 | * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED |
---|
22 | * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
---|
23 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF |
---|
24 | * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING |
---|
25 | * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS |
---|
26 | * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
---|
27 | */ |
---|
28 | |
---|
29 | /* |
---|
30 | * Freescale integrated Three-Speed Ethernet Controller (TSEC) driver. |
---|
31 | */ |
---|
32 | #include <sys/cdefs.h> |
---|
33 | __FBSDID("$FreeBSD$"); |
---|
34 | |
---|
35 | #ifdef HAVE_KERNEL_OPTION_HEADERS |
---|
36 | #include <rtems/bsd/local/opt_device_polling.h> |
---|
37 | #endif |
---|
38 | |
---|
39 | #include <rtems/bsd/sys/param.h> |
---|
40 | #include <sys/systm.h> |
---|
41 | #include <sys/bus.h> |
---|
42 | #include <sys/endian.h> |
---|
43 | #include <sys/mbuf.h> |
---|
44 | #include <sys/kernel.h> |
---|
45 | #include <sys/module.h> |
---|
46 | #include <sys/socket.h> |
---|
47 | #include <sys/sockio.h> |
---|
48 | #include <sys/sysctl.h> |
---|
49 | |
---|
50 | #include <net/bpf.h> |
---|
51 | #include <net/ethernet.h> |
---|
52 | #include <net/if.h> |
---|
53 | #include <net/if_arp.h> |
---|
54 | #include <net/if_dl.h> |
---|
55 | #include <net/if_media.h> |
---|
56 | #include <net/if_types.h> |
---|
57 | #include <net/if_vlan_var.h> |
---|
58 | |
---|
59 | #include <netinet/in_systm.h> |
---|
60 | #include <netinet/in.h> |
---|
61 | #include <netinet/ip.h> |
---|
62 | |
---|
63 | #include <machine/bus.h> |
---|
64 | |
---|
65 | #include <dev/mii/mii.h> |
---|
66 | #include <dev/mii/miivar.h> |
---|
67 | |
---|
68 | #include <dev/tsec/if_tsec.h> |
---|
69 | #include <dev/tsec/if_tsecreg.h> |
---|
70 | |
---|
/*
 * Forward declarations for the static helper routines defined later in
 * this file, grouped roughly by purpose (DMA, interface callbacks,
 * interrupt handling, offload/multicast configuration).
 */
static int tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag,
    bus_dmamap_t *dmap, bus_size_t dsize, void **vaddr, void *raddr,
    const char *dname);
static void tsec_dma_ctl(struct tsec_softc *sc, int state);
static int tsec_encap(struct tsec_softc *sc, struct mbuf *m_head,
    int fcb_inserted);
static void tsec_free_dma(struct tsec_softc *sc);
static void tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr);
static int tsec_ifmedia_upd(struct ifnet *ifp);
static void tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr);
static int tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map,
    struct mbuf **mbufp, uint32_t *paddr);
static void tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs,
    int nseg, int error);
static void tsec_intrs_ctl(struct tsec_softc *sc, int state);
static void tsec_init(void *xsc);
static void tsec_init_locked(struct tsec_softc *sc);
static int tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data);
static void tsec_reset_mac(struct tsec_softc *sc);
static void tsec_setfilter(struct tsec_softc *sc);
static void tsec_set_mac_address(struct tsec_softc *sc);
static void tsec_start(struct ifnet *ifp);
static void tsec_start_locked(struct ifnet *ifp);
static void tsec_stop(struct tsec_softc *sc);
static void tsec_tick(void *arg);
static void tsec_watchdog(struct tsec_softc *sc);
/* Sysctl-based interrupt-coalescing knobs are not built for RTEMS. */
#ifndef __rtems__
static void tsec_add_sysctls(struct tsec_softc *sc);
static int tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS);
static int tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS);
#endif /* __rtems__ */
static void tsec_set_rxic(struct tsec_softc *sc);
static void tsec_set_txic(struct tsec_softc *sc);
static int tsec_receive_intr_locked(struct tsec_softc *sc, int count);
static void tsec_transmit_intr_locked(struct tsec_softc *sc);
static void tsec_error_intr_locked(struct tsec_softc *sc, int count);
static void tsec_offload_setup(struct tsec_softc *sc);
static void tsec_offload_process_frame(struct tsec_softc *sc,
    struct mbuf *m);
static void tsec_setup_multicast(struct tsec_softc *sc);
static int tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu);

/* Newbus glue: hang a miibus child off tsec and declare module deps. */
devclass_t tsec_devclass;
DRIVER_MODULE(miibus, tsec, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(tsec, ether, 1, 1, 1);
MODULE_DEPEND(tsec, miibus, 1, 1, 1);
---|
117 | |
---|
/*
 * Device attach: quiesce the controller, allocate busdma tags, descriptor
 * rings, maps and RX mbufs, create and register the network interface,
 * and attach the PHY(s) via miibus.
 *
 * Returns 0 on success or an errno on failure.  On any failure the
 * partially initialized resources are released through tsec_detach(),
 * which tolerates being called with only some resources allocated.
 */
int
tsec_attach(struct tsec_softc *sc)
{
	uint8_t hwaddr[ETHER_ADDR_LEN];
	struct ifnet *ifp;
	bus_dmamap_t *map_ptr;
	bus_dmamap_t **map_pptr;
	int error = 0;
	int i;

	/* Reset all TSEC counters */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Stop DMA engine if enabled by firmware */
	tsec_dma_ctl(sc, 0);

	/* Reset MAC */
	tsec_reset_mac(sc);

	/* Disable interrupts for now */
	tsec_intrs_ctl(sc, 0);

	/* Configure defaults for interrupts coalescing */
	sc->rx_ic_time = 768;
	sc->rx_ic_count = 16;
	sc->tx_ic_time = 768;
	sc->tx_ic_count = 16;
	tsec_set_rxic(sc);
	tsec_set_txic(sc);
#ifndef __rtems__
	tsec_add_sysctls(sc);
#endif /* __rtems__ */

	/* Allocate a busdma tag and DMA safe memory for TX descriptors. */
	error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_tx_dtag,
	    &sc->tsec_tx_dmap, sizeof(*sc->tsec_tx_vaddr) * TSEC_TX_NUM_DESC,
	    (void **)&sc->tsec_tx_vaddr, &sc->tsec_tx_raddr, "TX");

	if (error) {
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag and DMA safe memory for RX descriptors. */
	error = tsec_alloc_dma_desc(sc->dev, &sc->tsec_rx_dtag,
	    &sc->tsec_rx_dmap, sizeof(*sc->tsec_rx_vaddr) * TSEC_RX_NUM_DESC,
	    (void **)&sc->tsec_rx_vaddr, &sc->tsec_rx_raddr, "RX");
	if (error) {
		tsec_detach(sc);
		return (ENXIO);
	}

	/*
	 * Allocate a busdma tag for TX mbufs.  A packet may occupy at most
	 * TSEC_TX_NUM_DESC - 1 segments (one descriptor is always kept free
	 * so that head and tail of the ring can be distinguished).
	 */
	error = bus_dma_tag_create(NULL,	/* parent */
	    TSEC_TXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES * (TSEC_TX_NUM_DESC - 1),	/* maxsize */
	    TSEC_TX_NUM_DESC - 1,		/* nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tsec_tx_mtag);			/* dmat */
	if (error) {
		device_printf(sc->dev, "failed to allocate busdma tag "
		    "(tx mbufs)\n");
		tsec_detach(sc);
		return (ENXIO);
	}

	/* Allocate a busdma tag for RX mbufs: one cluster per descriptor. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    TSEC_RXBUFFER_ALIGNMENT, 0,		/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    MCLBYTES,				/* maxsize */
	    1,					/* nsegments */
	    MCLBYTES, 0,			/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    &sc->tsec_rx_mtag);			/* dmat */
	if (error) {
		device_printf(sc->dev, "failed to allocate busdma tag "
		    "(rx mbufs)\n");
		tsec_detach(sc);
		return (ENXIO);
	}

	/*
	 * Create TX busdma maps.  The "unused" stack initially references
	 * every map; tsec_encap() pops maps from it as packets are queued.
	 */
	map_ptr = sc->tx_map_data;
	map_pptr = sc->tx_map_unused_data;

	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		map_pptr[i] = &map_ptr[i];
		error = bus_dmamap_create(sc->tsec_tx_mtag, 0, map_pptr[i]);
		if (error) {
			device_printf(sc->dev, "failed to init TX ring\n");
			tsec_detach(sc);
			return (ENXIO);
		}
	}

	/* Create RX busdma maps and zero mbuf handlers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = bus_dmamap_create(sc->tsec_rx_mtag, 0,
		    &sc->rx_data[i].map);
		if (error) {
			device_printf(sc->dev, "failed to init RX ring\n");
			tsec_detach(sc);
			return (ENXIO);
		}
		sc->rx_data[i].mbuf = NULL;
	}

	/* Create mbufs for RX buffers */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		error = tsec_new_rxbuf(sc->tsec_rx_mtag, sc->rx_data[i].map,
		    &sc->rx_data[i].mbuf, &sc->rx_data[i].paddr);
		if (error) {
			device_printf(sc->dev, "can't load rx DMA map %d, "
			    "error = %d\n", i, error);
			tsec_detach(sc);
			return (error);
		}
	}

	/* Create network interface for upper layers */
	ifp = sc->tsec_ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(sc->dev, "if_alloc() failed\n");
		tsec_detach(sc);
		return (ENOMEM);
	}

	ifp->if_softc = sc;
	if_initname(ifp, device_get_name(sc->dev), device_get_unit(sc->dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_SIMPLEX | IFF_MULTICAST | IFF_BROADCAST;
	ifp->if_init = tsec_init;
	ifp->if_start = tsec_start;
	ifp->if_ioctl = tsec_ioctl;

	/* Send queue depth matches the usable TX descriptor count. */
	IFQ_SET_MAXLEN(&ifp->if_snd, TSEC_TX_NUM_DESC - 1);
	ifp->if_snd.ifq_drv_maxlen = TSEC_TX_NUM_DESC - 1;
	IFQ_SET_READY(&ifp->if_snd);

	/* Hardware checksum offload is only present on enhanced TSEC. */
	ifp->if_capabilities = IFCAP_VLAN_MTU;
	if (sc->is_etsec)
		ifp->if_capabilities |= IFCAP_HWCSUM;

	ifp->if_capenable = ifp->if_capabilities;

#ifdef DEVICE_POLLING
	/* Advertise that polling is supported */
	ifp->if_capabilities |= IFCAP_POLLING;
#endif

	/* Attach PHY(s) */
	error = mii_attach(sc->dev, &sc->tsec_miibus, ifp, tsec_ifmedia_upd,
	    tsec_ifmedia_sts, BMSR_DEFCAPMASK, sc->phyaddr, MII_OFFSET_ANY,
	    0);
	if (error) {
		device_printf(sc->dev, "attaching PHYs failed\n");
		/* Free the ifnet here; tsec_detach() skips a NULL tsec_ifp. */
		if_free(ifp);
		sc->tsec_ifp = NULL;
		tsec_detach(sc);
		return (error);
	}
	sc->tsec_mii = device_get_softc(sc->tsec_miibus);

	/* Set MAC address */
	tsec_get_hwaddr(sc, hwaddr);
	ether_ifattach(ifp, hwaddr);

	return (0);
}
---|
294 | |
---|
295 | int |
---|
296 | tsec_detach(struct tsec_softc *sc) |
---|
297 | { |
---|
298 | |
---|
299 | if (sc->tsec_ifp != NULL) { |
---|
300 | #ifdef DEVICE_POLLING |
---|
301 | if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) |
---|
302 | ether_poll_deregister(sc->tsec_ifp); |
---|
303 | #endif |
---|
304 | |
---|
305 | /* Stop TSEC controller and free TX queue */ |
---|
306 | if (sc->sc_rres) |
---|
307 | tsec_shutdown(sc->dev); |
---|
308 | |
---|
309 | /* Detach network interface */ |
---|
310 | ether_ifdetach(sc->tsec_ifp); |
---|
311 | if_free(sc->tsec_ifp); |
---|
312 | sc->tsec_ifp = NULL; |
---|
313 | } |
---|
314 | |
---|
315 | /* Free DMA resources */ |
---|
316 | tsec_free_dma(sc); |
---|
317 | |
---|
318 | return (0); |
---|
319 | } |
---|
320 | |
---|
321 | int |
---|
322 | tsec_shutdown(device_t dev) |
---|
323 | { |
---|
324 | struct tsec_softc *sc; |
---|
325 | |
---|
326 | sc = device_get_softc(dev); |
---|
327 | |
---|
328 | TSEC_GLOBAL_LOCK(sc); |
---|
329 | tsec_stop(sc); |
---|
330 | TSEC_GLOBAL_UNLOCK(sc); |
---|
331 | return (0); |
---|
332 | } |
---|
333 | |
---|
334 | int |
---|
335 | tsec_suspend(device_t dev) |
---|
336 | { |
---|
337 | |
---|
338 | /* TODO not implemented! */ |
---|
339 | return (0); |
---|
340 | } |
---|
341 | |
---|
342 | int |
---|
343 | tsec_resume(device_t dev) |
---|
344 | { |
---|
345 | |
---|
346 | /* TODO not implemented! */ |
---|
347 | return (0); |
---|
348 | } |
---|
349 | |
---|
/*
 * if_init callback: serialize a full (re)initialization of the
 * controller under the global lock.
 */
static void
tsec_init(void *xsc)
{
	struct tsec_softc *sc;

	sc = xsc;
	TSEC_GLOBAL_LOCK(sc);
	tsec_init_locked(sc);
	TSEC_GLOBAL_UNLOCK(sc);
}
---|
359 | |
---|
/*
 * Bring the controller fully up, following the initialization sequence
 * from the MPC8555E PowerQUICC III reference manual (section 14.7).
 *
 * Caller must hold the global lock.  Returns early (silently) if the
 * interface is already running, and aborts with a console message if
 * the MII management interface never reports non-busy.
 */
static void
tsec_init_locked(struct tsec_softc *sc)
{
	struct tsec_desc *tx_desc = sc->tsec_tx_vaddr;
	struct tsec_desc *rx_desc = sc->tsec_rx_vaddr;
	struct ifnet *ifp = sc->tsec_ifp;
	uint32_t timeout, val, i;

	/* Nothing to do if the interface is already up. */
	if (ifp->if_drv_flags & IFF_DRV_RUNNING)
		return;

	TSEC_GLOBAL_LOCK_ASSERT(sc);
	tsec_stop(sc);

	/*
	 * These steps are according to the MPC8555E PowerQUICCIII RM:
	 * 14.7 Initialization/Application Information
	 */

	/* Step 1: soft reset MAC */
	tsec_reset_mac(sc);

	/* Step 2: Initialize MACCFG2 */
	TSEC_WRITE(sc, TSEC_REG_MACCFG2,
	    TSEC_MACCFG2_FULLDUPLEX |	/* Full Duplex = 1 */
	    TSEC_MACCFG2_PADCRC |	/* PAD/CRC append */
	    TSEC_MACCFG2_GMII |		/* I/F Mode bit */
	    TSEC_MACCFG2_PRECNT		/* Preamble count = 7 */
	);

	/* Step 3: Initialize ECNTRL
	 * While the documentation states that R100M is ignored if RPM is
	 * not set, it does seem to be needed to get the orange boxes to
	 * work (which have a Marvell 88E1111 PHY). Go figure.
	 */

	/*
	 * XXX kludge - use circumstancial evidence to program ECNTRL
	 * correctly. Ideally we need some board information to guide
	 * us here.
	 */
	/* NOTE(review): a non-zero low half of ID2 is taken to mean the
	 * Sumatra board variant — confirm against board documentation. */
	i = TSEC_READ(sc, TSEC_REG_ID2);
	val = (i & 0xffff)
	    ? (TSEC_ECNTRL_TBIM | TSEC_ECNTRL_SGMIIM)	/* Sumatra */
	    : TSEC_ECNTRL_R100M;			/* Orange + CDS */
	TSEC_WRITE(sc, TSEC_REG_ECNTRL, TSEC_ECNTRL_STEN | val);

	/* Step 4: Initialize MAC station address */
	tsec_set_mac_address(sc);

	/*
	 * Step 5: Assign a Physical address to the TBI so as to not conflict
	 * with the external PHY physical address
	 */
	TSEC_WRITE(sc, TSEC_REG_TBIPA, 5);

	/* Step 6: Reset the management interface */
	TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_RESETMGMT);

	/* Step 7: Setup the MII Mgmt clock speed */
	TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMCFG, TSEC_MIIMCFG_CLKDIV28);

	/* Step 8: Read MII Mgmt indicator register and check for Busy = 0 */
	timeout = TSEC_READ_RETRY;
	while (--timeout && (TSEC_READ(sc->phy_sc, TSEC_REG_MIIMIND) &
	    TSEC_MIIMIND_BUSY))
		DELAY(TSEC_READ_DELAY);
	if (timeout == 0) {
		/* Mgmt interface stuck busy; abort initialization here. */
		if_printf(ifp, "tsec_init_locked(): Mgmt busy timeout\n");
		return;
	}

	/* Step 9: Setup the MII Mgmt */
	mii_mediachg(sc->tsec_mii);

	/* Step 10: Clear IEVENT register (write-1-to-clear all events) */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, 0xffffffff);

	/* Step 11: Enable interrupts */
#ifdef DEVICE_POLLING
	/*
	 * ...only if polling is not turned on. Disable interrupts explicitly
	 * if polling is enabled.
	 */
	if (ifp->if_capenable & IFCAP_POLLING )
		tsec_intrs_ctl(sc, 0);
	else
#endif /* DEVICE_POLLING */
	tsec_intrs_ctl(sc, 1);

	/* Step 12: Initialize IADDRn (individual hash table cleared) */
	TSEC_WRITE(sc, TSEC_REG_IADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_IADDR7, 0);

	/* Step 13: Initialize GADDRn (group hash table cleared) */
	TSEC_WRITE(sc, TSEC_REG_GADDR0, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR1, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR2, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR3, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR4, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR5, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR6, 0);
	TSEC_WRITE(sc, TSEC_REG_GADDR7, 0);

	/* Step 14: Initialize RCTRL */
	TSEC_WRITE(sc, TSEC_REG_RCTRL, 0);

	/* Step 15: Initialize DMACTRL */
	tsec_dma_ctl(sc, 1);

	/* Step 16: Initialize FIFO_PAUSE_CTRL */
	TSEC_WRITE(sc, TSEC_REG_FIFO_PAUSE_CTRL, TSEC_FIFO_PAUSE_CTRL_EN);

	/*
	 * Step 17: Initialize transmit/receive descriptor rings.
	 * Initialize TBASE and RBASE.
	 */
	TSEC_WRITE(sc, TSEC_REG_TBASE, sc->tsec_tx_raddr);
	TSEC_WRITE(sc, TSEC_REG_RBASE, sc->tsec_rx_raddr);

	/* TX descriptors start empty; only the last one carries the wrap bit. */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++) {
		tx_desc[i].bufptr = 0;
		tx_desc[i].length = 0;
		tx_desc[i].flags = ((i == TSEC_TX_NUM_DESC - 1) ?
		    TSEC_TXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* RX descriptors are handed to hardware (E bit), interrupt per frame. */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		rx_desc[i].bufptr = sc->rx_data[i].paddr;
		rx_desc[i].length = 0;
		rx_desc[i].flags = TSEC_RXBD_E | TSEC_RXBD_I |
		    ((i == TSEC_RX_NUM_DESC - 1) ? TSEC_RXBD_W : 0);
	}
	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* Step 18: Initialize the maximum receive buffer length */
	TSEC_WRITE(sc, TSEC_REG_MRBLR, MCLBYTES);

	/* Step 19: Configure ethernet frame sizes */
	TSEC_WRITE(sc, TSEC_REG_MINFLR, TSEC_MIN_FRAME_SIZE);
	tsec_set_mtu(sc, ifp->if_mtu);

	/* Step 20: Enable Rx and RxBD sdata snooping */
	TSEC_WRITE(sc, TSEC_REG_ATTR, TSEC_ATTR_RDSEN | TSEC_ATTR_RBDSEN);
	TSEC_WRITE(sc, TSEC_REG_ATTRELI, 0);

	/* Step 21: Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	/* Step 22: Mask all CAM interrupts */
	TSEC_WRITE(sc, TSEC_REG_MON_CAM1, 0xffffffff);
	TSEC_WRITE(sc, TSEC_REG_MON_CAM2, 0xffffffff);

	/* Step 23: Enable Rx and Tx */
	val = TSEC_READ(sc, TSEC_REG_MACCFG1);
	val |= (TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, val);

	/* Step 24: Reset TSEC counters for Tx and Rx rings */
	TSEC_TX_RX_COUNTERS_INIT(sc);

	/* Step 25: Setup TCP/IP Off-Load engine */
	if (sc->is_etsec)
		tsec_offload_setup(sc);

	/* Step 26: Setup multicast filters */
	tsec_setup_multicast(sc);

	/* Step 27: Activate network interface */
	ifp->if_drv_flags |= IFF_DRV_RUNNING;
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	sc->tsec_if_flags = ifp->if_flags;
	sc->tsec_watchdog = 0;

	/* Schedule watchdog timeout */
	callout_reset(&sc->tsec_callout, hz, tsec_tick, sc);
}
---|
550 | |
---|
551 | static void |
---|
552 | tsec_set_mac_address(struct tsec_softc *sc) |
---|
553 | { |
---|
554 | uint32_t macbuf[2] = { 0, 0 }; |
---|
555 | char *macbufp, *curmac; |
---|
556 | int i; |
---|
557 | |
---|
558 | TSEC_GLOBAL_LOCK_ASSERT(sc); |
---|
559 | |
---|
560 | KASSERT((ETHER_ADDR_LEN <= sizeof(macbuf)), |
---|
561 | ("tsec_set_mac_address: (%d <= %d", ETHER_ADDR_LEN, |
---|
562 | sizeof(macbuf))); |
---|
563 | |
---|
564 | macbufp = (char *)macbuf; |
---|
565 | curmac = (char *)IF_LLADDR(sc->tsec_ifp); |
---|
566 | |
---|
567 | /* Correct order of MAC address bytes */ |
---|
568 | for (i = 1; i <= ETHER_ADDR_LEN; i++) |
---|
569 | macbufp[ETHER_ADDR_LEN-i] = curmac[i-1]; |
---|
570 | |
---|
571 | /* Initialize MAC station address MACSTNADDR2 and MACSTNADDR1 */ |
---|
572 | TSEC_WRITE(sc, TSEC_REG_MACSTNADDR2, macbuf[1]); |
---|
573 | TSEC_WRITE(sc, TSEC_REG_MACSTNADDR1, macbuf[0]); |
---|
574 | } |
---|
575 | |
---|
576 | /* |
---|
577 | * DMA control function, if argument state is: |
---|
578 | * 0 - DMA engine will be disabled |
---|
579 | * 1 - DMA engine will be enabled |
---|
580 | */ |
---|
static void
tsec_dma_ctl(struct tsec_softc *sc, int state)
{
	device_t dev;
	uint32_t dma_flags, timeout;

	dev = sc->dev;

	dma_flags = TSEC_READ(sc, TSEC_REG_DMACTRL);

	/*
	 * State 1000 is an internal pseudo-state used only by the
	 * recursive call below: it programs DMACTRL like "enable" (clears
	 * GRS/GTS) but skips the post-write handling in the second switch.
	 */
	switch (state) {
	case 0:
		/* Temporarily clear stop graceful stop bits. */
		tsec_dma_ctl(sc, 1000);

		/* Set it again */
		dma_flags |= (TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	case 1000:
	case 1:
		/* Set write with response (WWR), wait (WOP) and snoop bits */
		dma_flags |= (TSEC_DMACTRL_TDSEN | TSEC_DMACTRL_TBDSEN |
		    DMACTRL_WWR | DMACTRL_WOP);

		/* Clear graceful stop bits */
		dma_flags &= ~(TSEC_DMACTRL_GRS | TSEC_DMACTRL_GTS);
		break;
	default:
		/*
		 * Deliberate fall-through: an unknown state is reported but
		 * DMACTRL is still written back with its unmodified value.
		 */
		device_printf(dev, "tsec_dma_ctl(): unknown state value: %d\n",
		    state);
	}

	TSEC_WRITE(sc, TSEC_REG_DMACTRL, dma_flags);

	switch (state) {
	case 0:
		/* Wait for DMA stop: poll IEVENT for graceful stop complete */
		timeout = TSEC_READ_RETRY;
		while (--timeout && (!(TSEC_READ(sc, TSEC_REG_IEVENT) &
		    (TSEC_IEVENT_GRSC | TSEC_IEVENT_GTSC))))
			DELAY(TSEC_READ_DELAY);

		if (timeout == 0)
			device_printf(dev, "tsec_dma_ctl(): timeout!\n");
		break;
	case 1:
		/* Restart transmission function */
		TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT);
	}
}
---|
631 | |
---|
632 | /* |
---|
633 | * Interrupts control function, if argument state is: |
---|
634 | * 0 - all TSEC interrupts will be masked |
---|
635 | * 1 - all TSEC interrupts will be unmasked |
---|
636 | */ |
---|
637 | static void |
---|
638 | tsec_intrs_ctl(struct tsec_softc *sc, int state) |
---|
639 | { |
---|
640 | device_t dev; |
---|
641 | |
---|
642 | dev = sc->dev; |
---|
643 | |
---|
644 | switch (state) { |
---|
645 | case 0: |
---|
646 | TSEC_WRITE(sc, TSEC_REG_IMASK, 0); |
---|
647 | break; |
---|
648 | case 1: |
---|
649 | TSEC_WRITE(sc, TSEC_REG_IMASK, TSEC_IMASK_BREN | |
---|
650 | TSEC_IMASK_RXCEN | TSEC_IMASK_BSYEN | TSEC_IMASK_EBERREN | |
---|
651 | TSEC_IMASK_BTEN | TSEC_IMASK_TXEEN | TSEC_IMASK_TXBEN | |
---|
652 | TSEC_IMASK_TXFEN | TSEC_IMASK_XFUNEN | TSEC_IMASK_RXFEN); |
---|
653 | break; |
---|
654 | default: |
---|
655 | device_printf(dev, "tsec_intrs_ctl(): unknown state value: %d\n", |
---|
656 | state); |
---|
657 | } |
---|
658 | } |
---|
659 | |
---|
660 | static void |
---|
661 | tsec_reset_mac(struct tsec_softc *sc) |
---|
662 | { |
---|
663 | uint32_t maccfg1_flags; |
---|
664 | |
---|
665 | /* Set soft reset bit */ |
---|
666 | maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1); |
---|
667 | maccfg1_flags |= TSEC_MACCFG1_SOFT_RESET; |
---|
668 | TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags); |
---|
669 | |
---|
670 | /* Clear soft reset bit */ |
---|
671 | maccfg1_flags = TSEC_READ(sc, TSEC_REG_MACCFG1); |
---|
672 | maccfg1_flags &= ~TSEC_MACCFG1_SOFT_RESET; |
---|
673 | TSEC_WRITE(sc, TSEC_REG_MACCFG1, maccfg1_flags); |
---|
674 | } |
---|
675 | |
---|
676 | static void |
---|
677 | tsec_watchdog(struct tsec_softc *sc) |
---|
678 | { |
---|
679 | struct ifnet *ifp; |
---|
680 | |
---|
681 | TSEC_GLOBAL_LOCK_ASSERT(sc); |
---|
682 | |
---|
683 | if (sc->tsec_watchdog == 0 || --sc->tsec_watchdog > 0) |
---|
684 | return; |
---|
685 | |
---|
686 | ifp = sc->tsec_ifp; |
---|
687 | ifp->if_oerrors++; |
---|
688 | if_printf(ifp, "watchdog timeout\n"); |
---|
689 | |
---|
690 | tsec_stop(sc); |
---|
691 | tsec_init_locked(sc); |
---|
692 | } |
---|
693 | |
---|
694 | static void |
---|
695 | tsec_start(struct ifnet *ifp) |
---|
696 | { |
---|
697 | struct tsec_softc *sc = ifp->if_softc; |
---|
698 | |
---|
699 | TSEC_TRANSMIT_LOCK(sc); |
---|
700 | tsec_start_locked(ifp); |
---|
701 | TSEC_TRANSMIT_UNLOCK(sc); |
---|
702 | } |
---|
703 | |
---|
704 | static void |
---|
705 | tsec_start_locked(struct ifnet *ifp) |
---|
706 | { |
---|
707 | struct tsec_softc *sc; |
---|
708 | struct mbuf *m0, *mtmp; |
---|
709 | struct tsec_tx_fcb *tx_fcb; |
---|
710 | unsigned int queued = 0; |
---|
711 | int csum_flags, fcb_inserted = 0; |
---|
712 | |
---|
713 | sc = ifp->if_softc; |
---|
714 | |
---|
715 | TSEC_TRANSMIT_LOCK_ASSERT(sc); |
---|
716 | |
---|
717 | if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != |
---|
718 | IFF_DRV_RUNNING) |
---|
719 | return; |
---|
720 | |
---|
721 | if (sc->tsec_link == 0) |
---|
722 | return; |
---|
723 | |
---|
724 | bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, |
---|
725 | BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); |
---|
726 | |
---|
727 | while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) { |
---|
728 | /* Get packet from the queue */ |
---|
729 | IFQ_DRV_DEQUEUE(&ifp->if_snd, m0); |
---|
730 | if (m0 == NULL) |
---|
731 | break; |
---|
732 | |
---|
733 | /* Insert TCP/IP Off-load frame control block */ |
---|
734 | csum_flags = m0->m_pkthdr.csum_flags; |
---|
735 | if (csum_flags) { |
---|
736 | |
---|
737 | M_PREPEND(m0, sizeof(struct tsec_tx_fcb), M_NOWAIT); |
---|
738 | if (m0 == NULL) |
---|
739 | break; |
---|
740 | |
---|
741 | tx_fcb = mtod(m0, struct tsec_tx_fcb *); |
---|
742 | tx_fcb->flags = 0; |
---|
743 | tx_fcb->l3_offset = ETHER_HDR_LEN; |
---|
744 | tx_fcb->l4_offset = sizeof(struct ip); |
---|
745 | |
---|
746 | if (csum_flags & CSUM_IP) |
---|
747 | tx_fcb->flags |= TSEC_TX_FCB_IP4 | |
---|
748 | TSEC_TX_FCB_CSUM_IP; |
---|
749 | |
---|
750 | if (csum_flags & CSUM_TCP) |
---|
751 | tx_fcb->flags |= TSEC_TX_FCB_TCP | |
---|
752 | TSEC_TX_FCB_CSUM_TCP_UDP; |
---|
753 | |
---|
754 | if (csum_flags & CSUM_UDP) |
---|
755 | tx_fcb->flags |= TSEC_TX_FCB_UDP | |
---|
756 | TSEC_TX_FCB_CSUM_TCP_UDP; |
---|
757 | |
---|
758 | fcb_inserted = 1; |
---|
759 | } |
---|
760 | |
---|
761 | mtmp = m_defrag(m0, M_NOWAIT); |
---|
762 | if (mtmp) |
---|
763 | m0 = mtmp; |
---|
764 | |
---|
765 | if (tsec_encap(sc, m0, fcb_inserted)) { |
---|
766 | IFQ_DRV_PREPEND(&ifp->if_snd, m0); |
---|
767 | ifp->if_drv_flags |= IFF_DRV_OACTIVE; |
---|
768 | break; |
---|
769 | } |
---|
770 | queued++; |
---|
771 | BPF_MTAP(ifp, m0); |
---|
772 | } |
---|
773 | bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap, |
---|
774 | BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); |
---|
775 | |
---|
776 | if (queued) { |
---|
777 | /* Enable transmitter and watchdog timer */ |
---|
778 | TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); |
---|
779 | sc->tsec_watchdog = 5; |
---|
780 | } |
---|
781 | } |
---|
782 | |
---|
/*
 * Map mbuf chain m0 for DMA and fill one TX descriptor per segment.
 * fcb_inserted indicates a TX frame control block was prepended, in
 * which case TSEC_TXBD_TOE is set on the first descriptor.
 *
 * Returns 0 on success; -1 when descriptors or maps are exhausted (the
 * caller re-queues the packet); otherwise the busdma error code.
 * Caller holds the transmit lock and performs the descriptor ring sync.
 */
static int
tsec_encap(struct tsec_softc *sc, struct mbuf *m0, int fcb_inserted)
{
	struct tsec_desc *tx_desc = NULL;
	struct ifnet *ifp;
	bus_dma_segment_t segs[TSEC_TX_NUM_DESC];
	bus_dmamap_t *mapp;
	int csum_flag = 0, error, seg, nsegs;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	if (TSEC_FREE_TX_DESC(sc) == 0) {
		/* No free descriptors */
		return (-1);
	}

	/* Fetch unused map */
	mapp = TSEC_ALLOC_TX_MAP(sc);

	/* Create mapping in DMA memory */
	error = bus_dmamap_load_mbuf_sg(sc->tsec_tx_mtag,
	    *mapp, m0, segs, &nsegs, BUS_DMA_NOWAIT);
	/* Also fail if the chain needs more segments than are free. */
	if (error != 0 || nsegs > TSEC_FREE_TX_DESC(sc) || nsegs <= 0) {
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);
		TSEC_FREE_TX_MAP(sc, mapp);
		return ((error != 0) ? error : -1);
	}
	bus_dmamap_sync(sc->tsec_tx_mtag, *mapp, BUS_DMASYNC_PREWRITE);

	if ((ifp->if_flags & IFF_DEBUG) && (nsegs > 1))
		if_printf(ifp, "TX buffer has %d segments\n", nsegs);

	if (fcb_inserted)
		csum_flag = TSEC_TXBD_TOE;

	/* Everything is ok, now we can send buffers */
	for (seg = 0; seg < nsegs; seg++) {
		tx_desc = TSEC_GET_CUR_TX_DESC(sc);

		tx_desc->length = segs[seg].ds_len;
		tx_desc->bufptr = segs[seg].ds_addr;

		/*
		 * Set flags:
		 *   - wrap (preserved from the ring — only the last
		 *     descriptor in the ring carries TSEC_TXBD_W)
		 *   - checksum (first segment only)
		 *   - ready to send
		 *   - transmit the CRC sequence after the last data byte
		 *   - interrupt after the last buffer
		 */
		tx_desc->flags =
		    (tx_desc->flags & TSEC_TXBD_W) |
		    ((seg == 0) ? csum_flag : 0) | TSEC_TXBD_R | TSEC_TXBD_TC |
		    ((seg == nsegs - 1) ? TSEC_TXBD_L | TSEC_TXBD_I : 0);
	}

	/* Save mbuf and DMA mapping for release at later stage */
	TSEC_PUT_TX_MBUF(sc, m0);
	TSEC_PUT_TX_MAP(sc, mapp);

	return (0);
}
---|
847 | |
---|
848 | static void |
---|
849 | tsec_setfilter(struct tsec_softc *sc) |
---|
850 | { |
---|
851 | struct ifnet *ifp; |
---|
852 | uint32_t flags; |
---|
853 | |
---|
854 | ifp = sc->tsec_ifp; |
---|
855 | flags = TSEC_READ(sc, TSEC_REG_RCTRL); |
---|
856 | |
---|
857 | /* Promiscuous mode */ |
---|
858 | if (ifp->if_flags & IFF_PROMISC) |
---|
859 | flags |= TSEC_RCTRL_PROM; |
---|
860 | else |
---|
861 | flags &= ~TSEC_RCTRL_PROM; |
---|
862 | |
---|
863 | TSEC_WRITE(sc, TSEC_REG_RCTRL, flags); |
---|
864 | } |
---|
865 | |
---|
#ifdef DEVICE_POLLING
static poll_handler_t tsec_poll;

/*
 * DEVICE_POLLING handler: services the error, transmit and receive
 * paths without relying on interrupts.  Returns the number of packets
 * received (bounded by count).
 */
static int
tsec_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
{
	uint32_t ie;
	struct tsec_softc *sc = ifp->if_softc;
	int rx_npkts;

	rx_npkts = 0;

	TSEC_GLOBAL_LOCK(sc);
	if (!(ifp->if_drv_flags & IFF_DRV_RUNNING)) {
		/* Interface is down; nothing to poll. */
		TSEC_GLOBAL_UNLOCK(sc);
		return (rx_npkts);
	}

	if (cmd == POLL_AND_CHECK_STATUS) {
		tsec_error_intr_locked(sc, count);

		/* Clear all events reported */
		ie = TSEC_READ(sc, TSEC_REG_IEVENT);
		TSEC_WRITE(sc, TSEC_REG_IEVENT, ie);
	}

	tsec_transmit_intr_locked(sc);

	/* Release the transmit half of the global lock, keep receive. */
	TSEC_GLOBAL_TO_RECEIVE_LOCK(sc);

	rx_npkts = tsec_receive_intr_locked(sc, count);

	TSEC_RECEIVE_UNLOCK(sc);

	return (rx_npkts);
}
#endif /* DEVICE_POLLING */
---|
903 | |
---|
904 | static int |
---|
905 | tsec_ioctl(struct ifnet *ifp, u_long command, caddr_t data) |
---|
906 | { |
---|
907 | struct tsec_softc *sc = ifp->if_softc; |
---|
908 | struct ifreq *ifr = (struct ifreq *)data; |
---|
909 | device_t dev; |
---|
910 | int mask, error = 0; |
---|
911 | |
---|
912 | dev = sc->dev; |
---|
913 | |
---|
914 | switch (command) { |
---|
915 | case SIOCSIFMTU: |
---|
916 | TSEC_GLOBAL_LOCK(sc); |
---|
917 | if (tsec_set_mtu(sc, ifr->ifr_mtu)) |
---|
918 | ifp->if_mtu = ifr->ifr_mtu; |
---|
919 | else |
---|
920 | error = EINVAL; |
---|
921 | TSEC_GLOBAL_UNLOCK(sc); |
---|
922 | break; |
---|
923 | case SIOCSIFFLAGS: |
---|
924 | TSEC_GLOBAL_LOCK(sc); |
---|
925 | if (ifp->if_flags & IFF_UP) { |
---|
926 | if (ifp->if_drv_flags & IFF_DRV_RUNNING) { |
---|
927 | if ((sc->tsec_if_flags ^ ifp->if_flags) & |
---|
928 | IFF_PROMISC) |
---|
929 | tsec_setfilter(sc); |
---|
930 | |
---|
931 | if ((sc->tsec_if_flags ^ ifp->if_flags) & |
---|
932 | IFF_ALLMULTI) |
---|
933 | tsec_setup_multicast(sc); |
---|
934 | } else |
---|
935 | tsec_init_locked(sc); |
---|
936 | } else if (ifp->if_drv_flags & IFF_DRV_RUNNING) |
---|
937 | tsec_stop(sc); |
---|
938 | |
---|
939 | sc->tsec_if_flags = ifp->if_flags; |
---|
940 | TSEC_GLOBAL_UNLOCK(sc); |
---|
941 | break; |
---|
942 | case SIOCADDMULTI: |
---|
943 | case SIOCDELMULTI: |
---|
944 | if (ifp->if_drv_flags & IFF_DRV_RUNNING) { |
---|
945 | TSEC_GLOBAL_LOCK(sc); |
---|
946 | tsec_setup_multicast(sc); |
---|
947 | TSEC_GLOBAL_UNLOCK(sc); |
---|
948 | } |
---|
949 | case SIOCGIFMEDIA: |
---|
950 | case SIOCSIFMEDIA: |
---|
951 | error = ifmedia_ioctl(ifp, ifr, &sc->tsec_mii->mii_media, |
---|
952 | command); |
---|
953 | break; |
---|
954 | case SIOCSIFCAP: |
---|
955 | mask = ifp->if_capenable ^ ifr->ifr_reqcap; |
---|
956 | if ((mask & IFCAP_HWCSUM) && sc->is_etsec) { |
---|
957 | TSEC_GLOBAL_LOCK(sc); |
---|
958 | ifp->if_capenable &= ~IFCAP_HWCSUM; |
---|
959 | ifp->if_capenable |= IFCAP_HWCSUM & ifr->ifr_reqcap; |
---|
960 | tsec_offload_setup(sc); |
---|
961 | TSEC_GLOBAL_UNLOCK(sc); |
---|
962 | } |
---|
963 | #ifdef DEVICE_POLLING |
---|
964 | if (mask & IFCAP_POLLING) { |
---|
965 | if (ifr->ifr_reqcap & IFCAP_POLLING) { |
---|
966 | error = ether_poll_register(tsec_poll, ifp); |
---|
967 | if (error) |
---|
968 | return (error); |
---|
969 | |
---|
970 | TSEC_GLOBAL_LOCK(sc); |
---|
971 | /* Disable interrupts */ |
---|
972 | tsec_intrs_ctl(sc, 0); |
---|
973 | ifp->if_capenable |= IFCAP_POLLING; |
---|
974 | TSEC_GLOBAL_UNLOCK(sc); |
---|
975 | } else { |
---|
976 | error = ether_poll_deregister(ifp); |
---|
977 | TSEC_GLOBAL_LOCK(sc); |
---|
978 | /* Enable interrupts */ |
---|
979 | tsec_intrs_ctl(sc, 1); |
---|
980 | ifp->if_capenable &= ~IFCAP_POLLING; |
---|
981 | TSEC_GLOBAL_UNLOCK(sc); |
---|
982 | } |
---|
983 | } |
---|
984 | #endif |
---|
985 | break; |
---|
986 | |
---|
987 | default: |
---|
988 | error = ether_ioctl(ifp, command, data); |
---|
989 | } |
---|
990 | |
---|
991 | /* Flush buffers if not empty */ |
---|
992 | if (ifp->if_flags & IFF_UP) |
---|
993 | tsec_start(ifp); |
---|
994 | return (error); |
---|
995 | } |
---|
996 | |
---|
997 | static int |
---|
998 | tsec_ifmedia_upd(struct ifnet *ifp) |
---|
999 | { |
---|
1000 | struct tsec_softc *sc = ifp->if_softc; |
---|
1001 | struct mii_data *mii; |
---|
1002 | |
---|
1003 | TSEC_TRANSMIT_LOCK(sc); |
---|
1004 | |
---|
1005 | mii = sc->tsec_mii; |
---|
1006 | mii_mediachg(mii); |
---|
1007 | |
---|
1008 | TSEC_TRANSMIT_UNLOCK(sc); |
---|
1009 | return (0); |
---|
1010 | } |
---|
1011 | |
---|
1012 | static void |
---|
1013 | tsec_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) |
---|
1014 | { |
---|
1015 | struct tsec_softc *sc = ifp->if_softc; |
---|
1016 | struct mii_data *mii; |
---|
1017 | |
---|
1018 | TSEC_TRANSMIT_LOCK(sc); |
---|
1019 | |
---|
1020 | mii = sc->tsec_mii; |
---|
1021 | mii_pollstat(mii); |
---|
1022 | |
---|
1023 | ifmr->ifm_active = mii->mii_media_active; |
---|
1024 | ifmr->ifm_status = mii->mii_media_status; |
---|
1025 | |
---|
1026 | TSEC_TRANSMIT_UNLOCK(sc); |
---|
1027 | } |
---|
1028 | |
---|
1029 | static int |
---|
1030 | tsec_new_rxbuf(bus_dma_tag_t tag, bus_dmamap_t map, struct mbuf **mbufp, |
---|
1031 | uint32_t *paddr) |
---|
1032 | { |
---|
1033 | struct mbuf *new_mbuf; |
---|
1034 | bus_dma_segment_t seg[1]; |
---|
1035 | int error, nsegs; |
---|
1036 | |
---|
1037 | KASSERT(mbufp != NULL, ("NULL mbuf pointer!")); |
---|
1038 | |
---|
1039 | new_mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, MCLBYTES); |
---|
1040 | if (new_mbuf == NULL) |
---|
1041 | return (ENOBUFS); |
---|
1042 | new_mbuf->m_len = new_mbuf->m_pkthdr.len = new_mbuf->m_ext.ext_size; |
---|
1043 | |
---|
1044 | if (*mbufp) { |
---|
1045 | bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD); |
---|
1046 | bus_dmamap_unload(tag, map); |
---|
1047 | } |
---|
1048 | |
---|
1049 | error = bus_dmamap_load_mbuf_sg(tag, map, new_mbuf, seg, &nsegs, |
---|
1050 | BUS_DMA_NOWAIT); |
---|
1051 | KASSERT(nsegs == 1, ("Too many segments returned!")); |
---|
1052 | if (nsegs != 1 || error) |
---|
1053 | panic("tsec_new_rxbuf(): nsegs(%d), error(%d)", nsegs, error); |
---|
1054 | |
---|
1055 | #if 0 |
---|
1056 | if (error) { |
---|
1057 | printf("tsec: bus_dmamap_load_mbuf_sg() returned: %d!\n", |
---|
1058 | error); |
---|
1059 | m_freem(new_mbuf); |
---|
1060 | return (ENOBUFS); |
---|
1061 | } |
---|
1062 | #endif |
---|
1063 | |
---|
1064 | #if 0 |
---|
1065 | KASSERT(((seg->ds_addr) & (TSEC_RXBUFFER_ALIGNMENT-1)) == 0, |
---|
1066 | ("Wrong alignment of RX buffer!")); |
---|
1067 | #endif |
---|
1068 | bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD); |
---|
1069 | |
---|
1070 | (*mbufp) = new_mbuf; |
---|
1071 | (*paddr) = seg->ds_addr; |
---|
1072 | return (0); |
---|
1073 | } |
---|
1074 | |
---|
1075 | static void |
---|
1076 | tsec_map_dma_addr(void *arg, bus_dma_segment_t *segs, int nseg, int error) |
---|
1077 | { |
---|
1078 | u_int32_t *paddr; |
---|
1079 | |
---|
1080 | KASSERT(nseg == 1, ("wrong number of segments, should be 1")); |
---|
1081 | paddr = arg; |
---|
1082 | *paddr = segs->ds_addr; |
---|
1083 | } |
---|
1084 | |
---|
/*
 * Allocate one TX or RX descriptor ring: create a busdma tag, allocate
 * zeroed DMA-safe memory for it, and load the map to obtain the bus
 * address (stored through raddr by tsec_map_dma_addr).
 *
 * On success *vaddr holds the kernel virtual address of the ring and 0
 * is returned.  On any failure everything allocated so far is torn
 * down, *vaddr is set to NULL and ENXIO is returned.  dname is used
 * only for diagnostics.
 */
static int
tsec_alloc_dma_desc(device_t dev, bus_dma_tag_t *dtag, bus_dmamap_t *dmap,
    bus_size_t dsize, void **vaddr, void *raddr, const char *dname)
{
	int error;

	/* Allocate a busdma tag and DMA safe memory for TX/RX descriptors. */
	error = bus_dma_tag_create(NULL,	/* parent */
	    PAGE_SIZE, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR_32BIT,		/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filtfunc, filtfuncarg */
	    dsize, 1,				/* maxsize, nsegments */
	    dsize, 0,				/* maxsegsz, flags */
	    NULL, NULL,				/* lockfunc, lockfuncarg */
	    dtag);				/* dmat */

	if (error) {
		device_printf(dev, "failed to allocate busdma %s tag\n",
		    dname);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	error = bus_dmamem_alloc(*dtag, vaddr, BUS_DMA_NOWAIT | BUS_DMA_ZERO,
	    dmap);
	if (error) {
		device_printf(dev, "failed to allocate %s DMA safe memory\n",
		    dname);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	/* Resolve the ring's bus address; callback writes it to raddr. */
	error = bus_dmamap_load(*dtag, *dmap, *vaddr, dsize,
	    tsec_map_dma_addr, raddr, BUS_DMA_NOWAIT);
	if (error) {
		device_printf(dev, "cannot get address of the %s "
		    "descriptors\n", dname);
		bus_dmamem_free(*dtag, *vaddr, *dmap);
		bus_dma_tag_destroy(*dtag);
		(*vaddr) = NULL;
		return (ENXIO);
	}

	return (0);
}
---|
1132 | |
---|
1133 | static void |
---|
1134 | tsec_free_dma_desc(bus_dma_tag_t dtag, bus_dmamap_t dmap, void *vaddr) |
---|
1135 | { |
---|
1136 | |
---|
1137 | if (vaddr == NULL) |
---|
1138 | return; |
---|
1139 | |
---|
1140 | /* Unmap descriptors from DMA memory */ |
---|
1141 | bus_dmamap_sync(dtag, dmap, BUS_DMASYNC_POSTREAD | |
---|
1142 | BUS_DMASYNC_POSTWRITE); |
---|
1143 | bus_dmamap_unload(dtag, dmap); |
---|
1144 | |
---|
1145 | /* Free descriptors memory */ |
---|
1146 | bus_dmamem_free(dtag, vaddr, dmap); |
---|
1147 | |
---|
1148 | /* Destroy descriptors tag */ |
---|
1149 | bus_dma_tag_destroy(dtag); |
---|
1150 | } |
---|
1151 | |
---|
/*
 * Release every DMA resource held by the softc: TX maps and their tag,
 * RX mbufs/maps and their tag, and finally both descriptor rings.
 */
static void
tsec_free_dma(struct tsec_softc *sc)
{
	int i;

	/* Free TX maps */
	for (i = 0; i < TSEC_TX_NUM_DESC; i++)
		if (sc->tx_map_data[i] != NULL)
			bus_dmamap_destroy(sc->tsec_tx_mtag,
			    sc->tx_map_data[i]);
	/* Destroy tag for TX mbufs */
	bus_dma_tag_destroy(sc->tsec_tx_mtag);

	/* Free RX mbufs and maps */
	for (i = 0; i < TSEC_RX_NUM_DESC; i++) {
		if (sc->rx_data[i].mbuf) {
			/* Unload buffer from DMA */
			bus_dmamap_sync(sc->tsec_rx_mtag, sc->rx_data[i].map,
			    BUS_DMASYNC_POSTREAD);
			bus_dmamap_unload(sc->tsec_rx_mtag,
			    sc->rx_data[i].map);

			/* Free buffer */
			m_freem(sc->rx_data[i].mbuf);
		}
		/* Destroy map for this buffer */
		if (sc->rx_data[i].map != NULL)
			bus_dmamap_destroy(sc->tsec_rx_mtag,
			    sc->rx_data[i].map);
	}
	/* Destroy tag for RX mbufs */
	bus_dma_tag_destroy(sc->tsec_rx_mtag);

	/* Unload TX/RX descriptors */
	tsec_free_dma_desc(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    sc->tsec_tx_vaddr);
	tsec_free_dma_desc(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    sc->tsec_rx_vaddr);
}
---|
1191 | |
---|
/*
 * Bring the interface down: stop the tick callout and watchdog, mask
 * interrupts, halt DMA, drop every frame still queued for transmit,
 * then disable the MAC receive/transmit paths.
 *
 * Called with the global (transmit + receive) lock held.
 */
static void
tsec_stop(struct tsec_softc *sc)
{
	struct ifnet *ifp;
	struct mbuf *m0;
	bus_dmamap_t *mapp;
	uint32_t tmpval;

	TSEC_GLOBAL_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	/* Disable interface and watchdog timer */
	callout_stop(&sc->tsec_callout);
	ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
	sc->tsec_watchdog = 0;

	/* Disable all interrupts and stop DMA */
	tsec_intrs_ctl(sc, 0);
	tsec_dma_ctl(sc, 0);

	/* Remove pending data from TX queue */
	while (!TSEC_EMPTYQ_TX_MBUF(sc)) {
		m0 = TSEC_GET_TX_MBUF(sc);
		mapp = TSEC_GET_TX_MAP(sc);

		bus_dmamap_sync(sc->tsec_tx_mtag, *mapp,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);

		TSEC_FREE_TX_MAP(sc, mapp);
		m_freem(m0);
	}

	/* Disable RX and TX */
	tmpval = TSEC_READ(sc, TSEC_REG_MACCFG1);
	tmpval &= ~(TSEC_MACCFG1_RX_EN | TSEC_MACCFG1_TX_EN);
	TSEC_WRITE(sc, TSEC_REG_MACCFG1, tmpval);
	/* Let the MAC settle after clearing the enable bits. */
	DELAY(10);
}
---|
1232 | |
---|
1233 | static void |
---|
1234 | tsec_tick(void *arg) |
---|
1235 | { |
---|
1236 | struct tsec_softc *sc = arg; |
---|
1237 | struct ifnet *ifp; |
---|
1238 | int link; |
---|
1239 | |
---|
1240 | TSEC_GLOBAL_LOCK(sc); |
---|
1241 | |
---|
1242 | tsec_watchdog(sc); |
---|
1243 | |
---|
1244 | ifp = sc->tsec_ifp; |
---|
1245 | link = sc->tsec_link; |
---|
1246 | |
---|
1247 | mii_tick(sc->tsec_mii); |
---|
1248 | |
---|
1249 | if (link == 0 && sc->tsec_link == 1 && |
---|
1250 | (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))) |
---|
1251 | tsec_start_locked(ifp); |
---|
1252 | |
---|
1253 | /* Schedule another timeout one second from now. */ |
---|
1254 | callout_reset(&sc->tsec_callout, hz, tsec_tick, sc); |
---|
1255 | |
---|
1256 | TSEC_GLOBAL_UNLOCK(sc); |
---|
1257 | } |
---|
1258 | |
---|
/*
 * This is the core RX routine. It replenishes mbufs in the descriptor and
 * sends data which have been dma'ed into host memory to upper layer.
 *
 * Loops at most count times if count is > 0, or until done if count < 0.
 *
 * Called with the receive lock held; temporarily drops it around
 * if_input().  Returns the number of complete frames passed up.
 */
static int
tsec_receive_intr_locked(struct tsec_softc *sc, int count)
{
	struct tsec_desc *rx_desc;
	struct ifnet *ifp;
	struct rx_data_type *rx_data;
	struct mbuf *m;
	device_t dev;
	uint32_t i;
	int c, rx_npkts;
	uint16_t flags;

	TSEC_RECEIVE_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;
	rx_data = sc->rx_data;
	dev = sc->dev;
	rx_npkts = 0;

	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	for (c = 0; ; c++) {
		if (count >= 0 && count-- == 0)
			break;

		rx_desc = TSEC_GET_CUR_RX_DESC(sc);
		flags = rx_desc->flags;

		/* Check if there is anything to receive */
		if ((flags & TSEC_RXBD_E) || (c >= TSEC_RX_NUM_DESC)) {
			/*
			 * Avoid generating another interrupt
			 */
			if (flags & TSEC_RXBD_E)
				TSEC_WRITE(sc, TSEC_REG_IEVENT,
				    TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
			/*
			 * We didn't consume current descriptor and have to
			 * return it to the queue
			 */
			TSEC_BACK_CUR_RX_DESC(sc);
			break;
		}

		/* Drop error frames and any partial frame in progress. */
		if (flags & (TSEC_RXBD_LG | TSEC_RXBD_SH | TSEC_RXBD_NO |
		    TSEC_RXBD_CR | TSEC_RXBD_OV | TSEC_RXBD_TR)) {

			rx_desc->length = 0;
			rx_desc->flags = (rx_desc->flags &
			    ~TSEC_RXBD_ZEROONINIT) | TSEC_RXBD_E | TSEC_RXBD_I;

			if (sc->frame != NULL) {
				m_free(sc->frame);
				sc->frame = NULL;
			}

			continue;
		}

		/* Ok... process frame */
		i = TSEC_GET_CUR_RX_DESC_CNT(sc);
		m = rx_data[i].mbuf;
		m->m_len = rx_desc->length;

		/* Append this buffer to a multi-descriptor frame, if any. */
		if (sc->frame != NULL) {
			/*
			 * The descriptor length of the last buffer is the
			 * whole frame length, so subtract what was
			 * accumulated so far.
			 */
			if ((flags & TSEC_RXBD_L) != 0)
				m->m_len -= m_length(sc->frame, NULL);

			m->m_flags &= ~M_PKTHDR;
			m_cat(sc->frame, m);
		} else {
			sc->frame = m;
		}

		m = NULL;

		/* Last buffer of the frame: hand the chain off below. */
		if ((flags & TSEC_RXBD_L) != 0) {
			m = sc->frame;
			sc->frame = NULL;
		}

		if (tsec_new_rxbuf(sc->tsec_rx_mtag, rx_data[i].map,
		    &rx_data[i].mbuf, &rx_data[i].paddr)) {
			ifp->if_ierrors++;
			/*
			 * We ran out of mbufs; didn't consume current
			 * descriptor and have to return it to the queue.
			 */
			TSEC_BACK_CUR_RX_DESC(sc);
			break;
		}

		/* Attach new buffer to descriptor and clear flags */
		rx_desc->bufptr = rx_data[i].paddr;
		rx_desc->length = 0;
		rx_desc->flags = (rx_desc->flags & ~TSEC_RXBD_ZEROONINIT) |
		    TSEC_RXBD_E | TSEC_RXBD_I;

		if (m != NULL) {
			m->m_pkthdr.rcvif = ifp;

			m_fixhdr(m);
			/* Strip the trailing Ethernet CRC. */
			m_adj(m, -ETHER_CRC_LEN);

			if (sc->is_etsec)
				tsec_offload_process_frame(sc, m);

			/* Drop the lock across the network-stack call. */
			TSEC_RECEIVE_UNLOCK(sc);
			(*ifp->if_input)(ifp, m);
			TSEC_RECEIVE_LOCK(sc);
			rx_npkts++;
		}
	}

	bus_dmamap_sync(sc->tsec_rx_dtag, sc->tsec_rx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/*
	 * Make sure TSEC receiver is not halted.
	 *
	 * Various conditions can stop the TSEC receiver, but not all are
	 * signaled and handled by error interrupt, so make sure the receiver
	 * is running. Writing to TSEC_REG_RSTAT restarts the receiver when
	 * halted, and is harmless if already running.
	 */
	TSEC_WRITE(sc, TSEC_REG_RSTAT, TSEC_RSTAT_QHLT);
	return (rx_npkts);
}
---|
1394 | |
---|
/*
 * Receive interrupt handler: acknowledges RX events and drains the RX
 * ring completely.  Does nothing when DEVICE_POLLING owns the device.
 */
void
tsec_receive_intr(void *arg)
{
	struct tsec_softc *sc = arg;

	TSEC_RECEIVE_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) {
		TSEC_RECEIVE_UNLOCK(sc);
		return;
	}
#endif

	/* Confirm the interrupt was received by driver */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXB | TSEC_IEVENT_RXF);
	/* -1: no packet limit, drain everything available. */
	tsec_receive_intr_locked(sc, -1);

	TSEC_RECEIVE_UNLOCK(sc);
}
---|
1415 | |
---|
/*
 * Reclaim completed transmit descriptors: update collision statistics,
 * free the mbufs and DMA maps of fully-sent frames, and restart
 * transmission if anything was reclaimed.
 *
 * Called with the transmit lock held.
 */
static void
tsec_transmit_intr_locked(struct tsec_softc *sc)
{
	struct tsec_desc *tx_desc;
	struct ifnet *ifp;
	struct mbuf *m0;
	bus_dmamap_t *mapp;
	int send = 0;

	TSEC_TRANSMIT_LOCK_ASSERT(sc);

	ifp = sc->tsec_ifp;

	/* Update collision statistics */
	ifp->if_collisions += TSEC_READ(sc, TSEC_REG_MON_TNCL);

	/* Reset collision counters in hardware */
	TSEC_WRITE(sc, TSEC_REG_MON_TSCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TMCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TLCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TXCL, 0);
	TSEC_WRITE(sc, TSEC_REG_MON_TNCL, 0);

	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	while (TSEC_CUR_DIFF_DIRTY_TX_DESC(sc)) {
		tx_desc = TSEC_GET_DIRTY_TX_DESC(sc);
		/* Hardware still owns this descriptor: stop reclaiming. */
		if (tx_desc->flags & TSEC_TXBD_R) {
			TSEC_BACK_DIRTY_TX_DESC(sc);
			break;
		}

		/* Only the last descriptor of a frame carries the mbuf. */
		if ((tx_desc->flags & TSEC_TXBD_L) == 0)
			continue;

		/*
		 * This is the last buf in this packet, so unmap and free it.
		 */
		m0 = TSEC_GET_TX_MBUF(sc);
		mapp = TSEC_GET_TX_MAP(sc);

		bus_dmamap_sync(sc->tsec_tx_mtag, *mapp,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->tsec_tx_mtag, *mapp);

		TSEC_FREE_TX_MAP(sc, mapp);
		m_freem(m0);

		ifp->if_opackets++;
		send = 1;
	}
	bus_dmamap_sync(sc->tsec_tx_dtag, sc->tsec_tx_dmap,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	if (send) {
		/* Now send anything that was pending */
		ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
		tsec_start_locked(ifp);

		/* Stop watchdog if all sent */
		if (TSEC_EMPTYQ_TX_MBUF(sc))
			sc->tsec_watchdog = 0;
	}
}
---|
1481 | |
---|
/*
 * Transmit interrupt handler: acknowledges TX events and reclaims
 * completed descriptors.  Does nothing when DEVICE_POLLING owns the
 * device.
 */
void
tsec_transmit_intr(void *arg)
{
	struct tsec_softc *sc = arg;

	TSEC_TRANSMIT_LOCK(sc);

#ifdef DEVICE_POLLING
	if (sc->tsec_ifp->if_capenable & IFCAP_POLLING) {
		TSEC_TRANSMIT_UNLOCK(sc);
		return;
	}
#endif
	/* Confirm the interrupt was received by driver */
	TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_TXB | TSEC_IEVENT_TXF);
	tsec_transmit_intr_locked(sc);

	TSEC_TRANSMIT_UNLOCK(sc);
}
---|
1501 | |
---|
1502 | static void |
---|
1503 | tsec_error_intr_locked(struct tsec_softc *sc, int count) |
---|
1504 | { |
---|
1505 | struct ifnet *ifp; |
---|
1506 | uint32_t eflags; |
---|
1507 | |
---|
1508 | TSEC_GLOBAL_LOCK_ASSERT(sc); |
---|
1509 | |
---|
1510 | ifp = sc->tsec_ifp; |
---|
1511 | |
---|
1512 | eflags = TSEC_READ(sc, TSEC_REG_IEVENT); |
---|
1513 | |
---|
1514 | /* Clear events bits in hardware */ |
---|
1515 | TSEC_WRITE(sc, TSEC_REG_IEVENT, TSEC_IEVENT_RXC | TSEC_IEVENT_BSY | |
---|
1516 | TSEC_IEVENT_EBERR | TSEC_IEVENT_MSRO | TSEC_IEVENT_BABT | |
---|
1517 | TSEC_IEVENT_TXC | TSEC_IEVENT_TXE | TSEC_IEVENT_LC | |
---|
1518 | TSEC_IEVENT_CRL | TSEC_IEVENT_XFUN); |
---|
1519 | |
---|
1520 | /* Check transmitter errors */ |
---|
1521 | if (eflags & TSEC_IEVENT_TXE) { |
---|
1522 | ifp->if_oerrors++; |
---|
1523 | |
---|
1524 | if (eflags & TSEC_IEVENT_LC) |
---|
1525 | ifp->if_collisions++; |
---|
1526 | |
---|
1527 | TSEC_WRITE(sc, TSEC_REG_TSTAT, TSEC_TSTAT_THLT); |
---|
1528 | } |
---|
1529 | |
---|
1530 | /* Check receiver errors */ |
---|
1531 | if (eflags & TSEC_IEVENT_BSY) { |
---|
1532 | ifp->if_ierrors++; |
---|
1533 | ifp->if_iqdrops++; |
---|
1534 | |
---|
1535 | /* Get data from RX buffers */ |
---|
1536 | tsec_receive_intr_locked(sc, count); |
---|
1537 | } |
---|
1538 | |
---|
1539 | if (ifp->if_flags & IFF_DEBUG) |
---|
1540 | if_printf(ifp, "tsec_error_intr(): event flags: 0x%x\n", |
---|
1541 | eflags); |
---|
1542 | |
---|
1543 | if (eflags & TSEC_IEVENT_EBERR) { |
---|
1544 | if_printf(ifp, "System bus error occurred during" |
---|
1545 | "DMA transaction (flags: 0x%x)\n", eflags); |
---|
1546 | tsec_init_locked(sc); |
---|
1547 | } |
---|
1548 | |
---|
1549 | if (eflags & TSEC_IEVENT_BABT) |
---|
1550 | ifp->if_oerrors++; |
---|
1551 | |
---|
1552 | if (eflags & TSEC_IEVENT_BABR) |
---|
1553 | ifp->if_ierrors++; |
---|
1554 | } |
---|
1555 | |
---|
/*
 * Error interrupt handler: takes the global lock and delegates to
 * tsec_error_intr_locked() with no RX packet limit (-1).
 */
void
tsec_error_intr(void *arg)
{
	struct tsec_softc *sc = arg;

	TSEC_GLOBAL_LOCK(sc);
	tsec_error_intr_locked(sc, -1);
	TSEC_GLOBAL_UNLOCK(sc);
}
---|
1565 | |
---|
1566 | int |
---|
1567 | tsec_miibus_readreg(device_t dev, int phy, int reg) |
---|
1568 | { |
---|
1569 | struct tsec_softc *sc; |
---|
1570 | uint32_t timeout; |
---|
1571 | |
---|
1572 | sc = device_get_softc(dev); |
---|
1573 | |
---|
1574 | TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMADD, (phy << 8) | reg); |
---|
1575 | TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMCOM, 0); |
---|
1576 | TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMCOM, TSEC_MIIMCOM_READCYCLE); |
---|
1577 | |
---|
1578 | timeout = TSEC_READ_RETRY; |
---|
1579 | while (--timeout && TSEC_READ(sc->phy_sc, TSEC_REG_MIIMIND) & |
---|
1580 | (TSEC_MIIMIND_NOTVALID | TSEC_MIIMIND_BUSY)) |
---|
1581 | DELAY(TSEC_READ_DELAY); |
---|
1582 | |
---|
1583 | if (timeout == 0) |
---|
1584 | device_printf(dev, "Timeout while reading from PHY!\n"); |
---|
1585 | |
---|
1586 | return (TSEC_READ(sc->phy_sc, TSEC_REG_MIIMSTAT)); |
---|
1587 | } |
---|
1588 | |
---|
1589 | int |
---|
1590 | tsec_miibus_writereg(device_t dev, int phy, int reg, int value) |
---|
1591 | { |
---|
1592 | struct tsec_softc *sc; |
---|
1593 | uint32_t timeout; |
---|
1594 | |
---|
1595 | sc = device_get_softc(dev); |
---|
1596 | |
---|
1597 | TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMADD, (phy << 8) | reg); |
---|
1598 | TSEC_WRITE(sc->phy_sc, TSEC_REG_MIIMCON, value); |
---|
1599 | |
---|
1600 | timeout = TSEC_READ_RETRY; |
---|
1601 | while (--timeout && (TSEC_READ(sc->phy_sc, TSEC_REG_MIIMIND) & |
---|
1602 | TSEC_MIIMIND_BUSY)) |
---|
1603 | DELAY(TSEC_READ_DELAY); |
---|
1604 | |
---|
1605 | if (timeout == 0) |
---|
1606 | device_printf(dev, "Timeout while writing to PHY!\n"); |
---|
1607 | |
---|
1608 | return (0); |
---|
1609 | } |
---|
1610 | |
---|
/*
 * miibus status-change method: propagate the PHY's negotiated duplex
 * and speed into MACCFG2, track link state in sc->tsec_link, and (on
 * parts whose ID2 register reports a revision) set the reduced-speed
 * R100M bit in ECNTRL for 10/100 operation.
 */
void
tsec_miibus_statchg(device_t dev)
{
	struct tsec_softc *sc;
	struct mii_data *mii;
	uint32_t ecntrl, id, tmp;
	int link;

	sc = device_get_softc(dev);
	mii = sc->tsec_mii;
	link = ((mii->mii_media_status & IFM_ACTIVE) ? 1 : 0);

	tmp = TSEC_READ(sc, TSEC_REG_MACCFG2) & ~TSEC_MACCFG2_IF;

	if ((mii->mii_media_active & IFM_GMASK) == IFM_FDX)
		tmp |= TSEC_MACCFG2_FULLDUPLEX;
	else
		tmp &= ~TSEC_MACCFG2_FULLDUPLEX;

	switch (IFM_SUBTYPE(mii->mii_media_active)) {
	case IFM_1000_T:
	case IFM_1000_SX:
		tmp |= TSEC_MACCFG2_GMII;
		sc->tsec_link = link;
		break;
	case IFM_100_TX:
	case IFM_10_T:
		tmp |= TSEC_MACCFG2_MII;
		sc->tsec_link = link;
		break;
	case IFM_NONE:
		if (link)
			device_printf(dev, "No speed selected but link "
			    "active!\n");
		sc->tsec_link = 0;
		return;
	default:
		sc->tsec_link = 0;
		device_printf(dev, "Unknown speed (%d), link %s!\n",
		    IFM_SUBTYPE(mii->mii_media_active),
		    ((link) ? "up" : "down"));
		return;
	}
	TSEC_WRITE(sc, TSEC_REG_MACCFG2, tmp);

	/* XXX kludge - use circumstantial evidence for reduced mode. */
	id = TSEC_READ(sc, TSEC_REG_ID2);
	if (id & 0xffff) {
		ecntrl = TSEC_READ(sc, TSEC_REG_ECNTRL) & ~TSEC_ECNTRL_R100M;
		ecntrl |= (tmp & TSEC_MACCFG2_MII) ? TSEC_ECNTRL_R100M : 0;
		TSEC_WRITE(sc, TSEC_REG_ECNTRL, ecntrl);
	}
}
---|
1664 | |
---|
1665 | #ifndef __rtems__ |
---|
/*
 * Register the dev.tsec.<unit>.int_coal sysctl subtree exposing the
 * RX/TX interrupt-coalescing time and frame-count thresholds (see the
 * detailed description below tsec_add_sysctls).
 */
static void
tsec_add_sysctls(struct tsec_softc *sc)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid_list *children;
	struct sysctl_oid *tree;

	ctx = device_get_sysctl_ctx(sc->dev);
	children = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev));
	tree = SYSCTL_ADD_NODE(ctx, children, OID_AUTO, "int_coal",
	    CTLFLAG_RD, 0, "TSEC Interrupts coalescing");
	children = SYSCTL_CHILDREN(tree);

	/* arg2 selects the RX or TX path inside the shared handlers. */
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_RX, tsec_sysctl_ic_time,
	    "I", "IC RX time threshold (0-65535)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "rx_count",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_RX, tsec_sysctl_ic_count,
	    "I", "IC RX frame count threshold (0-255)");

	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_time",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_TX, tsec_sysctl_ic_time,
	    "I", "IC TX time threshold (0-65535)");
	SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "tx_count",
	    CTLTYPE_UINT | CTLFLAG_RW, sc, TSEC_IC_TX, tsec_sysctl_ic_count,
	    "I", "IC TX frame count threshold (0-255)");
}
---|
1693 | |
---|
1694 | /* |
---|
1695 | * With Interrupt Coalescing (IC) active, a transmit/receive frame |
---|
1696 | * interrupt is raised either upon: |
---|
1697 | * |
---|
1698 | * - threshold-defined period of time elapsed, or |
---|
1699 | * - threshold-defined number of frames is received/transmitted, |
---|
1700 | * whichever occurs first. |
---|
1701 | * |
---|
1702 | * The following sysctls regulate IC behaviour (for TX/RX separately): |
---|
1703 | * |
---|
1704 | * dev.tsec.<unit>.int_coal.rx_time |
---|
1705 | * dev.tsec.<unit>.int_coal.rx_count |
---|
1706 | * dev.tsec.<unit>.int_coal.tx_time |
---|
1707 | * dev.tsec.<unit>.int_coal.tx_count |
---|
1708 | * |
---|
1709 | * Values: |
---|
1710 | * |
---|
1711 | * - 0 for either time or count disables IC on the given TX/RX path |
---|
1712 | * |
---|
1713 | * - count: 1-255 (expresses frame count number; note that value of 1 is |
---|
1714 | * effectively IC off) |
---|
1715 | * |
---|
1716 | * - time: 1-65535 (value corresponds to a real time period and is |
---|
1717 | * expressed in units equivalent to 64 TSEC interface clocks, i.e. one timer |
---|
1718 | * threshold unit is 26.5 us, 2.56 us, or 512 ns, corresponding to 10 Mbps, |
---|
1719 | * 100 Mbps, or 1Gbps, respectively. For detailed discussion consult the |
---|
1720 | * TSEC reference manual. |
---|
1721 | */ |
---|
1722 | static int |
---|
1723 | tsec_sysctl_ic_time(SYSCTL_HANDLER_ARGS) |
---|
1724 | { |
---|
1725 | int error; |
---|
1726 | uint32_t time; |
---|
1727 | struct tsec_softc *sc = (struct tsec_softc *)arg1; |
---|
1728 | |
---|
1729 | time = (arg2 == TSEC_IC_RX) ? sc->rx_ic_time : sc->tx_ic_time; |
---|
1730 | |
---|
1731 | error = sysctl_handle_int(oidp, &time, 0, req); |
---|
1732 | if (error != 0) |
---|
1733 | return (error); |
---|
1734 | |
---|
1735 | if (time > 65535) |
---|
1736 | return (EINVAL); |
---|
1737 | |
---|
1738 | TSEC_IC_LOCK(sc); |
---|
1739 | if (arg2 == TSEC_IC_RX) { |
---|
1740 | sc->rx_ic_time = time; |
---|
1741 | tsec_set_rxic(sc); |
---|
1742 | } else { |
---|
1743 | sc->tx_ic_time = time; |
---|
1744 | tsec_set_txic(sc); |
---|
1745 | } |
---|
1746 | TSEC_IC_UNLOCK(sc); |
---|
1747 | |
---|
1748 | return (0); |
---|
1749 | } |
---|
1750 | |
---|
1751 | static int |
---|
1752 | tsec_sysctl_ic_count(SYSCTL_HANDLER_ARGS) |
---|
1753 | { |
---|
1754 | int error; |
---|
1755 | uint32_t count; |
---|
1756 | struct tsec_softc *sc = (struct tsec_softc *)arg1; |
---|
1757 | |
---|
1758 | count = (arg2 == TSEC_IC_RX) ? sc->rx_ic_count : sc->tx_ic_count; |
---|
1759 | |
---|
1760 | error = sysctl_handle_int(oidp, &count, 0, req); |
---|
1761 | if (error != 0) |
---|
1762 | return (error); |
---|
1763 | |
---|
1764 | if (count > 255) |
---|
1765 | return (EINVAL); |
---|
1766 | |
---|
1767 | TSEC_IC_LOCK(sc); |
---|
1768 | if (arg2 == TSEC_IC_RX) { |
---|
1769 | sc->rx_ic_count = count; |
---|
1770 | tsec_set_rxic(sc); |
---|
1771 | } else { |
---|
1772 | sc->tx_ic_count = count; |
---|
1773 | tsec_set_txic(sc); |
---|
1774 | } |
---|
1775 | TSEC_IC_UNLOCK(sc); |
---|
1776 | |
---|
1777 | return (0); |
---|
1778 | } |
---|
1779 | #endif /* __rtems__ */ |
---|
1780 | |
---|
1781 | static void |
---|
1782 | tsec_set_rxic(struct tsec_softc *sc) |
---|
1783 | { |
---|
1784 | uint32_t rxic_val; |
---|
1785 | |
---|
1786 | if (sc->rx_ic_count == 0 || sc->rx_ic_time == 0) |
---|
1787 | /* Disable RX IC */ |
---|
1788 | rxic_val = 0; |
---|
1789 | else { |
---|
1790 | rxic_val = 0x80000000; |
---|
1791 | rxic_val |= (sc->rx_ic_count << 21); |
---|
1792 | rxic_val |= sc->rx_ic_time; |
---|
1793 | } |
---|
1794 | |
---|
1795 | TSEC_WRITE(sc, TSEC_REG_RXIC, rxic_val); |
---|
1796 | } |
---|
1797 | |
---|
1798 | static void |
---|
1799 | tsec_set_txic(struct tsec_softc *sc) |
---|
1800 | { |
---|
1801 | uint32_t txic_val; |
---|
1802 | |
---|
1803 | if (sc->tx_ic_count == 0 || sc->tx_ic_time == 0) |
---|
1804 | /* Disable TX IC */ |
---|
1805 | txic_val = 0; |
---|
1806 | else { |
---|
1807 | txic_val = 0x80000000; |
---|
1808 | txic_val |= (sc->tx_ic_count << 21); |
---|
1809 | txic_val |= sc->tx_ic_time; |
---|
1810 | } |
---|
1811 | |
---|
1812 | TSEC_WRITE(sc, TSEC_REG_TXIC, txic_val); |
---|
1813 | } |
---|
1814 | |
---|
1815 | static void |
---|
1816 | tsec_offload_setup(struct tsec_softc *sc) |
---|
1817 | { |
---|
1818 | struct ifnet *ifp = sc->tsec_ifp; |
---|
1819 | uint32_t reg; |
---|
1820 | |
---|
1821 | TSEC_GLOBAL_LOCK_ASSERT(sc); |
---|
1822 | |
---|
1823 | reg = TSEC_READ(sc, TSEC_REG_TCTRL); |
---|
1824 | reg |= TSEC_TCTRL_IPCSEN | TSEC_TCTRL_TUCSEN; |
---|
1825 | |
---|
1826 | if (ifp->if_capenable & IFCAP_TXCSUM) |
---|
1827 | ifp->if_hwassist = TSEC_CHECKSUM_FEATURES; |
---|
1828 | else |
---|
1829 | ifp->if_hwassist = 0; |
---|
1830 | |
---|
1831 | TSEC_WRITE(sc, TSEC_REG_TCTRL, reg); |
---|
1832 | |
---|
1833 | reg = TSEC_READ(sc, TSEC_REG_RCTRL); |
---|
1834 | reg &= ~(TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | TSEC_RCTRL_PRSDEP); |
---|
1835 | reg |= TSEC_RCTRL_PRSDEP_PARSE_L2 | TSEC_RCTRL_VLEX; |
---|
1836 | |
---|
1837 | if (ifp->if_capenable & IFCAP_RXCSUM) |
---|
1838 | reg |= TSEC_RCTRL_IPCSEN | TSEC_RCTRL_TUCSEN | |
---|
1839 | TSEC_RCTRL_PRSDEP_PARSE_L234; |
---|
1840 | |
---|
1841 | TSEC_WRITE(sc, TSEC_REG_RCTRL, reg); |
---|
1842 | } |
---|
1843 | |
---|
1844 | |
---|
1845 | static void |
---|
1846 | tsec_offload_process_frame(struct tsec_softc *sc, struct mbuf *m) |
---|
1847 | { |
---|
1848 | struct tsec_rx_fcb rx_fcb; |
---|
1849 | int csum_flags = 0; |
---|
1850 | int protocol, flags; |
---|
1851 | |
---|
1852 | TSEC_RECEIVE_LOCK_ASSERT(sc); |
---|
1853 | |
---|
1854 | m_copydata(m, 0, sizeof(struct tsec_rx_fcb), (caddr_t)(&rx_fcb)); |
---|
1855 | flags = rx_fcb.flags; |
---|
1856 | protocol = rx_fcb.protocol; |
---|
1857 | |
---|
1858 | if (TSEC_RX_FCB_IP_CSUM_CHECKED(flags)) { |
---|
1859 | csum_flags |= CSUM_IP_CHECKED; |
---|
1860 | |
---|
1861 | if ((flags & TSEC_RX_FCB_IP_CSUM_ERROR) == 0) |
---|
1862 | csum_flags |= CSUM_IP_VALID; |
---|
1863 | } |
---|
1864 | |
---|
1865 | if ((protocol == IPPROTO_TCP || protocol == IPPROTO_UDP) && |
---|
1866 | TSEC_RX_FCB_TCP_UDP_CSUM_CHECKED(flags) && |
---|
1867 | (flags & TSEC_RX_FCB_TCP_UDP_CSUM_ERROR) == 0) { |
---|
1868 | |
---|
1869 | csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR; |
---|
1870 | m->m_pkthdr.csum_data = 0xFFFF; |
---|
1871 | } |
---|
1872 | |
---|
1873 | m->m_pkthdr.csum_flags = csum_flags; |
---|
1874 | |
---|
1875 | if (flags & TSEC_RX_FCB_VLAN) { |
---|
1876 | m->m_pkthdr.ether_vtag = rx_fcb.vlan; |
---|
1877 | m->m_flags |= M_VLANTAG; |
---|
1878 | } |
---|
1879 | |
---|
1880 | m_adj(m, sizeof(struct tsec_rx_fcb)); |
---|
1881 | } |
---|
1882 | |
---|
1883 | static void |
---|
1884 | tsec_setup_multicast(struct tsec_softc *sc) |
---|
1885 | { |
---|
1886 | uint32_t hashtable[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; |
---|
1887 | struct ifnet *ifp = sc->tsec_ifp; |
---|
1888 | struct ifmultiaddr *ifma; |
---|
1889 | uint32_t h; |
---|
1890 | int i; |
---|
1891 | |
---|
1892 | TSEC_GLOBAL_LOCK_ASSERT(sc); |
---|
1893 | |
---|
1894 | if (ifp->if_flags & IFF_ALLMULTI) { |
---|
1895 | for (i = 0; i < 8; i++) |
---|
1896 | TSEC_WRITE(sc, TSEC_REG_GADDR(i), 0xFFFFFFFF); |
---|
1897 | |
---|
1898 | return; |
---|
1899 | } |
---|
1900 | |
---|
1901 | if_maddr_rlock(ifp); |
---|
1902 | TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) { |
---|
1903 | |
---|
1904 | if (ifma->ifma_addr->sa_family != AF_LINK) |
---|
1905 | continue; |
---|
1906 | |
---|
1907 | h = (ether_crc32_be(LLADDR((struct sockaddr_dl *) |
---|
1908 | ifma->ifma_addr), ETHER_ADDR_LEN) >> 24) & 0xFF; |
---|
1909 | |
---|
1910 | hashtable[(h >> 5)] |= 1 << (0x1F - (h & 0x1F)); |
---|
1911 | } |
---|
1912 | if_maddr_runlock(ifp); |
---|
1913 | |
---|
1914 | for (i = 0; i < 8; i++) |
---|
1915 | TSEC_WRITE(sc, TSEC_REG_GADDR(i), hashtable[i]); |
---|
1916 | } |
---|
1917 | |
---|
1918 | static int |
---|
1919 | tsec_set_mtu(struct tsec_softc *sc, unsigned int mtu) |
---|
1920 | { |
---|
1921 | |
---|
1922 | mtu += ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN + ETHER_CRC_LEN; |
---|
1923 | |
---|
1924 | TSEC_GLOBAL_LOCK_ASSERT(sc); |
---|
1925 | |
---|
1926 | if (mtu >= TSEC_MIN_FRAME_SIZE && mtu <= TSEC_MAX_FRAME_SIZE) { |
---|
1927 | TSEC_WRITE(sc, TSEC_REG_MAXFRM, mtu); |
---|
1928 | return (mtu); |
---|
1929 | } |
---|
1930 | |
---|
1931 | return (0); |
---|
1932 | } |
---|