1 | /* $NetBSD: if_gfe.c,v 1.13.8.1 2005/04/29 11:28:56 kent Exp $ */ |
---|
2 | |
---|
3 | /* |
---|
4 | * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc. |
---|
5 | * All rights reserved. |
---|
6 | * |
---|
7 | * Copyright 2004: Enable hardware cache snooping. Kate Feng <feng1@bnl.gov> |
---|
8 | * |
---|
9 | * Redistribution and use in source and binary forms, with or without |
---|
10 | * modification, are permitted provided that the following conditions |
---|
11 | * are met: |
---|
12 | * 1. Redistributions of source code must retain the above copyright |
---|
13 | * notice, this list of conditions and the following disclaimer. |
---|
14 | * 2. Redistributions in binary form must reproduce the above copyright |
---|
15 | * notice, this list of conditions and the following disclaimer in the |
---|
16 | * documentation and/or other materials provided with the distribution. |
---|
17 | * 3. All advertising materials mentioning features or use of this software |
---|
18 | * must display the following acknowledgement: |
---|
19 | * This product includes software developed for the NetBSD Project by |
---|
20 | * Allegro Networks, Inc., and Wasabi Systems, Inc. |
---|
21 | * 4. The name of Allegro Networks, Inc. may not be used to endorse |
---|
22 | * or promote products derived from this software without specific prior |
---|
23 | * written permission. |
---|
24 | * 5. The name of Wasabi Systems, Inc. may not be used to endorse |
---|
25 | * or promote products derived from this software without specific prior |
---|
26 | * written permission. |
---|
27 | * |
---|
28 | * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND |
---|
29 | * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, |
---|
30 | * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY |
---|
31 | * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
---|
32 | * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC. |
---|
33 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
---|
34 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
---|
35 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
---|
36 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
---|
37 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
---|
38 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
---|
39 | * POSSIBILITY OF SUCH DAMAGE. |
---|
40 | */ |
---|
41 | |
---|
42 | /* |
---|
43 | * if_gfe.c -- GT ethernet MAC driver |
---|
44 | */ |
---|
45 | |
---|
46 | /* Enable hardware cache snooping; |
---|
47 | * Copyright Shuchen K. Feng <feng1@bnl.gov>, 2004 |
---|
48 | */ |
---|
49 | |
---|
50 | #ifdef __rtems__ |
---|
51 | #include <rtemscompat.h> |
---|
52 | #include <string.h> |
---|
53 | #include <stdio.h> |
---|
54 | #include <inttypes.h> |
---|
55 | #endif |
---|
56 | |
---|
57 | #include <sys/cdefs.h> |
---|
58 | #ifndef __rtems__ |
---|
59 | __KERNEL_RCSID(0, "$NetBSD: if_gfe.c,v 1.13.8.1 2005/04/29 11:28:56 kent Exp $"); |
---|
60 | |
---|
61 | #include "opt_inet.h" |
---|
62 | #include "bpfilter.h" |
---|
63 | #endif |
---|
64 | |
---|
65 | #include <sys/param.h> |
---|
66 | #include <sys/types.h> |
---|
67 | #ifndef __rtems__ |
---|
68 | #include <sys/inttypes.h> |
---|
69 | #include <sys/queue.h> |
---|
70 | #endif |
---|
71 | |
---|
72 | #ifndef __rtems__ |
---|
73 | #include <uvm/uvm_extern.h> |
---|
74 | |
---|
75 | #include <sys/callout.h> |
---|
76 | #include <sys/device.h> |
---|
77 | #endif |
---|
78 | #include <sys/errno.h> |
---|
79 | #include <sys/ioctl.h> |
---|
80 | #include <sys/mbuf.h> |
---|
81 | #include <sys/socket.h> |
---|
82 | |
---|
83 | #ifndef __rtems__ |
---|
84 | #include <machine/bus.h> |
---|
85 | #endif |
---|
86 | |
---|
87 | #include <net/if.h> |
---|
88 | #include <net/if_dl.h> |
---|
89 | #include <net/if_media.h> |
---|
90 | #ifndef __rtems__ |
---|
91 | #include <net/if_ether.h> |
---|
92 | #else |
---|
93 | #include <netinet/in.h> |
---|
94 | #include <netinet/if_ether.h> |
---|
95 | #include <net/ethernet.h> |
---|
96 | #include <rtems/rtems_mii_ioctl.h> |
---|
97 | #endif |
---|
98 | |
---|
99 | #ifdef INET |
---|
100 | #include <netinet/in.h> |
---|
101 | #ifndef __rtems__ |
---|
102 | #include <netinet/if_inarp.h> |
---|
103 | #endif |
---|
104 | #endif |
---|
105 | #if NBPFILTER > 0 |
---|
106 | #include <net/bpf.h> |
---|
107 | #endif |
---|
108 | |
---|
109 | #ifndef __rtems__ |
---|
110 | #include <dev/mii/miivar.h> |
---|
111 | |
---|
112 | #include <dev/marvell/gtintrreg.h> |
---|
113 | #include <dev/marvell/gtethreg.h> |
---|
114 | |
---|
115 | #include <dev/marvell/gtvar.h> |
---|
116 | #include <dev/marvell/if_gfevar.h> |
---|
117 | #else |
---|
118 | #include <bsp/gtintrreg.h> |
---|
119 | #include <bsp/gtreg.h> |
---|
120 | #include "gtethreg.h" |
---|
121 | |
---|
122 | #include "gtvar.h" |
---|
123 | #include "if_gfevar.h" |
---|
124 | #include <rtemscompat1.h> |
---|
125 | #define ether_sprintf ether_sprintf_macro |
---|
126 | #endif |
---|
127 | |
---|
128 | #define GE_READ(sc, reg) \ |
---|
129 | bus_space_read_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg) |
---|
130 | #define GE_WRITE(sc, reg, v) \ |
---|
131 | bus_space_write_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg, (v)) |
---|
132 | |
---|
133 | #define GT_READ(sc, reg) \ |
---|
134 | bus_space_read_4((sc)->sc_gt_memt, (sc)->sc_gt_memh, reg) |
---|
135 | #define GT_WRITE(sc, reg, v) \ |
---|
136 | bus_space_write_4((sc)->sc_gt_memt, (sc)->sc_gt_memh, reg, (v)) |
---|
137 | |
---|
138 | #define GE_DEBUG |
---|
139 | #if 0 |
---|
140 | #define GE_NOHASH |
---|
141 | #define GE_NORX |
---|
142 | #endif |
---|
143 | |
---|
144 | #ifdef GE_DEBUG |
---|
145 | #define GE_DPRINTF(sc, a) do \ |
---|
146 | if ((sc)->sc_ec.ec_if.if_flags & IFF_DEBUG) \ |
---|
147 | printf a; \ |
---|
148 | while (0) |
---|
149 | #define GE_FUNC_ENTER(sc, func) GE_DPRINTF(sc, ("[" func)) |
---|
150 | #define GE_FUNC_EXIT(sc, str) GE_DPRINTF(sc, (str "]")) |
---|
151 | #else |
---|
152 | #define GE_DPRINTF(sc, a) do { } while (0) |
---|
153 | #define GE_FUNC_ENTER(sc, func) do { } while (0) |
---|
154 | #define GE_FUNC_EXIT(sc, str) do { } while (0) |
---|
155 | #endif |
---|
156 | enum gfe_whack_op { |
---|
157 | GE_WHACK_START, GE_WHACK_RESTART, |
---|
158 | GE_WHACK_CHANGE, GE_WHACK_STOP |
---|
159 | }; |
---|
160 | |
---|
161 | enum gfe_hash_op { |
---|
162 | GE_HASH_ADD, GE_HASH_REMOVE, |
---|
163 | }; |
---|
164 | |
---|
165 | |
---|
166 | #if 1 |
---|
167 | #define htogt32(a) htobe32(a) |
---|
168 | #define gt32toh(a) be32toh(a) |
---|
169 | #else |
---|
170 | #define htogt32(a) htole32(a) |
---|
171 | #define gt32toh(a) le32toh(a) |
---|
172 | #endif |
---|
173 | |
---|
174 | #ifdef __rtems__ |
---|
175 | #define htobe32 htonl |
---|
176 | #define be32toh ntohl |
---|
177 | #endif |
---|
178 | |
---|
179 | #define GE_RXDSYNC(sc, rxq, n, ops) \ |
---|
180 | bus_dmamap_sync((sc)->sc_dmat, (rxq)->rxq_desc_mem.gdm_map, \ |
---|
181 | (n) * sizeof((rxq)->rxq_descs[0]), sizeof((rxq)->rxq_descs[0]), \ |
---|
182 | (ops)) |
---|
183 | #define GE_RXDPRESYNC(sc, rxq, n) \ |
---|
184 | GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE) |
---|
185 | #define GE_RXDPOSTSYNC(sc, rxq, n) \ |
---|
186 | GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE) |
---|
187 | |
---|
188 | #define GE_TXDSYNC(sc, txq, n, ops) \ |
---|
189 | bus_dmamap_sync((sc)->sc_dmat, (txq)->txq_desc_mem.gdm_map, \ |
---|
190 | (n) * sizeof((txq)->txq_descs[0]), sizeof((txq)->txq_descs[0]), \ |
---|
191 | (ops)) |
---|
192 | #define GE_TXDPRESYNC(sc, txq, n) \ |
---|
193 | GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE) |
---|
194 | #define GE_TXDPOSTSYNC(sc, txq, n) \ |
---|
195 | GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE) |
---|
196 | |
---|
197 | #define STATIC |
---|
198 | |
---|
199 | #ifndef __rtems__ |
---|
200 | STATIC int gfe_match (struct device *, struct cfdata *, void *); |
---|
201 | STATIC void gfe_attach (struct device *, struct device *, void *); |
---|
202 | #else |
---|
203 | STATIC int gfe_probe (device_t); |
---|
204 | STATIC int gfe_attach (device_t); |
---|
205 | STATIC void gfe_init (void*); |
---|
206 | #endif |
---|
207 | |
---|
208 | STATIC int gfe_dmamem_alloc(struct gfe_softc *, struct gfe_dmamem *, int, |
---|
209 | size_t, int); |
---|
210 | STATIC void gfe_dmamem_free(struct gfe_softc *, struct gfe_dmamem *); |
---|
211 | |
---|
212 | #ifndef __rtems__ |
---|
213 | STATIC int gfe_ifioctl (struct ifnet *, u_long, caddr_t); |
---|
214 | #else |
---|
215 | STATIC int gfe_ifioctl (struct ifnet *, ioctl_command_t, caddr_t); |
---|
216 | #endif |
---|
217 | STATIC void gfe_ifstart (struct ifnet *); |
---|
218 | STATIC void gfe_ifwatchdog (struct ifnet *); |
---|
219 | |
---|
220 | #ifndef __rtems__ |
---|
221 | STATIC int gfe_mii_mediachange (struct ifnet *); |
---|
222 | STATIC void gfe_mii_mediastatus (struct ifnet *, struct ifmediareq *); |
---|
223 | STATIC int gfe_mii_read (struct device *, int, int); |
---|
224 | STATIC void gfe_mii_write (struct device *, int, int, int); |
---|
225 | STATIC void gfe_mii_statchg (struct device *); |
---|
226 | #endif |
---|
227 | |
---|
228 | STATIC void gfe_tick(void *arg); |
---|
229 | |
---|
230 | STATIC void gfe_tx_restart(void *); |
---|
231 | STATIC int gfe_tx_enqueue(struct gfe_softc *, enum gfe_txprio); |
---|
232 | STATIC uint32_t gfe_tx_done(struct gfe_softc *, enum gfe_txprio, uint32_t); |
---|
233 | STATIC void gfe_tx_cleanup(struct gfe_softc *, enum gfe_txprio, int); |
---|
234 | STATIC int gfe_tx_txqalloc(struct gfe_softc *, enum gfe_txprio); |
---|
235 | STATIC int gfe_tx_start(struct gfe_softc *, enum gfe_txprio); |
---|
236 | STATIC void gfe_tx_stop(struct gfe_softc *, enum gfe_whack_op); |
---|
237 | |
---|
238 | STATIC void gfe_rx_cleanup(struct gfe_softc *, enum gfe_rxprio); |
---|
239 | STATIC void gfe_rx_get(struct gfe_softc *, enum gfe_rxprio); |
---|
240 | STATIC int gfe_rx_prime(struct gfe_softc *); |
---|
241 | STATIC uint32_t gfe_rx_process(struct gfe_softc *, uint32_t, uint32_t); |
---|
242 | STATIC int gfe_rx_rxqalloc(struct gfe_softc *, enum gfe_rxprio); |
---|
243 | STATIC int gfe_rx_rxqinit(struct gfe_softc *, enum gfe_rxprio); |
---|
244 | STATIC void gfe_rx_stop(struct gfe_softc *, enum gfe_whack_op); |
---|
245 | |
---|
246 | STATIC int gfe_intr(void *); |
---|
247 | |
---|
248 | STATIC int gfe_whack(struct gfe_softc *, enum gfe_whack_op); |
---|
249 | |
---|
250 | STATIC int gfe_hash_compute(struct gfe_softc *, const uint8_t [ETHER_ADDR_LEN]); |
---|
251 | STATIC int gfe_hash_entry_op(struct gfe_softc *, enum gfe_hash_op, |
---|
252 | enum gfe_rxprio, const uint8_t [ETHER_ADDR_LEN]); |
---|
253 | #ifndef __rtems__ |
---|
254 | STATIC int gfe_hash_multichg(struct ethercom *, const struct ether_multi *, |
---|
255 | u_long); |
---|
256 | #endif |
---|
257 | STATIC int gfe_hash_fill(struct gfe_softc *); |
---|
258 | STATIC int gfe_hash_alloc(struct gfe_softc *); |
---|
259 | |
---|
260 | #ifndef __rtems__ |
---|
261 | /* Linkup to the rest of the kernel */ |
---|
262 | CFATTACH_DECL(gfe, sizeof(struct gfe_softc), |
---|
263 | gfe_match, gfe_attach, NULL, NULL); |
---|
264 | #else |
---|
265 | net_drv_tbl_t METHODS = { |
---|
266 | n_probe : gfe_probe, |
---|
267 | n_attach : gfe_attach, |
---|
268 | n_detach : 0, |
---|
269 | n_intr : (void (*)(void*))gfe_intr, |
---|
270 | }; |
---|
271 | |
---|
272 | int |
---|
273 | gfe_mii_read(int phy, void *arg, unsigned reg, uint32_t *pval); |
---|
274 | int |
---|
275 | gfe_mii_write(int phy, void *arg, unsigned reg, uint32_t val); |
---|
276 | |
---|
277 | struct rtems_mdio_info |
---|
278 | gfe_mdio_access = { |
---|
279 | mdio_r: gfe_mii_read, |
---|
280 | mdio_w: gfe_mii_write, |
---|
281 | has_gmii: 0 |
---|
282 | }; |
---|
283 | |
---|
284 | #endif |
---|
285 | |
---|
286 | extern struct cfdriver gfe_cd; |
---|
287 | |
---|
288 | #ifndef __rtems__ |
---|
289 | int |
---|
290 | gfe_match(struct device *parent, struct cfdata *cf, void *aux) |
---|
291 | { |
---|
292 | struct gt_softc *gt = (struct gt_softc *) parent; |
---|
293 | struct gt_attach_args *ga = aux; |
---|
294 | uint8_t enaddr[6]; |
---|
295 | |
---|
296 | if (!GT_ETHEROK(gt, ga, &gfe_cd)) |
---|
297 | return 0; |
---|
298 | |
---|
299 | if (gtget_macaddr(gt, ga->ga_unit, enaddr) < 0) |
---|
300 | return 0; |
---|
301 | |
---|
302 | if (enaddr[0] == 0 && enaddr[1] == 0 && enaddr[2] == 0 && |
---|
303 | enaddr[3] == 0 && enaddr[4] == 0 && enaddr[5] == 0) |
---|
304 | return 0; |
---|
305 | |
---|
306 | return 1; |
---|
307 | } |
---|
308 | #else |
---|
309 | int |
---|
310 | gfe_probe(device_t dev) |
---|
311 | { |
---|
312 | switch ( BSP_getDiscoveryVersion(0) ) { |
---|
313 | case GT_64260_A: |
---|
314 | case GT_64260_B: |
---|
315 | return 0; |
---|
316 | default: |
---|
317 | break; |
---|
318 | } |
---|
319 | return -1; |
---|
320 | } |
---|
321 | |
---|
322 | void |
---|
323 | gfe_init(void *arg) |
---|
324 | { |
---|
325 | struct gfe_softc *sc = arg; |
---|
326 | if ( sc->sc_ec.ec_if.if_flags & IFF_RUNNING ) |
---|
327 | gfe_whack(sc, GE_WHACK_RESTART); |
---|
328 | else |
---|
329 | gfe_whack(sc, GE_WHACK_START); |
---|
330 | } |
---|
331 | #endif |
---|
332 | |
---|
333 | /* |
---|
334 | * Attach this instance, and then all the sub-devices |
---|
335 | */ |
---|
#ifndef __rtems__
void
gfe_attach(struct device *parent, struct device *self, void *aux)
#else
int
gfe_attach(device_t dev)
#endif
{
	/*
	 * Attach one GT ethernet MAC: map its registers, quiesce any
	 * previously-running DMA, record the PHY address and MAC address,
	 * wire up the ifnet entry points and (NetBSD only) the MII layer.
	 * The RTEMS build returns 0 on success, -1 on failure.
	 */
#ifndef __rtems__
	struct gt_attach_args * const ga = aux;
	struct gt_softc * const gt = (struct gt_softc *) parent;
	struct gfe_softc * const sc = (struct gfe_softc *) self;
#else
	struct gfe_softc * const sc = device_get_softc(dev);
#endif
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	uint32_t data;
	uint8_t enaddr[6];
	int phyaddr;
	uint32_t sdcr;
	int error;
#ifdef __rtems__
	SPRINTFVARDECL;
#endif

#ifndef __rtems__
	GT_ETHERFOUND(gt, ga);

	sc->sc_gt_memt = ga->ga_memt;
	sc->sc_gt_memh = ga->ga_memh;
	sc->sc_dmat = ga->ga_dmat;
	sc->sc_macno = ga->ga_unit;

	/* NOTE(review): a subregion failure is only reported, not fatal —
	 * attach continues with an unmapped sc_memh; confirm intended. */
	if (bus_space_subregion(sc->sc_gt_memt, sc->sc_gt_memh,
	    ETH_BASE(sc->sc_macno), ETH_SIZE, &sc->sc_memh)) {
		aprint_error(": failed to map registers\n");
	}

	callout_init(&sc->sc_co);
#else
	/* sc_macno, irq_no and sc_gt_memh must be filled in by 'setup' */

	/* make ring sizes even numbers so that we have always multiple
	 * cache lines (paranoia)
	 */
	if ( (sc->num_rxdesc = dev->d_ifconfig->rbuf_count) & 1 )
		sc->num_rxdesc++;
	if ( 0 == sc->num_rxdesc )
		sc->num_rxdesc = 64;	/* default RX ring size */

	if ( (sc->num_txdesc = dev->d_ifconfig->xbuf_count) & 1 )
		sc->num_txdesc++;
	if ( 0 == sc->num_txdesc )
		sc->num_txdesc = 256;	/* default TX ring size */

	/* Enable hardware cache snooping;
	 * Copyright Shuchen K. Feng <feng1@bnl.gov>, 2004
	 */
	/* regs are eth0: 0xf200/0xf204, eth1 0xf220/0xf224, eth2: 0xf240/0xf244 */
	{
	uint32_t v;
	/* per-port register stride is 0x20, hence the << 5 */
	v = GT_READ(sc, ETH_ACTL_0_LO + (sc->sc_macno<<5));
	v |= RxBSnoopEn|TxBSnoopEn|RxDSnoopEn|TxDSnoopEn;
	GT_WRITE(sc, ETH_ACTL_0_LO + (sc->sc_macno<<5), v);

	v = GT_READ(sc, ETH_ACTL_0_HI + (sc->sc_macno<<5));
	v |= HashSnoopEn;
	GT_WRITE(sc, ETH_ACTL_0_HI + (sc->sc_macno<<5), v);
	}

#endif

	/* PHY address for this MAC comes from the EPAR register. */
	data = bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh, ETH_EPAR);
#ifdef __rtems__
	sc->sc_phyaddr =
#endif
	phyaddr = ETH_EPAR_PhyAD_GET(data, sc->sc_macno);

#ifndef __rtems__
	gtget_macaddr(gt, sc->sc_macno, enaddr);
#else
	/* An all-zero arpcom address means setup never supplied a MAC. */
	memset( enaddr, 0, ETHER_ADDR_LEN );
	if ( !memcmp(enaddr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) ) {
		aprint_error(": MAC address not set (pass to rtems_gfe_setup())\n");
		return -1;
	}
	/* mac address needs to be provided by 'setup' */
	memcpy(enaddr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
#endif

	/* Snapshot current port config and interrupt mask. */
	sc->sc_pcr = GE_READ(sc, EPCR);
	sc->sc_pcxr = GE_READ(sc, EPCXR);
	sc->sc_intrmask = GE_READ(sc, EIMR) | ETH_IR_MIIPhySTC;

	aprint_normal(": address %s", ether_sprintf(enaddr));

#if defined(DEBUG)
	aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr);
#endif

	sc->sc_pcxr &= ~ETH_EPCXR_PRIOrx_Override;
#ifndef __rtems__
	/* cf_flags bit 0 selects RMII instead of MII. */
	if (sc->sc_dev.dv_cfdata->cf_flags & 1) {
		aprint_normal(", phy %d (rmii)", phyaddr);
		sc->sc_pcxr |= ETH_EPCXR_RMIIEn;
	} else
#endif
	{
		aprint_normal(", phy %d (mii)", phyaddr);
		sc->sc_pcxr &= ~ETH_EPCXR_RMIIEn;
	}
#ifndef __rtems__
	/* cf_flags bit 1 requests preallocated, never-freed rings. */
	if (sc->sc_dev.dv_cfdata->cf_flags & 2)
		sc->sc_flags |= GE_NOFREE;
#endif
	/* Set maximum frame length field (bits 15:14) to 1536. */
	sc->sc_pcxr &= ~(3 << 14);
	sc->sc_pcxr |= (ETH_EPCXR_MFL_1536 << 14);

	if (sc->sc_pcr & ETH_EPCR_EN) {
		int tries = 1000;
		/*
		 * Abort transmitter and receiver and wait for them to quiesce
		 */
		GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR|ETH_ESDCMR_AT);
		do {
			delay(100);
		} while (tries-- > 0 && (GE_READ(sc, ESDCMR) & (ETH_ESDCMR_AR|ETH_ESDCMR_AT)));
	}

	sc->sc_pcr &= ~(ETH_EPCR_EN | ETH_EPCR_RBM | ETH_EPCR_PM | ETH_EPCR_PBF);

#if defined(DEBUG)
	aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr);
#endif

	/*
	 * Now turn off the GT.  If it didn't quiesce, too ***ing bad.
	 */
	GE_WRITE(sc, EPCR, sc->sc_pcr);
#ifndef __rtems__
	GE_WRITE(sc, EIMR, sc->sc_intrmask);
#else
	/* RTEMS: clear pending interrupts and mask everything until start. */
	GE_WRITE(sc, EICR, 0);
	GE_WRITE(sc, EIMR, 0);
#endif
	/* DMA burst size 4, receive-interrupt-on-frame-boundary. */
	sdcr = GE_READ(sc, ESDCR);
	ETH_ESDCR_BSZ_SET(sdcr, ETH_ESDCR_BSZ_4);
	sdcr |= ETH_ESDCR_RIFB;
	GE_WRITE(sc, ESDCR, sdcr);
	sc->sc_max_frame_length = 1536;

	aprint_normal("\n");
#ifndef __rtems__
	/* Hook up the MII/media layer and probe for PHYs. */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = gfe_mii_read;
	sc->sc_mii.mii_writereg = gfe_mii_write;
	sc->sc_mii.mii_statchg = gfe_mii_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, gfe_mii_mediachange,
	    gfe_mii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, phyaddr,
	    MII_OFFSET_ANY, MIIF_NOISOLATE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* no PHY found: expose a "none" media so ifconfig works */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
#else
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_output = ether_output;
	ifp->if_init = gfe_init;
	ifp->if_snd.ifq_maxlen = GE_TXDESC_MAX - 1;
	ifp->if_baudrate = 10000000;
#endif
	/* Standard ifnet wiring, shared by both builds. */
	ifp->if_softc = sc;
	/* ifp->if_mowner = &sc->sc_mowner; */
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#if 0
	ifp->if_flags |= IFF_DEBUG;
#endif
	ifp->if_ioctl = gfe_ifioctl;
	ifp->if_start = gfe_ifstart;
	ifp->if_watchdog = gfe_ifwatchdog;

	if (sc->sc_flags & GE_NOFREE) {
		/* Allocate all rings and the hash table up front;
		 * they will never be freed (GE_NOFREE). */
		error = gfe_rx_rxqalloc(sc, GE_RXPRIO_HI);
		if (!error)
			error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDHI);
		if (!error)
			error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDLO);
		if (!error)
			error = gfe_rx_rxqalloc(sc, GE_RXPRIO_LO);
		if (!error)
			error = gfe_tx_txqalloc(sc, GE_TXPRIO_HI);
		if (!error)
			error = gfe_hash_alloc(sc);
		if (error)
			aprint_error(
			    "%s: failed to allocate resources: %d\n",
			    ifp->if_xname, error);
	}

	if_attach(ifp);
#ifndef __rtems__
	ether_ifattach(ifp, enaddr);
#else
	ether_ifattach(ifp);
#endif
#if NBPFILTER > 0
	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
#endif
#if NRND > 0
	rnd_attach_source(&sc->sc_rnd_source, self->dv_xname, RND_TYPE_NET, 0);
#endif
#ifndef __rtems__
	intr_establish(IRQ_ETH0 + sc->sc_macno, IST_LEVEL, IPL_NET,
	    gfe_intr, sc);
#else
	/* RTEMS: interrupt hookup is handled by the METHODS table. */
	return 0;
#endif
}
---|
562 | |
---|
563 | int |
---|
564 | gfe_dmamem_alloc(struct gfe_softc *sc, struct gfe_dmamem *gdm, int maxsegs, |
---|
565 | size_t size, int flags) |
---|
566 | { |
---|
567 | int error = 0; |
---|
568 | GE_FUNC_ENTER(sc, "gfe_dmamem_alloc"); |
---|
569 | |
---|
570 | KASSERT(gdm->gdm_kva == NULL); |
---|
571 | gdm->gdm_size = size; |
---|
572 | gdm->gdm_maxsegs = maxsegs; |
---|
573 | |
---|
574 | #ifndef __rtems__ |
---|
575 | error = bus_dmamem_alloc(sc->sc_dmat, gdm->gdm_size, PAGE_SIZE, |
---|
576 | gdm->gdm_size, gdm->gdm_segs, gdm->gdm_maxsegs, &gdm->gdm_nsegs, |
---|
577 | BUS_DMA_NOWAIT); |
---|
578 | if (error) |
---|
579 | goto fail; |
---|
580 | |
---|
581 | error = bus_dmamem_map(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs, |
---|
582 | gdm->gdm_size, &gdm->gdm_kva, flags | BUS_DMA_NOWAIT); |
---|
583 | if (error) |
---|
584 | goto fail; |
---|
585 | |
---|
586 | error = bus_dmamap_create(sc->sc_dmat, gdm->gdm_size, gdm->gdm_nsegs, |
---|
587 | gdm->gdm_size, 0, BUS_DMA_ALLOCNOW|BUS_DMA_NOWAIT, &gdm->gdm_map); |
---|
588 | if (error) |
---|
589 | goto fail; |
---|
590 | |
---|
591 | error = bus_dmamap_load(sc->sc_dmat, gdm->gdm_map, gdm->gdm_kva, |
---|
592 | gdm->gdm_size, NULL, BUS_DMA_NOWAIT); |
---|
593 | if (error) |
---|
594 | goto fail; |
---|
595 | #else |
---|
596 | gdm->gdm_segs[0].ds_len = size; |
---|
597 | |
---|
598 | /* FIXME: probably we can relax the alignment */ |
---|
599 | if ( ! ( gdm->gdm_unaligned_buf = malloc( size + PAGE_SIZE - 1, M_DEVBUF, M_NOWAIT ) ) ) |
---|
600 | goto fail; |
---|
601 | |
---|
602 | gdm->gdm_map = gdm; |
---|
603 | gdm->gdm_nsegs = 1; |
---|
604 | gdm->gdm_kva = (caddr_t)(gdm->gdm_segs[0].ds_addr = _DO_ALIGN(gdm->gdm_unaligned_buf, PAGE_SIZE)); |
---|
605 | #endif |
---|
606 | |
---|
607 | /* invalidate from cache */ |
---|
608 | bus_dmamap_sync(sc->sc_dmat, gdm->gdm_map, 0, gdm->gdm_size, |
---|
609 | BUS_DMASYNC_PREREAD); |
---|
610 | fail: |
---|
611 | if (error) { |
---|
612 | gfe_dmamem_free(sc, gdm); |
---|
613 | GE_DPRINTF(sc, (":err=%d", error)); |
---|
614 | } |
---|
615 | GE_DPRINTF(sc, (":kva=%p/%#x,map=%p,nsegs=%d,pa=%" PRIx32 "/%" PRIx32, |
---|
616 | gdm->gdm_kva, gdm->gdm_size, gdm->gdm_map, gdm->gdm_map->dm_nsegs, |
---|
617 | gdm->gdm_map->dm_segs->ds_addr, gdm->gdm_map->dm_segs->ds_len)); |
---|
618 | GE_FUNC_EXIT(sc, ""); |
---|
619 | return error; |
---|
620 | } |
---|
621 | |
---|
/*
 * Release DMA memory obtained from gfe_dmamem_alloc() and reset the
 * bookkeeping fields so the gfe_dmamem can be reused.  Safe to call on
 * a partially-initialized gdm (each step is guarded).
 */
void
gfe_dmamem_free(struct gfe_softc *sc, struct gfe_dmamem *gdm)
{
	GE_FUNC_ENTER(sc, "gfe_dmamem_free");
#ifndef __rtems__
	if (gdm->gdm_map)
		bus_dmamap_destroy(sc->sc_dmat, gdm->gdm_map);
	if (gdm->gdm_kva)
		bus_dmamem_unmap(sc->sc_dmat, gdm->gdm_kva, gdm->gdm_size);
	if (gdm->gdm_nsegs > 0)
		bus_dmamem_free(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs);
#else
	/* RTEMS: the whole region is one malloc()ed chunk (see alloc). */
	if (gdm->gdm_nsegs > 0)
		free(gdm->gdm_unaligned_buf, M_DEVBUF);
#endif
	gdm->gdm_map = NULL;
	gdm->gdm_kva = NULL;
	gdm->gdm_nsegs = 0;
	GE_FUNC_EXIT(sc, "");
}
---|
642 | |
---|
/*
 * ifnet ioctl handler: address assignment (NetBSD), up/down transitions,
 * multicast list changes, MTU and media selection.  All work happens at
 * splnet(); state changes are funneled through gfe_whack().
 */
#ifndef __rtems__
int
gfe_ifioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
#else
int
gfe_ifioctl(struct ifnet *ifp, ioctl_command_t cmd, caddr_t data)
#endif
{
	struct gfe_softc * const sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
#ifndef __rtems__
	struct ifaddr *ifa = (struct ifaddr *) data;
#endif
	int s, error = 0;

	GE_FUNC_ENTER(sc, "gfe_ifioctl");
	s = splnet();

	switch (cmd) {
#ifndef __rtems__
	case SIOCSIFADDR:
		/* Assigning an address implicitly brings the interface up. */
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			error = gfe_whack(sc, GE_WHACK_START);
			if (error == 0)
				arp_ifinit(ifp, ifa);
			break;
#endif
		default:
			error = gfe_whack(sc, GE_WHACK_START);
			break;
		}
		break;
#endif

	case SIOCSIFFLAGS:
		/* Mirror IFF_PROMISC into the port config register shadow. */
		if ((sc->sc_ec.ec_if.if_flags & IFF_PROMISC) == 0)
			sc->sc_pcr &= ~ETH_EPCR_PM;
		else
			sc->sc_pcr |= ETH_EPCR_PM;
		/* Dispatch on the UP/RUNNING transition being requested. */
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_UP|IFF_RUNNING:/* active->active, update */
			error = gfe_whack(sc, GE_WHACK_CHANGE);
			break;
		case IFF_RUNNING:	/* not up, so we stop */
			error = gfe_whack(sc, GE_WHACK_STOP);
			break;
		case IFF_UP:		/* not running, so we start */
			error = gfe_whack(sc, GE_WHACK_START);
			break;
		case 0:			/* idle->idle: do nothing */
			break;
		}
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI)
		    ? ether_addmulti(ifr, &sc->sc_ec)
		    : ether_delmulti(ifr, &sc->sc_ec);
		/* ENETRESET means the hardware filter must be reprogrammed. */
		if (error == ENETRESET) {
			if (ifp->if_flags & IFF_RUNNING)
#if !defined(__rtems__)
				error = gfe_whack(sc, GE_WHACK_CHANGE);
#else
				/* doing GE_WHACK_CHANGE seems wrong - that
				 * doesn't do anything to the hash table.
				 * Therefore we perform a stop/start sequence.
				 */
				{
					error = gfe_whack(sc, GE_WHACK_STOP);
					if ( error )
						break;
					error = gfe_whack(sc, GE_WHACK_START);
				}
#endif
			else
				error = 0;
		}
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) {
			error = EINVAL;
			break;
		}
		ifp->if_mtu = ifr->ifr_mtu;
		break;

	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
#ifndef __rtems__
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
#else
		/* RTEMS has no ifmedia; talk to the PHY via the MDIO shim. */
		error = rtems_mii_ioctl(&gfe_mdio_access, sc, cmd, &ifr->ifr_media);
#endif
		break;

	default:
#ifndef __rtems__
		error = EINVAL;
#else
		/* Let the generic ethernet layer handle everything else. */
		error = ether_ioctl(ifp, cmd, data);
#endif
		break;
	}
	splx(s);
	GE_FUNC_EXIT(sc, "");
	return error;
}
---|
755 | |
---|
/*
 * ifnet start routine: drain if_snd into the high-priority TX pending
 * queue, kicking the transmitter after each enqueue.  Exits in one of
 * two ways: if_snd empties (clear IFF_OACTIVE and return from inside
 * the loop), or the pending queue fills (break, push the mbuf back on
 * if_snd and set IFF_OACTIVE so upper layers stop calling us).
 */
void
gfe_ifstart(struct ifnet *ifp)
{
	struct gfe_softc * const sc = ifp->if_softc;
	struct mbuf *m;

	GE_FUNC_ENTER(sc, "gfe_ifstart");

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		GE_FUNC_EXIT(sc, "$");
		return;
	}

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* Everything queued; we are no longer blocked. */
			ifp->if_flags &= ~IFF_OACTIVE;
			GE_FUNC_EXIT(sc, "");
			return;
		}

		/*
		 * No space in the pending queue?  try later.
		 * (break leaves the loop with 'm' still in hand)
		 */
		if (IF_QFULL(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq))
			break;

		/*
		 * Try to enqueue a mbuf to the device. If that fails, we
		 * can always try to map the next mbuf.
		 */
		IF_ENQUEUE(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq, m);
		GE_DPRINTF(sc, (">"));
#ifndef GE_NOTX
		(void) gfe_tx_enqueue(sc, GE_TXPRIO_HI);
#endif
	}

	/*
	 * Attempt to queue the mbuf for send failed.
	 * Put it back at the head of if_snd and mark ourselves busy.
	 */
	IF_PREPEND(&ifp->if_snd, m);
	ifp->if_flags |= IFF_OACTIVE;
	GE_FUNC_EXIT(sc, "%%");
}
---|
801 | |
---|
/*
 * Transmit watchdog: the TX queue stalled.  Dump diagnostic state for
 * the high-priority queue (first-in-flight vs. hardware's current
 * descriptor), count an output error, and restart the device.
 */
void
gfe_ifwatchdog(struct ifnet *ifp)
{
	struct gfe_softc * const sc = ifp->if_softc;
	struct gfe_txqueue * const txq = &sc->sc_txq[GE_TXPRIO_HI];

	GE_FUNC_ENTER(sc, "gfe_ifwatchdog");
	printf("%s: device timeout", sc->sc_dev.dv_xname);
	if (ifp->if_flags & IFF_RUNNING) {
		/* Translate the hardware's current-TX-descriptor pointer
		 * back into a ring index. */
		uint32_t curtxdnum = (bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh, txq->txq_ectdp) - txq->txq_desc_busaddr) / sizeof(txq->txq_descs[0]);
		/* Sync descriptors from DMA memory before peeking at them. */
		GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
		GE_TXDPOSTSYNC(sc, txq, curtxdnum);
		printf(" (fi=%d(%#x),lo=%d,cur=%" PRId32 "(%#x),icm=%#x) ",
		    txq->txq_fi, txq->txq_descs[txq->txq_fi].ed_cmdsts,
		    txq->txq_lo, curtxdnum, txq->txq_descs[curtxdnum].ed_cmdsts,
		    GE_READ(sc, EICR));
		/* Hand the descriptors back to the device. */
		GE_TXDPRESYNC(sc, txq, txq->txq_fi);
		GE_TXDPRESYNC(sc, txq, curtxdnum);
	}
	printf("\n");
	ifp->if_oerrors++;
	(void) gfe_whack(sc, GE_WHACK_RESTART);
	GE_FUNC_EXIT(sc, "");
}
---|
826 | |
---|
827 | #ifdef __rtems__ |
---|
828 | static struct mbuf * |
---|
829 | gfe_newbuf(struct mbuf *m) |
---|
830 | { |
---|
831 | if ( !m ) { |
---|
832 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
---|
833 | if ( !m ) |
---|
834 | return 0; |
---|
835 | MCLGET(m, M_DONTWAIT); |
---|
836 | if ( !(M_EXT & m->m_flags) ) { |
---|
837 | m_freem(m); |
---|
838 | return 0; |
---|
839 | } |
---|
840 | } else { |
---|
841 | m->m_data = m->m_ext.ext_buf; |
---|
842 | } |
---|
843 | m->m_len = m->m_pkthdr.len = MCLBYTES; |
---|
844 | #if 0 |
---|
845 | m_adj(m, 2); /* so payload is 16-byte aligned */ |
---|
846 | #endif |
---|
847 | return m; |
---|
848 | } |
---|
849 | #endif |
---|
850 | |
---|
/*
 * Allocate the resources for one RX priority queue: an uncached DMA
 * region for the descriptor ring and, on RTEMS, an array of
 * GE_RXDESC_MAX cluster mbufs used as receive buffers (NetBSD instead
 * allocates one flat DMA buffer region).  Returns 0 on success, a
 * non-zero error otherwise.
 */
int
gfe_rx_rxqalloc(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
	int error;

	GE_FUNC_ENTER(sc, "gfe_rx_rxqalloc");
	GE_DPRINTF(sc, ("(%d)", rxprio));

	/* descriptor ring is allocated uncached (BUS_DMA_NOCACHE) */
	error = gfe_dmamem_alloc(sc, &rxq->rxq_desc_mem, 1,
	    GE_RXDESC_MEMSIZE, BUS_DMA_NOCACHE);
	if (error) {
		GE_FUNC_EXIT(sc, "!!");
		return error;
	}

#ifndef __rtems__
	error = gfe_dmamem_alloc(sc, &rxq->rxq_buf_mem, GE_RXBUF_NSEGS,
	    GE_RXBUF_MEMSIZE, 0);
#else
	/* RTEMS: one cluster mbuf per RX descriptor slot */
	if ( ! (rxq->rxq_bufs = malloc( sizeof(*rxq->rxq_bufs) * GE_RXDESC_MAX, M_DEVBUF, M_NOWAIT ) ) ) {
		error = -1;
	} else {
		int i;
		for ( i = 0; i<GE_RXDESC_MAX; i++ ) {
			if ( !(rxq->rxq_bufs[i] = gfe_newbuf(0)) ) {
				fprintf(stderr,"gfe: Not enough mbuf clusters to initialize RX ring!\n");
				/* undo the partial allocation */
				while (--i >=0 ) {
					m_freem(rxq->rxq_bufs[i]);
				}
				free(rxq->rxq_bufs, M_DEVBUF);
				rxq->rxq_bufs = 0;
				error = -1;
				break;
			}
		}
	}
#endif
	/*
	 * NOTE(review): on this failure path the descriptor ring allocated
	 * above is NOT released here; presumably a later gfe_rx_cleanup()
	 * reclaims it — verify, otherwise this leaks rxq_desc_mem.
	 */
	if (error) {
		GE_FUNC_EXIT(sc, "!!!");
		return error;
	}
	GE_FUNC_EXIT(sc, "");
	return error;
}
---|
896 | |
---|
/*
 * Initialize the RX descriptor ring of one priority queue.  Unless
 * GE_NOFREE says the queue's memory persists across restarts, the
 * resources are (re)allocated first.  Every descriptor is given a full
 * buffer, marked hardware-owned (RX_CMD_O) and linked to its successor,
 * with the last descriptor pointing back at the first (circular ring).
 * Finally the priority-specific interrupt bits and the first/current
 * RX descriptor pointer register offsets are recorded in the queue.
 * Returns 0 on success.
 */
int
gfe_rx_rxqinit(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
	volatile struct gt_eth_desc *rxd;
#ifndef __rtems__
	const bus_dma_segment_t *ds;
#endif
	int idx;
	bus_addr_t nxtaddr;
#ifndef __rtems__
	bus_size_t boff;
#endif

	GE_FUNC_ENTER(sc, "gfe_rx_rxqinit");
	GE_DPRINTF(sc, ("(%d)", rxprio));

	if ((sc->sc_flags & GE_NOFREE) == 0) {
		int error = gfe_rx_rxqalloc(sc, rxprio);
		if (error) {
			GE_FUNC_EXIT(sc, "!");
			return error;
		}
	} else {
		/* GE_NOFREE: resources must survive from a prior init */
		KASSERT(rxq->rxq_desc_mem.gdm_kva != NULL);
#ifndef __rtems__
		KASSERT(rxq->rxq_buf_mem.gdm_kva != NULL);
#else
		KASSERT(rxq->rxq_bufs != NULL);
#endif
	}

	memset(rxq->rxq_desc_mem.gdm_kva, 0, GE_RXDESC_MEMSIZE);

	rxq->rxq_descs =
	    (volatile struct gt_eth_desc *) rxq->rxq_desc_mem.gdm_kva;
	rxq->rxq_desc_busaddr = rxq->rxq_desc_mem.gdm_map->dm_segs[0].ds_addr;
#ifndef __rtems__
	rxq->rxq_bufs = (struct gfe_rxbuf *) rxq->rxq_buf_mem.gdm_kva;
#endif
	rxq->rxq_fi = 0;
	rxq->rxq_active = GE_RXDESC_MAX;
	/*
	 * Walk the ring: set each descriptor's buffer size (upper 16 bits
	 * of ed_lencnt), ownership flags, buffer address and the bus
	 * address of the following descriptor (nxtaddr).
	 */
	for (idx = 0, rxd = rxq->rxq_descs,
#ifndef __rtems__
	    boff = 0, ds = rxq->rxq_buf_mem.gdm_map->dm_segs,
#endif
	    nxtaddr = rxq->rxq_desc_busaddr + sizeof(*rxd);
	    idx < GE_RXDESC_MAX;
	    idx++, rxd++, nxtaddr += sizeof(*rxd)) {
#ifndef __rtems__
		rxd->ed_lencnt = htogt32(GE_RXBUF_SIZE << 16);
#else
		rxd->ed_lencnt = htogt32(MCLBYTES << 16);
#endif
		rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI);
#ifndef __rtems__
		rxd->ed_bufptr = htogt32(ds->ds_addr + boff);
#else
		/* RTEMS: each slot has its own cluster mbuf */
		rxd->ed_bufptr = htogt32(mtod(rxq->rxq_bufs[idx], uint32_t));
#endif
		/*
		 * update the nxtptr to point to the next txd.
		 */
		if (idx == GE_RXDESC_MAX - 1)
			nxtaddr = rxq->rxq_desc_busaddr;	/* close the ring */
		rxd->ed_nxtptr = htogt32(nxtaddr);
#ifndef __rtems__
		/* advance through the DMA segment list as buffers are used */
		boff += GE_RXBUF_SIZE;
		if (boff == ds->ds_len) {
			ds++;
			boff = 0;
		}
#endif
	}
	/* flush the freshly-written ring so the chip sees it */
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 0,
	    rxq->rxq_desc_mem.gdm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#ifndef __rtems__
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 0,
	    rxq->rxq_buf_mem.gdm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
#else
	/* FIXME: we leave this call in here so compilation fails
	 * if bus_dmamap_sync() is ever fleshed-out to implement
	 * software cache coherency...
	 */
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 0,
	    rxq->rxq_buf_mem.gdm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
#endif

	/* record this priority's interrupt bits and FRDP/CRDP registers */
	rxq->rxq_intrbits = ETH_IR_RxBuffer|ETH_IR_RxError;
	switch (rxprio) {
	case GE_RXPRIO_HI:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_3|ETH_IR_RxError_3;
		rxq->rxq_efrdp = ETH_EFRDP3(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP3(sc->sc_macno);
		break;
	case GE_RXPRIO_MEDHI:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_2|ETH_IR_RxError_2;
		rxq->rxq_efrdp = ETH_EFRDP2(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP2(sc->sc_macno);
		break;
	case GE_RXPRIO_MEDLO:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_1|ETH_IR_RxError_1;
		rxq->rxq_efrdp = ETH_EFRDP1(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP1(sc->sc_macno);
		break;
	case GE_RXPRIO_LO:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_0|ETH_IR_RxError_0;
		rxq->rxq_efrdp = ETH_EFRDP0(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP0(sc->sc_macno);
		break;
	}
	GE_FUNC_EXIT(sc, "");
	return 0;
}
---|
1014 | |
---|
/*
 * Harvest received packets from one RX priority queue and pass them up
 * to the network stack, stopping at the first descriptor that is still
 * owned by the hardware.  On RTEMS the filled cluster mbuf is handed up
 * and a fresh one installed in the descriptor (zero-copy swap); on
 * NetBSD the data is copied out of the fixed DMA buffer into an mbuf.
 * In every case the descriptor is re-armed (ownership returned to the
 * chip) at the give_it_back label.
 */
void
gfe_rx_get(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
#ifndef __rtems__
	struct mbuf *m = rxq->rxq_curpkt;
#else
	struct mbuf *m;
#endif

	GE_FUNC_ENTER(sc, "gfe_rx_get");
	GE_DPRINTF(sc, ("(%d)", rxprio));

	while (rxq->rxq_active > 0) {
		volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[rxq->rxq_fi];
#ifndef __rtems__
		struct gfe_rxbuf *rxb = &rxq->rxq_bufs[rxq->rxq_fi];
#else
		/* slot in the ring's mbuf array (not the data itself) */
		struct mbuf **rxb = &rxq->rxq_bufs[rxq->rxq_fi];
#endif
		const struct ether_header *eh;
		unsigned int cmdsts;
		size_t buflen;

		GE_RXDPOSTSYNC(sc, rxq, rxq->rxq_fi);
		cmdsts = gt32toh(rxd->ed_cmdsts);
		GE_DPRINTF(sc, (":%d=%#x", rxq->rxq_fi, cmdsts));
		rxq->rxq_cmdsts = cmdsts;
		/*
		 * Sometimes the GE "forgets" to reset the ownership bit.
		 * But if the length has been rewritten, the packet is ours
		 * so pretend the O bit is set.
		 */
		buflen = gt32toh(rxd->ed_lencnt) & 0xffff;
		if ((cmdsts & RX_CMD_O) && buflen == 0) {
			/* genuinely still owned by the chip: stop here */
			GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi);
			break;
		}

		/*
		 * If this is not a single buffer packet with no errors
		 * or for some reason it's bigger than our frame size,
		 * ignore it and go to the next packet.
		 */
		if ((cmdsts & (RX_CMD_F|RX_CMD_L|RX_STS_ES)) !=
		    (RX_CMD_F|RX_CMD_L) ||
		    buflen > sc->sc_max_frame_length) {
			GE_DPRINTF(sc, ("!"));
			--rxq->rxq_active;
			ifp->if_ipackets++;
			ifp->if_ierrors++;

			/*
			 * NOTE(review): gfe_newbuf() and this rxb usage
			 * match the __rtems__ definitions yet the call is
			 * not conditionalized — confirm the NetBSD build
			 * path is still intended/compilable here.
			 */
			*rxb = gfe_newbuf(*rxb);
			goto give_it_back;
		}

		/* CRC is included with the packet; trim it off. */
		buflen -= ETHER_CRC_LEN;

#ifndef __rtems__
		if (m == NULL) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				GE_DPRINTF(sc, ("?"));
				break;
			}
		}
		if ((m->m_flags & M_EXT) == 0 && buflen > MHLEN - 2) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				GE_DPRINTF(sc, ("?"));
				break;
			}
		}
		m->m_data += 2;
		m->m_len = 0;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.rcvif = ifp;
#else
		if ( ! (m=gfe_newbuf(0)) ) {
			/* recycle old buffer */
			*rxb = gfe_newbuf(*rxb);
			goto give_it_back;
		}
		/* swap mbufs: full one goes up the stack, empty one
		 * replaces it in the descriptor */
		{
		struct mbuf *tmp = *rxb;
		*rxb = m;
		m = tmp;
		rxd->ed_bufptr = htogt32(mtod(*rxb, uint32_t));
		}
#endif
		rxq->rxq_cmdsts = cmdsts;
		--rxq->rxq_active;

#ifdef __rtems__
		/* FIXME: we leave this call in here so compilation fails
		 * if bus_dmamap_sync() is ever fleshed-out to implement
		 * software cache coherency...
		 */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map,
		    rxq->rxq_fi * sizeof(*rxb), buflen, BUS_DMASYNC_POSTREAD);
#else
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map,
		    rxq->rxq_fi * sizeof(*rxb), buflen, BUS_DMASYNC_POSTREAD);

		KASSERT(m->m_len == 0 && m->m_pkthdr.len == 0);
		memcpy(m->m_data + m->m_len, rxb->rb_data, buflen);
#endif

		m->m_len = buflen;
		m->m_pkthdr.len = buflen;

		ifp->if_ipackets++;
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/*
		 * Software address filter: accept if promiscuous, if the
		 * chip matched it, if it is multicast/broadcast (low bit
		 * of dhost set), or if it is addressed to us.
		 */
		eh = (const struct ether_header *) m->m_data;
		if ((ifp->if_flags & IFF_PROMISC) ||
		    (rxq->rxq_cmdsts & RX_STS_M) == 0 ||
		    (rxq->rxq_cmdsts & RX_STS_HE) ||
		    (eh->ether_dhost[0] & 1) != 0 ||
		    memcmp(eh->ether_dhost,
#ifndef __rtems__
			LLADDR(ifp->if_sadl),
#else
			sc->sc_ec.ac_enaddr,
#endif
			ETHER_ADDR_LEN) == 0) {
#ifndef __rtems__
			(*ifp->if_input)(ifp, m);
			m = NULL;
#else
			DO_ETHER_INPUT_SKIPPING_ETHER_HEADER(ifp,m);
#endif
			GE_DPRINTF(sc, (">"));
		} else {
#ifndef __rtems__
			/* rejected: keep the mbuf for reuse, just reset it */
			m->m_len = 0;
			m->m_pkthdr.len = 0;
#else
			m_freem(m);
#endif
			GE_DPRINTF(sc, ("+"));
		}
		rxq->rxq_cmdsts = 0;

	give_it_back:
		/* re-arm the descriptor and hand it back to the chip */
		rxd->ed_lencnt &= ~0xffff;	/* zero out length */
		rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI);
#if 0
		GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)",
		    rxq->rxq_fi,
		    ((unsigned long *)rxd)[0], ((unsigned long *)rxd)[1],
		    ((unsigned long *)rxd)[2], ((unsigned long *)rxd)[3]));
#endif
		GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi);
		if (++rxq->rxq_fi == GE_RXDESC_MAX)
			rxq->rxq_fi = 0;
		rxq->rxq_active++;
	}
#ifndef __rtems__
	/* remember a partially-prepared mbuf for the next call */
	rxq->rxq_curpkt = m;
#endif
	GE_FUNC_EXIT(sc, "");
}
---|
1184 | |
---|
/*
 * Interrupt-time RX dispatcher.  `cause' holds the pending interrupt
 * bits.  Each pending RxBuffer bit is decoded to its queue priority and
 * that queue is drained via gfe_rx_get().  RxError bits indicate queues
 * that stalled (e.g. ran out of descriptors): their interrupt bits are
 * moved into sc_idlemask, masked out of the returned interrupt mask,
 * and a one-tick callout is scheduled to restart them (gfe_tick).
 * Returns the possibly-reduced interrupt mask.
 */
uint32_t
gfe_rx_process(struct gfe_softc *sc, uint32_t cause, uint32_t intrmask)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_rxqueue *rxq;
	uint32_t rxbits;
	/*
	 * Packed 2-bit lookup table: (RXPRIO_DECODER >> (rxbits * 2)) & 3
	 * yields the index of the most significant bit set in rxbits,
	 * i.e. the highest pending priority (0..3).
	 */
#define	RXPRIO_DECODER	0xffffaa50
	GE_FUNC_ENTER(sc, "gfe_rx_process");

	/* service queues with received packets, highest priority first */
	rxbits = ETH_IR_RxBuffer_GET(cause);
	while (rxbits) {
		enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3;
		GE_DPRINTF(sc, ("%1" PRIx32, rxbits));
		rxbits &= ~(1 << rxprio);
		gfe_rx_get(sc, rxprio);
	}

	/* handle queues that reported errors / descriptor exhaustion */
	rxbits = ETH_IR_RxError_GET(cause);
	while (rxbits) {
		enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3;
		uint32_t masks[(GE_RXDESC_MAX + 31) / 32];
		int idx;
		rxbits &= ~(1 << rxprio);
		rxq = &sc->sc_rxq[rxprio];
		/* park this queue's interrupts until gfe_tick restarts it */
		sc->sc_idlemask |= (rxq->rxq_intrbits & ETH_IR_RxBits);
		intrmask &= ~(rxq->rxq_intrbits & ETH_IR_RxBits);
		if ((sc->sc_tickflags & GE_TICK_RX_RESTART) == 0) {
			sc->sc_tickflags |= GE_TICK_RX_RESTART;
			callout_reset(&sc->sc_co, 1, gfe_tick, sc);
		}
		ifp->if_ierrors++;
		GE_DPRINTF(sc, ("%s: rx queue %d filled at %u\n",
		    sc->sc_dev.dv_xname, rxprio, rxq->rxq_fi));
		/* build a bitmap of descriptors still owned by the chip
		 * (diagnostic only) */
		memset(masks, 0, sizeof(masks));
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
		    0, rxq->rxq_desc_mem.gdm_size,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		for (idx = 0; idx < GE_RXDESC_MAX; idx++) {
			volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[idx];

			if (RX_CMD_O & gt32toh(rxd->ed_cmdsts))
				masks[idx/32] |= 1 << (idx & 31);
		}
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
		    0, rxq->rxq_desc_mem.gdm_size,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#if defined(DEBUG)
		printf("%s: rx queue %d filled at %u=%#x(%#x/%#x)\n",
		    sc->sc_dev.dv_xname, rxprio, rxq->rxq_fi,
		    rxq->rxq_cmdsts, masks[0], masks[1]);
#endif
	}
	/* if no RX queue interrupts remain, drop the summary bits too */
	if ((intrmask & ETH_IR_RxBits) == 0)
		intrmask &= ~(ETH_IR_RxBuffer|ETH_IR_RxError);

	GE_FUNC_EXIT(sc, "");
	return intrmask;
}
---|
1243 | |
---|
/*
 * (Re)initialize all four RX priority queues and, when the receiver is
 * not already active (GE_RXACTIVE clear), point the chip's first and
 * current RX descriptor pointer registers at each ring.  The queues'
 * interrupt bits are accumulated into sc_intrmask.  Returns 0 on
 * success, or the first queue-init error encountered.
 *
 * NOTE(review): for GE_RXPRIO_HI the sc_intrmask update happens
 * unconditionally, while for the other three priorities it happens only
 * when GE_RXACTIVE was clear — confirm this asymmetry is intentional.
 */
int
gfe_rx_prime(struct gfe_softc *sc)
{
	struct gfe_rxqueue *rxq;
	int error;

	GE_FUNC_ENTER(sc, "gfe_rx_prime");

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_HI);
	if (error)
		goto bail;
	rxq = &sc->sc_rxq[GE_RXPRIO_HI];
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr);
	}
	sc->sc_intrmask |= rxq->rxq_intrbits;

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDHI);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI];
		GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDLO);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO];
		GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_LO);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = &sc->sc_rxq[GE_RXPRIO_LO];
		GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

  bail:
	GE_FUNC_EXIT(sc, "");
	return error;
}
---|
1296 | |
---|
/*
 * Release the resources of one RX priority queue: the pending partial
 * packet (NetBSD only) and — unless GE_NOFREE keeps them for reuse —
 * the descriptor ring plus the receive buffers (one DMA region on
 * NetBSD, the per-slot mbuf array on RTEMS).
 */
void
gfe_rx_cleanup(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct gfe_rxqueue *rxq = &sc->sc_rxq[rxprio];
	GE_FUNC_ENTER(sc, "gfe_rx_cleanup");
	/*
	 * NOTE(review): rxq is the address of an array element and can
	 * never be NULL — this guard looks dead.
	 */
	if (rxq == NULL) {
		GE_FUNC_EXIT(sc, "");
		return;
	}

#ifndef __rtems__
	if (rxq->rxq_curpkt)
		m_freem(rxq->rxq_curpkt);
#endif
	if ((sc->sc_flags & GE_NOFREE) == 0) {
		gfe_dmamem_free(sc, &rxq->rxq_desc_mem);
#ifndef __rtems__
		gfe_dmamem_free(sc, &rxq->rxq_buf_mem);
#else
		/* free every ring mbuf, then the pointer array itself */
		if ( rxq->rxq_bufs ) {
			int i;
			for ( i=0; i<GE_RXDESC_MAX; i++ ) {
				if ( rxq->rxq_bufs[i] ) {
					m_freem(rxq->rxq_bufs[i]);
				}
			}
			free(rxq->rxq_bufs, M_DEVBUF);
		}
#endif
	}
	GE_FUNC_EXIT(sc, "");
}
---|
1329 | |
---|
/*
 * Stop reception: clear the active flag, mask all RX interrupt sources,
 * command the SDMA engine to abort receive DMA (ETH_ESDCMR_AR) and
 * busy-wait until the chip clears the abort bit, then tear down all
 * four RX queues.  The `op' argument is currently unused here.
 */
void
gfe_rx_stop(struct gfe_softc *sc, enum gfe_whack_op op)
{
	GE_FUNC_ENTER(sc, "gfe_rx_stop");
	sc->sc_flags &= ~GE_RXACTIVE;
	sc->sc_idlemask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
	sc->sc_intrmask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
	GE_WRITE(sc, EIMR, sc->sc_intrmask);
	/* abort receive DMA and poll for completion */
	GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR);
	do {
		delay(10);
	} while (GE_READ(sc, ESDCMR) & ETH_ESDCMR_AR);
	gfe_rx_cleanup(sc, GE_RXPRIO_HI);
	gfe_rx_cleanup(sc, GE_RXPRIO_MEDHI);
	gfe_rx_cleanup(sc, GE_RXPRIO_MEDLO);
	gfe_rx_cleanup(sc, GE_RXPRIO_LO);
	GE_FUNC_EXIT(sc, "");
}
---|
1348 | |
---|
/*
 * Callout handler: performs work deferred from interrupt context.
 * GE_TICK_TX_IFSTART retries a stalled transmit start.
 * GE_TICK_RX_RESTART re-enables the RX interrupt bits parked in
 * sc_idlemask and rewinds the FRDP/CRDP registers (and software index)
 * of every RX queue that had stalled.  Finally gfe_intr() is called to
 * pick up anything that became pending meanwhile.  Runs at splnet.
 */
void
gfe_tick(void *arg)
{
	struct gfe_softc * const sc = arg;
	uint32_t intrmask;
	unsigned int tickflags;
	int s;

	GE_FUNC_ENTER(sc, "gfe_tick");

	s = splnet();

	/* consume the deferred-work flags atomically w.r.t. interrupts */
	tickflags = sc->sc_tickflags;
	sc->sc_tickflags = 0;
	intrmask = sc->sc_intrmask;
	if (tickflags & GE_TICK_TX_IFSTART)
		gfe_ifstart(&sc->sc_ec.ec_if);
	if (tickflags & GE_TICK_RX_RESTART) {
		/* unmask the interrupts parked by gfe_rx_process() */
		intrmask |= sc->sc_idlemask;
		/* restart each stalled queue from the top of its ring */
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_3|ETH_IR_RxError_3)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_HI];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_2|ETH_IR_RxError_2)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_1|ETH_IR_RxError_1)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_0|ETH_IR_RxError_0)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_LO];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr);
		}
		sc->sc_idlemask = 0;
	}
	/* only touch the hardware mask register if it actually changed */
	if (intrmask != sc->sc_intrmask) {
		sc->sc_intrmask = intrmask;
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
	}
	gfe_intr(sc);
	splx(s);

	GE_FUNC_EXIT(sc, "");
}
---|
1403 | |
---|
1404 | static int |
---|
1405 | gfe_free_slots(struct gfe_softc *sc, struct gfe_txqueue *const txq) |
---|
1406 | { |
---|
1407 | struct ifnet * const ifp = &sc->sc_ec.ec_if; |
---|
1408 | #ifndef __rtems__ |
---|
1409 | const int dcache_line_size = curcpu()->ci_ci.dcache_line_size; |
---|
1410 | #endif |
---|
1411 | int got = 0; |
---|
1412 | int fi = txq->txq_fi; |
---|
1413 | volatile struct gt_eth_desc *txd = &txq->txq_descs[fi]; |
---|
1414 | uint32_t cmdsts; |
---|
1415 | #ifndef __rtems__ |
---|
1416 | size_t pktlen; |
---|
1417 | #endif |
---|
1418 | |
---|
1419 | GE_FUNC_ENTER(sc, "gfe_free_slots"); |
---|
1420 | |
---|
1421 | #ifdef __rtems__ |
---|
1422 | do { |
---|
1423 | #endif |
---|
1424 | GE_TXDPOSTSYNC(sc, txq, fi); |
---|
1425 | if ((cmdsts = gt32toh(txd->ed_cmdsts)) & TX_CMD_O) { |
---|
1426 | int nextin; |
---|
1427 | |
---|
1428 | if (txq->txq_nactive == 1) { |
---|
1429 | GE_TXDPRESYNC(sc, txq, fi); |
---|
1430 | GE_FUNC_EXIT(sc, ""); |
---|
1431 | return -1; |
---|
1432 | } |
---|
1433 | /* |
---|
1434 | * Sometimes the Discovery forgets to update the |
---|
1435 | * ownership bit in the descriptor. See if we own the |
---|
1436 | * descriptor after it (since we know we've turned |
---|
1437 | * that to the Discovery and if we own it now then the |
---|
1438 | * Discovery gave it back). If we do, we know the |
---|
1439 | * Discovery gave back this one but forgot to mark it |
---|
1440 | * as ours. |
---|
1441 | */ |
---|
1442 | nextin = fi + 1; |
---|
1443 | if (nextin == GE_TXDESC_MAX) |
---|
1444 | nextin = 0; |
---|
1445 | GE_TXDPOSTSYNC(sc, txq, nextin); |
---|
1446 | if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) & TX_CMD_O) { |
---|
1447 | GE_TXDPRESYNC(sc, txq, fi); |
---|
1448 | GE_TXDPRESYNC(sc, txq, nextin); |
---|
1449 | GE_FUNC_EXIT(sc, ""); |
---|
1450 | return -1; |
---|
1451 | } |
---|
1452 | #ifdef DEBUG |
---|
1453 | printf("%s: gfe_free_slots: transmitter resynced at %d\n", |
---|
1454 | sc->sc_dev.dv_xname, fi); |
---|
1455 | #endif |
---|
1456 | } |
---|
1457 | got++; |
---|
1458 | #ifdef __rtems__ |
---|
1459 | txd++; |
---|
1460 | fi++; |
---|
1461 | } while ( ! ( TX_CMD_LAST & cmdsts ) ); |
---|
1462 | |
---|
1463 | { struct mbuf *m; |
---|
1464 | IF_DEQUEUE(&txq->txq_sentq, m); |
---|
1465 | m_freem(m); |
---|
1466 | } |
---|
1467 | #endif |
---|
1468 | #if 0 |
---|
1469 | GE_DPRINTF(sc, ("([%d]<-%08lx.%08lx.%08lx.%08lx)", |
---|
1470 | txq->txq_lo, |
---|
1471 | ((unsigned long *)txd)[0], ((unsigned long *)txd)[1], |
---|
1472 | ((unsigned long *)txd)[2], ((unsigned long *)txd)[3])); |
---|
1473 | #endif |
---|
1474 | GE_DPRINTF(sc, ("(%d)", fi)); |
---|
1475 | txq->txq_fi = fi; |
---|
1476 | if ( txq->txq_fi >= GE_TXDESC_MAX) |
---|
1477 | txq->txq_fi -= GE_TXDESC_MAX; |
---|
1478 | #ifndef __rtems__ |
---|
1479 | txq->txq_inptr = gt32toh(txd->ed_bufptr) - txq->txq_buf_busaddr; |
---|
1480 | pktlen = (gt32toh(txd->ed_lencnt) >> 16) & 0xffff; |
---|
1481 | bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map, |
---|
1482 | txq->txq_inptr, pktlen, BUS_DMASYNC_POSTWRITE); |
---|
1483 | txq->txq_inptr += roundup(pktlen, dcache_line_size); |
---|
1484 | #endif |
---|
1485 | |
---|
1486 | /* statistics */ |
---|
1487 | ifp->if_opackets++; |
---|
1488 | #ifdef __rtems__ |
---|
1489 | /* FIXME: should we check errors on every fragment? */ |
---|
1490 | #endif |
---|
1491 | if (cmdsts & TX_STS_ES) |
---|
1492 | ifp->if_oerrors++; |
---|
1493 | |
---|
1494 | /* txd->ed_bufptr = 0; */ |
---|
1495 | |
---|
1496 | txq->txq_nactive -= got; |
---|
1497 | |
---|
1498 | GE_FUNC_EXIT(sc, ""); |
---|
1499 | |
---|
1500 | return got; |
---|
1501 | } |
---|
1502 | |
---|
1503 | #ifndef __rtems__ |
---|
/*
 * NetBSD-style TX enqueue: copy the first pending packet into the
 * contiguous TX bounce buffer, describe it with a single descriptor,
 * kick the SDMA engine, and free the mbuf chain.  Returns 1 if a packet
 * was handed to the hardware, 0 otherwise (nothing pending, no free
 * descriptor, or the bounce buffer would overrun data the chip still
 * owns — in which case a TxBuffer interrupt is requested instead).
 */
int
gfe_tx_enqueue(struct gfe_softc *sc, enum gfe_txprio txprio)
{
#ifndef __rtems__
	const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
#else
	/*
	 * NOTE(review): this entire function is compiled only when
	 * __rtems__ is NOT defined (see the enclosing #ifndef), so this
	 * branch looks unreachable — confirm before relying on it.
	 */
#ifndef PPC_CACHE_ALIGNMENT
#error "Unknown cache alignment for your CPU"
#endif
	const int dcache_line_size = PPC_CACHE_ALIGNMENT;
#endif
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	volatile struct gt_eth_desc * const txd = &txq->txq_descs[txq->txq_lo];
	uint32_t intrmask = sc->sc_intrmask;
	size_t buflen;
	struct mbuf *m;

	GE_FUNC_ENTER(sc, "gfe_tx_enqueue");

	/*
	 * Anything in the pending queue to enqueue?  if not, punt. Likewise
	 * if the txq is not yet created.
	 * otherwise grab its dmamap.
	 */
	if (txq == NULL || (m = txq->txq_pendq.ifq_head) == NULL) {
		GE_FUNC_EXIT(sc, "-");
		return 0;
	}

	/*
	 * Have we [over]consumed our limit of descriptors?
	 * Do we have enough free descriptors?
	 */
	if (GE_TXDESC_MAX == txq->txq_nactive + 2) {
		/* try to reclaim a slot first */
		if ( gfe_free_slots(sc, txq) <= 0 )
			return 0;
	}

	/* bounce-buffer space is consumed in cache-line-sized units */
	buflen = roundup(m->m_pkthdr.len, dcache_line_size);

	/*
	 * If this packet would wrap around the end of the buffer, reset back
	 * to the beginning.
	 */
	if (txq->txq_outptr + buflen > GE_TXBUF_SIZE) {
		txq->txq_ei_gapcount += GE_TXBUF_SIZE - txq->txq_outptr;
		txq->txq_outptr = 0;
	}

	/*
	 * Make sure the output packet doesn't run over the beginning of
	 * what we've already given the GT.
	 */
	if (txq->txq_nactive > 0 && txq->txq_outptr <= txq->txq_inptr &&
	    txq->txq_outptr + buflen > txq->txq_inptr) {
		/* buffer full: ask for an interrupt when space frees up */
		intrmask |= txq->txq_intrbits &
		    (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow);
		if (sc->sc_intrmask != intrmask) {
			sc->sc_intrmask = intrmask;
			GE_WRITE(sc, EIMR, sc->sc_intrmask);
		}
		GE_FUNC_EXIT(sc, "#");
		return 0;
	}

	/*
	 * The end-of-list descriptor we put on last time is the starting point
	 * for this packet.  The GT is supposed to terminate list processing on
	 * a NULL nxtptr but that currently is broken so a CPU-owned descriptor
	 * must terminate the list.
	 */
	intrmask = sc->sc_intrmask;

	/* linearize the mbuf chain into the bounce buffer */
	m_copydata(m, 0, m->m_pkthdr.len,
	    txq->txq_buf_mem.gdm_kva + txq->txq_outptr);
	bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map,
	    txq->txq_outptr, buflen, BUS_DMASYNC_PREWRITE);
	txd->ed_bufptr = htogt32(txq->txq_buf_busaddr + txq->txq_outptr);
	/* actual (unrounded) packet length goes in the upper 16 bits */
	txd->ed_lencnt = htogt32(m->m_pkthdr.len << 16);
	GE_TXDPRESYNC(sc, txq, txq->txq_lo);

	/*
	 * Request a buffer interrupt every 2/3 of the way thru the transmit
	 * buffer.
	 */
	txq->txq_ei_gapcount += buflen;
	if (txq->txq_ei_gapcount > 2 * GE_TXBUF_SIZE / 3) {
		txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST|TX_CMD_EI);
		txq->txq_ei_gapcount = 0;
	} else {
		txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST);
	}
#if 0
	GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", txq->txq_lo,
	    ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
	    ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
#endif
	GE_TXDPRESYNC(sc, txq, txq->txq_lo);

	txq->txq_outptr += buflen;
	/*
	 * Tell the SDMA engine to "Fetch!"
	 */
	GE_WRITE(sc, ESDCMR,
	    txq->txq_esdcmrbits & (ETH_ESDCMR_TXDH|ETH_ESDCMR_TXDL));

	GE_DPRINTF(sc, ("(%d)", txq->txq_lo));

	/*
	 * Update the last out appropriately.
	 */
	txq->txq_nactive++;
	if (++txq->txq_lo == GE_TXDESC_MAX)
		txq->txq_lo = 0;

	/*
	 * Move mbuf from the pending queue to the snd queue.
	 */
	IF_DEQUEUE(&txq->txq_pendq, m);
#if NBPFILTER > 0
	if (ifp->if_bpf != NULL)
		bpf_mtap(ifp->if_bpf, m);
#endif
	/* data was copied to the bounce buffer; the mbuf can go now */
	m_freem(m);
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Since we have put an item into the packet queue, we now want
	 * an interrupt when the transmit queue finishes processing the
	 * list.  But only update the mask if needs changing.
	 */
	intrmask |= txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow);
	if (sc->sc_intrmask != intrmask) {
		sc->sc_intrmask = intrmask;
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
	}
	/* arm the watchdog if it is not already running */
	if (ifp->if_timer == 0)
		ifp->if_timer = 5;
	GE_FUNC_EXIT(sc, "*");
	return 1;
}
---|
1646 | |
---|
1647 | #else |
---|
1648 | |
---|
1649 | #ifdef __PPC__ |
---|
/*
 * Full memory barrier.  The PowerPC "sync" instruction orders all prior
 * loads/stores before any later ones; the "memory" clobber additionally
 * prevents the compiler from reordering memory accesses across the call.
 */
static inline void membarrier(void)
{
	asm volatile("sync":::"memory");
}
---|
1654 | #else |
---|
1655 | #error "memory synchronization for your CPU not implemented" |
---|
1656 | #endif |
---|
1657 | |
---|
1658 | |
---|
1659 | void |
---|
1660 | gfe_assign_desc(volatile struct gt_eth_desc *const d, struct mbuf *m, uint32_t flags) |
---|
1661 | { |
---|
1662 | d->ed_cmdsts = htogt32(flags | TX_CMD_GC | TX_CMD_P); |
---|
1663 | d->ed_bufptr = htogt32(mtod(m, uint32_t)); |
---|
1664 | d->ed_lencnt = htogt32(m->m_len << 16); |
---|
1665 | } |
---|
1666 | |
---|
1667 | int |
---|
1668 | gfe_tx_enqueue(struct gfe_softc *sc, enum gfe_txprio txprio) |
---|
1669 | { |
---|
1670 | struct ifnet * const ifp = &sc->sc_ec.ec_if; |
---|
1671 | struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; |
---|
1672 | volatile struct gt_eth_desc * const txd = &txq->txq_descs[txq->txq_lo]; |
---|
1673 | #define NEXT_TXD(d) ((d)+1 < &txq->txq_descs[GE_TXDESC_MAX] ? (d)+1 : txq->txq_descs) |
---|
1674 | volatile struct gt_eth_desc *l,*d; |
---|
1675 | uint32_t intrmask = sc->sc_intrmask; |
---|
1676 | struct mbuf *m_head,*m,*m1; |
---|
1677 | int avail, used; |
---|
1678 | |
---|
1679 | GE_FUNC_ENTER(sc, "gfe_tx_enqueue"); |
---|
1680 | |
---|
1681 | /* |
---|
1682 | * Anything in the pending queue to enqueue? if not, punt. Likewise |
---|
1683 | * if the txq is not yet created. |
---|
1684 | * otherwise grab its dmamap. |
---|
1685 | */ |
---|
1686 | if (txq == NULL || (m_head = txq->txq_pendq.ifq_head) == NULL) { |
---|
1687 | GE_FUNC_EXIT(sc, "-"); |
---|
1688 | return 0; |
---|
1689 | } |
---|
1690 | |
---|
1691 | /* find 1st mbuf with actual data; m_head is not NULL at this point */ |
---|
1692 | for ( m1=m_head; 0 == m1->m_len; ) { |
---|
1693 | if ( ! (m1=m1->m_next) ) { |
---|
1694 | /* nothing to send */ |
---|
1695 | IF_DEQUEUE(&txq->txq_pendq, m_head); |
---|
1696 | m_freem(m_head); |
---|
1697 | return 0; |
---|
1698 | } |
---|
1699 | } |
---|
1700 | |
---|
1701 | avail = GE_TXDESC_MAX - 1 - txq->txq_nactive; |
---|
1702 | |
---|
1703 | if ( avail < 1 && (avail += gfe_free_slots(sc, txq)) < 1 ) |
---|
1704 | return 0; |
---|
1705 | |
---|
1706 | avail--; |
---|
1707 | |
---|
1708 | l = txd; |
---|
1709 | d = NEXT_TXD(txd); |
---|
1710 | |
---|
1711 | for ( m=m1->m_next, used = 1; m; m=m->m_next ) { |
---|
1712 | if ( 0 == m->m_len ) |
---|
1713 | continue; /* skip empty mbufs */ |
---|
1714 | |
---|
1715 | if ( avail < 1 && (avail += gfe_free_slots(sc, txq)) < 1 ) { |
---|
1716 | /* not enough descriptors; cleanup */ |
---|
1717 | for ( l = NEXT_TXD(txd); l!=d; l = NEXT_TXD(l) ) { |
---|
1718 | l->ed_cmdsts = 0; |
---|
1719 | avail++; |
---|
1720 | } |
---|
1721 | avail++; |
---|
1722 | if ( used >= GE_TXDESC_MAX-1 ) |
---|
1723 | panic("mbuf chain (#%i) longer than TX ring (#%i); configuration error!", |
---|
1724 | used, GE_TXDESC_MAX-1); |
---|
1725 | return 0; |
---|
1726 | } |
---|
1727 | used++; |
---|
1728 | avail--; |
---|
1729 | |
---|
1730 | /* fill this slot */ |
---|
1731 | gfe_assign_desc(d, m, TX_CMD_O); |
---|
1732 | |
---|
1733 | bus_dmamap_sync(sc->sc_dmat, /* TODO */, |
---|
1734 | mtod(m, uint32_t), m->m_len, BUS_DMASYNC_PREWRITE); |
---|
1735 | |
---|
1736 | l = d; |
---|
1737 | d = NEXT_TXD(d); |
---|
1738 | |
---|
1739 | GE_TXDPRESYNC(sc, txq, l - txq->txq_descs); |
---|
1740 | } |
---|
1741 | |
---|
1742 | /* fill first slot */ |
---|
1743 | gfe_assign_desc(txd, m1, TX_CMD_F); |
---|
1744 | |
---|
1745 | bus_dmamap_sync(sc->sc_dmat, /* TODO */, |
---|
1746 | mtod(m1, uint32_t), m1->m_len, BUS_DMASYNC_PREWRITE); |
---|
1747 | |
---|
1748 | /* tag last slot; this covers where 1st = last */ |
---|
1749 | l->ed_cmdsts |= htonl(TX_CMD_L | TX_CMD_EI); |
---|
1750 | |
---|
1751 | GE_TXDPRESYNC(sc, txq, l - txq->txq_descs); |
---|
1752 | |
---|
1753 | /* |
---|
1754 | * The end-of-list descriptor we put on last time is the starting point |
---|
1755 | * for this packet. The GT is supposed to terminate list processing on |
---|
1756 | * a NULL nxtptr but that currently is broken so a CPU-owned descriptor |
---|
1757 | * must terminate the list. |
---|
1758 | */ |
---|
1759 | d = NEXT_TXD(l); |
---|
1760 | |
---|
1761 | out_be32(&d->ed_cmdsts,0); |
---|
1762 | |
---|
1763 | GE_TXDPRESYNC(sc, txq, d - txq->txq_descs); |
---|
1764 | |
---|
1765 | membarrier(); |
---|
1766 | |
---|
1767 | /* turn over the whole chain by flipping the ownership of the first desc */ |
---|
1768 | txd->ed_cmdsts |= htonl(TX_CMD_O); |
---|
1769 | |
---|
1770 | GE_TXDPRESYNC(sc, txq, txq->txq_lo); |
---|
1771 | |
---|
1772 | |
---|
1773 | intrmask = sc->sc_intrmask; |
---|
1774 | |
---|
1775 | #if 0 |
---|
1776 | GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", txq->txq_lo, |
---|
1777 | ((unsigned long *)txd)[0], ((unsigned long *)txd)[1], |
---|
1778 | ((unsigned long *)txd)[2], ((unsigned long *)txd)[3])); |
---|
1779 | #endif |
---|
1780 | |
---|
1781 | membarrier(); |
---|
1782 | |
---|
1783 | /* |
---|
1784 | * Tell the SDMA engine to "Fetch!" |
---|
1785 | */ |
---|
1786 | GE_WRITE(sc, ESDCMR, |
---|
1787 | txq->txq_esdcmrbits & (ETH_ESDCMR_TXDH|ETH_ESDCMR_TXDL)); |
---|
1788 | |
---|
1789 | GE_DPRINTF(sc, ("(%d)", txq->txq_lo)); |
---|
1790 | |
---|
1791 | /* |
---|
1792 | * Update the last out appropriately. |
---|
1793 | */ |
---|
1794 | txq->txq_nactive += used; |
---|
1795 | txq->txq_lo += used; |
---|
1796 | if ( txq->txq_lo >= GE_TXDESC_MAX ) |
---|
1797 | txq->txq_lo -= GE_TXDESC_MAX; |
---|
1798 | |
---|
1799 | /* |
---|
1800 | * Move mbuf from the pending queue to the snd queue. |
---|
1801 | */ |
---|
1802 | IF_DEQUEUE(&txq->txq_pendq, m_head); |
---|
1803 | |
---|
1804 | IF_ENQUEUE(&txq->txq_sentq, m_head); |
---|
1805 | |
---|
1806 | #if NBPFILTER > 0 |
---|
1807 | if (ifp->if_bpf != NULL) |
---|
1808 | bpf_mtap(ifp->if_bpf, m_head); |
---|
1809 | #endif |
---|
1810 | ifp->if_flags &= ~IFF_OACTIVE; |
---|
1811 | |
---|
1812 | /* |
---|
1813 | * Since we have put an item into the packet queue, we now want |
---|
1814 | * an interrupt when the transmit queue finishes processing the |
---|
1815 | * list. But only update the mask if needs changing. |
---|
1816 | */ |
---|
1817 | intrmask |= txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow); |
---|
1818 | if (sc->sc_intrmask != intrmask) { |
---|
1819 | sc->sc_intrmask = intrmask; |
---|
1820 | GE_WRITE(sc, EIMR, sc->sc_intrmask); |
---|
1821 | } |
---|
1822 | if (ifp->if_timer == 0) |
---|
1823 | ifp->if_timer = 5; |
---|
1824 | GE_FUNC_EXIT(sc, "*"); |
---|
1825 | return 1; |
---|
1826 | } |
---|
1827 | #endif |
---|
1828 | |
---|
1829 | uint32_t |
---|
1830 | gfe_tx_done(struct gfe_softc *sc, enum gfe_txprio txprio, uint32_t intrmask) |
---|
1831 | { |
---|
1832 | struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; |
---|
1833 | struct ifnet * const ifp = &sc->sc_ec.ec_if; |
---|
1834 | |
---|
1835 | GE_FUNC_ENTER(sc, "gfe_tx_done"); |
---|
1836 | |
---|
1837 | if (txq == NULL) { |
---|
1838 | GE_FUNC_EXIT(sc, ""); |
---|
1839 | return intrmask; |
---|
1840 | } |
---|
1841 | |
---|
1842 | while (txq->txq_nactive > 0) { |
---|
1843 | if ( gfe_free_slots(sc, txq) < 0 ) |
---|
1844 | return intrmask; |
---|
1845 | ifp->if_timer = 5; |
---|
1846 | } |
---|
1847 | if (txq->txq_nactive != 0) |
---|
1848 | panic("%s: transmit fifo%d empty but active count (%d) not 0!", |
---|
1849 | sc->sc_dev.dv_xname, txprio, txq->txq_nactive); |
---|
1850 | ifp->if_timer = 0; |
---|
1851 | intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow)); |
---|
1852 | intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow)); |
---|
1853 | GE_FUNC_EXIT(sc, ""); |
---|
1854 | return intrmask; |
---|
1855 | } |
---|
1856 | |
---|
1857 | int |
---|
1858 | gfe_tx_txqalloc(struct gfe_softc *sc, enum gfe_txprio txprio) |
---|
1859 | { |
---|
1860 | struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; |
---|
1861 | int error; |
---|
1862 | |
---|
1863 | GE_FUNC_ENTER(sc, "gfe_tx_txqalloc"); |
---|
1864 | |
---|
1865 | error = gfe_dmamem_alloc(sc, &txq->txq_desc_mem, 1, |
---|
1866 | GE_TXDESC_MEMSIZE, BUS_DMA_NOCACHE); |
---|
1867 | if (error) { |
---|
1868 | GE_FUNC_EXIT(sc, ""); |
---|
1869 | return error; |
---|
1870 | } |
---|
1871 | #ifndef __rtems__ |
---|
1872 | error = gfe_dmamem_alloc(sc, &txq->txq_buf_mem, 1, GE_TXBUF_SIZE, 0); |
---|
1873 | if (error) { |
---|
1874 | gfe_dmamem_free(sc, &txq->txq_desc_mem); |
---|
1875 | GE_FUNC_EXIT(sc, ""); |
---|
1876 | return error; |
---|
1877 | } |
---|
1878 | #endif |
---|
1879 | GE_FUNC_EXIT(sc, ""); |
---|
1880 | return 0; |
---|
1881 | } |
---|
1882 | |
---|
1883 | int |
---|
1884 | gfe_tx_start(struct gfe_softc *sc, enum gfe_txprio txprio) |
---|
1885 | { |
---|
1886 | struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; |
---|
1887 | volatile struct gt_eth_desc *txd; |
---|
1888 | unsigned int i; |
---|
1889 | bus_addr_t addr; |
---|
1890 | |
---|
1891 | GE_FUNC_ENTER(sc, "gfe_tx_start"); |
---|
1892 | |
---|
1893 | sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh| |
---|
1894 | ETH_IR_TxEndLow |ETH_IR_TxBufferLow); |
---|
1895 | |
---|
1896 | if (sc->sc_flags & GE_NOFREE) { |
---|
1897 | KASSERT(txq->txq_desc_mem.gdm_kva != NULL); |
---|
1898 | #ifndef __rtems__ |
---|
1899 | KASSERT(txq->txq_buf_mem.gdm_kva != NULL); |
---|
1900 | #endif |
---|
1901 | } else { |
---|
1902 | int error = gfe_tx_txqalloc(sc, txprio); |
---|
1903 | if (error) { |
---|
1904 | GE_FUNC_EXIT(sc, "!"); |
---|
1905 | return error; |
---|
1906 | } |
---|
1907 | } |
---|
1908 | |
---|
1909 | txq->txq_descs = |
---|
1910 | (volatile struct gt_eth_desc *) txq->txq_desc_mem.gdm_kva; |
---|
1911 | txq->txq_desc_busaddr = txq->txq_desc_mem.gdm_map->dm_segs[0].ds_addr; |
---|
1912 | #ifndef __rtems__ |
---|
1913 | txq->txq_buf_busaddr = txq->txq_buf_mem.gdm_map->dm_segs[0].ds_addr; |
---|
1914 | #else |
---|
1915 | /* never used */ |
---|
1916 | memset(&txq->txq_pendq,0,sizeof(txq->txq_pendq)); |
---|
1917 | memset(&txq->txq_sentq,0,sizeof(txq->txq_sentq)); |
---|
1918 | txq->txq_sentq.ifq_maxlen = 100000; |
---|
1919 | #endif |
---|
1920 | |
---|
1921 | txq->txq_pendq.ifq_maxlen = 10; |
---|
1922 | #ifndef __rtems__ |
---|
1923 | txq->txq_ei_gapcount = 0; |
---|
1924 | #endif |
---|
1925 | txq->txq_nactive = 0; |
---|
1926 | txq->txq_fi = 0; |
---|
1927 | txq->txq_lo = 0; |
---|
1928 | #ifndef __rtems__ |
---|
1929 | txq->txq_ei_gapcount = 0; |
---|
1930 | txq->txq_inptr = GE_TXBUF_SIZE; |
---|
1931 | txq->txq_outptr = 0; |
---|
1932 | #endif |
---|
1933 | for (i = 0, txd = txq->txq_descs, |
---|
1934 | addr = txq->txq_desc_busaddr + sizeof(*txd); |
---|
1935 | i < GE_TXDESC_MAX - 1; |
---|
1936 | i++, txd++, addr += sizeof(*txd)) { |
---|
1937 | /* |
---|
1938 | * update the nxtptr to point to the next txd. |
---|
1939 | */ |
---|
1940 | txd->ed_cmdsts = 0; |
---|
1941 | txd->ed_nxtptr = htogt32(addr); |
---|
1942 | } |
---|
1943 | txq->txq_descs[GE_TXDESC_MAX-1].ed_nxtptr = |
---|
1944 | htogt32(txq->txq_desc_busaddr); |
---|
1945 | bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_mem.gdm_map, 0, |
---|
1946 | GE_TXDESC_MEMSIZE, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
---|
1947 | |
---|
1948 | switch (txprio) { |
---|
1949 | case GE_TXPRIO_HI: |
---|
1950 | txq->txq_intrbits = ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh; |
---|
1951 | txq->txq_esdcmrbits = ETH_ESDCMR_TXDH; |
---|
1952 | txq->txq_epsrbits = ETH_EPSR_TxHigh; |
---|
1953 | txq->txq_ectdp = ETH_ECTDP1(sc->sc_macno); |
---|
1954 | GE_WRITE(sc, ECTDP1, txq->txq_desc_busaddr); |
---|
1955 | break; |
---|
1956 | |
---|
1957 | case GE_TXPRIO_LO: |
---|
1958 | txq->txq_intrbits = ETH_IR_TxEndLow|ETH_IR_TxBufferLow; |
---|
1959 | txq->txq_esdcmrbits = ETH_ESDCMR_TXDL; |
---|
1960 | txq->txq_epsrbits = ETH_EPSR_TxLow; |
---|
1961 | txq->txq_ectdp = ETH_ECTDP0(sc->sc_macno); |
---|
1962 | GE_WRITE(sc, ECTDP0, txq->txq_desc_busaddr); |
---|
1963 | break; |
---|
1964 | |
---|
1965 | case GE_TXPRIO_NONE: |
---|
1966 | break; |
---|
1967 | } |
---|
1968 | #if 0 |
---|
1969 | GE_DPRINTF(sc, ("(ectdp=%#x", txq->txq_ectdp)); |
---|
1970 | gt_write(sc->sc_dev.dv_parent, txq->txq_ectdp, txq->txq_desc_busaddr); |
---|
1971 | GE_DPRINTF(sc, (")")); |
---|
1972 | #endif |
---|
1973 | |
---|
1974 | /* |
---|
1975 | * If we are restarting, there may be packets in the pending queue |
---|
1976 | * waiting to be enqueued. Try enqueuing packets from both priority |
---|
1977 | * queues until the pending queue is empty or there no room for them |
---|
1978 | * on the device. |
---|
1979 | */ |
---|
1980 | while (gfe_tx_enqueue(sc, txprio)) |
---|
1981 | continue; |
---|
1982 | |
---|
1983 | GE_FUNC_EXIT(sc, ""); |
---|
1984 | return 0; |
---|
1985 | } |
---|
1986 | |
---|
1987 | void |
---|
1988 | gfe_tx_cleanup(struct gfe_softc *sc, enum gfe_txprio txprio, int flush) |
---|
1989 | { |
---|
1990 | struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; |
---|
1991 | |
---|
1992 | GE_FUNC_ENTER(sc, "gfe_tx_cleanup"); |
---|
1993 | if (txq == NULL) { |
---|
1994 | GE_FUNC_EXIT(sc, ""); |
---|
1995 | return; |
---|
1996 | } |
---|
1997 | |
---|
1998 | if (!flush) { |
---|
1999 | GE_FUNC_EXIT(sc, ""); |
---|
2000 | return; |
---|
2001 | } |
---|
2002 | |
---|
2003 | #ifdef __rtems__ |
---|
2004 | /* reclaim mbufs that were never sent */ |
---|
2005 | { |
---|
2006 | struct mbuf *m; |
---|
2007 | while ( txq->txq_sentq.ifq_head ) { |
---|
2008 | IF_DEQUEUE(&txq->txq_sentq, m); |
---|
2009 | m_freem(m); |
---|
2010 | } |
---|
2011 | } |
---|
2012 | #endif |
---|
2013 | |
---|
2014 | if ((sc->sc_flags & GE_NOFREE) == 0) { |
---|
2015 | gfe_dmamem_free(sc, &txq->txq_desc_mem); |
---|
2016 | #ifndef __rtems__ |
---|
2017 | gfe_dmamem_free(sc, &txq->txq_buf_mem); |
---|
2018 | #endif |
---|
2019 | } |
---|
2020 | GE_FUNC_EXIT(sc, "-F"); |
---|
2021 | } |
---|
2022 | |
---|
/*
 * Stop both transmit queues: halt the SDMA engine, reap anything that
 * already completed, mask all Tx interrupts, and (when op is
 * GE_WHACK_STOP) flush and free the queues entirely.
 */
void
gfe_tx_stop(struct gfe_softc *sc, enum gfe_whack_op op)
{
	GE_FUNC_ENTER(sc, "gfe_tx_stop");

	/* command the engine to stop both the high and low Tx queues */
	GE_WRITE(sc, ESDCMR, ETH_ESDCMR_STDH|ETH_ESDCMR_STDL);

	/* reap completed descriptors; gfe_tx_done also prunes Tx bits
	 * from the mask when a queue drains */
	sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, sc->sc_intrmask);
	sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, sc->sc_intrmask);
	sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh|
			     ETH_IR_TxEndLow |ETH_IR_TxBufferLow);

	/* only a full stop flushes/frees the queues */
	gfe_tx_cleanup(sc, GE_TXPRIO_HI, op == GE_WHACK_STOP);
	gfe_tx_cleanup(sc, GE_TXPRIO_LO, op == GE_WHACK_STOP);

	sc->sc_ec.ec_if.if_timer = 0;
	GE_FUNC_EXIT(sc, "");
}
---|
2041 | |
---|
/*
 * Interrupt handler.  Reads and acknowledges the cause register up to
 * four times (new causes can assert while servicing earlier ones),
 * dispatches to the Rx/Tx service routines, then tries to push any
 * still-pending packets onto the hardware.
 *
 * Returns nonzero iff at least one interrupt cause was ours (so a
 * shared-interrupt dispatcher knows whether to claim it).
 */
int
gfe_intr(void *arg)
{
	struct gfe_softc * const sc = arg;
	uint32_t cause;
	uint32_t intrmask = sc->sc_intrmask;
	int claim = 0;
	int cnt;

	GE_FUNC_ENTER(sc, "gfe_intr");

	/* bounded loop: rescan at most 4 times so we cannot livelock here */
	for (cnt = 0; cnt < 4; cnt++) {
		/* the service routines may have changed the desired mask;
		 * write it out before re-reading the cause register */
		if (sc->sc_intrmask != intrmask) {
			sc->sc_intrmask = intrmask;
			GE_WRITE(sc, EIMR, sc->sc_intrmask);
		}
		cause = GE_READ(sc, EICR);
		cause &= sc->sc_intrmask;
		GE_DPRINTF(sc, (".%#" PRIx32, cause));
		if (cause == 0)
			break;

		claim = 1;

		/* acknowledge exactly the causes we are about to service */
		GE_WRITE(sc, EICR, ~cause);
#ifndef GE_NORX
		if (cause & (ETH_IR_RxBuffer|ETH_IR_RxError))
			intrmask = gfe_rx_process(sc, cause, intrmask);
#endif

#ifndef GE_NOTX
		if (cause & (ETH_IR_TxBufferHigh|ETH_IR_TxEndHigh))
			intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, intrmask);
		if (cause & (ETH_IR_TxBufferLow|ETH_IR_TxEndLow))
			intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, intrmask);
#endif
		if (cause & ETH_IR_MIIPhySTC) {
			/* PHY status change: just latch the flag; the media
			 * code polls the PHY later */
			sc->sc_flags |= GE_PHYSTSCHG;
			/* intrmask &= ~ETH_IR_MIIPhySTC; */
		}
	}

	/* Tx slots may have freed up; push out anything still pending */
	while (gfe_tx_enqueue(sc, GE_TXPRIO_HI))
		continue;
	while (gfe_tx_enqueue(sc, GE_TXPRIO_LO))
		continue;

	GE_FUNC_EXIT(sc, "");
	return claim;
}
---|
2092 | |
---|
2093 | #ifndef __rtems__ |
---|
2094 | int |
---|
2095 | gfe_mii_mediachange (struct ifnet *ifp) |
---|
2096 | { |
---|
2097 | struct gfe_softc *sc = ifp->if_softc; |
---|
2098 | |
---|
2099 | if (ifp->if_flags & IFF_UP) |
---|
2100 | mii_mediachg(&sc->sc_mii); |
---|
2101 | |
---|
2102 | return (0); |
---|
2103 | } |
---|
/*
 * ifmedia status callback: report current media status/active settings.
 * If the interrupt handler flagged a PHY status change (GE_PHYSTSCHG),
 * re-poll the PHY first so the reported state is fresh.
 */
void
gfe_mii_mediastatus (struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct gfe_softc *sc = ifp->if_softc;

	if (sc->sc_flags & GE_PHYSTSCHG) {
		sc->sc_flags &= ~GE_PHYSTSCHG;	/* consume the change flag */
		mii_pollstat(&sc->sc_mii);
	}
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
}
---|
2116 | |
---|
/*
 * MII read-register hook (NetBSD build): thin wrapper that delegates to
 * the parent GT device's MII accessor.
 */
int
gfe_mii_read (struct device *self, int phy, int reg)
{
	return gt_mii_read(self, self->dv_parent, phy, reg);
}
---|
2122 | |
---|
/*
 * MII write-register hook (NetBSD build): thin wrapper that delegates to
 * the parent GT device's MII accessor.
 */
void
gfe_mii_write (struct device *self, int phy, int reg, int value)
{
	gt_mii_write(self, self->dv_parent, phy, reg, value);
}
---|
2128 | |
---|
/*
 * MII status-change hook: intentionally a no-op — this MAC needs no
 * reprogramming when the PHY's speed/duplex changes.
 */
void
gfe_mii_statchg (struct device *self)
{
	/* struct gfe_softc *sc = (struct gfe_softc *) self; */
	/* do nothing? */
}
---|
2135 | |
---|
2136 | #else |
---|
2137 | int |
---|
2138 | gfe_mii_read(int phy, void *arg, unsigned reg, uint32_t *pval) |
---|
2139 | { |
---|
2140 | struct gfe_softc *sc = arg; |
---|
2141 | uint32_t data; |
---|
2142 | int count = 10000; |
---|
2143 | |
---|
2144 | if ( 0 != phy ) |
---|
2145 | return -1; /* invalid index */ |
---|
2146 | |
---|
2147 | phy = sc->sc_phyaddr; |
---|
2148 | |
---|
2149 | do { |
---|
2150 | DELAY(10); |
---|
2151 | data = GT_READ(sc, ETH_ESMIR); |
---|
2152 | } while ((data & ETH_ESMIR_Busy) && count-- > 0); |
---|
2153 | |
---|
2154 | if (count == 0) { |
---|
2155 | fprintf(stderr,"%s: mii read for phy %d reg %d busied out\n", |
---|
2156 | sc->sc_dev.dv_xname, phy, reg); |
---|
2157 | *pval = ETH_ESMIR_Value_GET(data); |
---|
2158 | return -1; |
---|
2159 | } |
---|
2160 | |
---|
2161 | GT_WRITE(sc, ETH_ESMIR, ETH_ESMIR_READ(phy, reg)); |
---|
2162 | |
---|
2163 | count = 10000; |
---|
2164 | do { |
---|
2165 | DELAY(10); |
---|
2166 | data = GT_READ(sc, ETH_ESMIR); |
---|
2167 | } while ((data & ETH_ESMIR_ReadValid) == 0 && count-- > 0); |
---|
2168 | |
---|
2169 | if (count == 0) |
---|
2170 | printf("%s: mii read for phy %d reg %d timed out\n", |
---|
2171 | sc->sc_dev.dv_xname, phy, reg); |
---|
2172 | #if defined(GTMIIDEBUG) |
---|
2173 | printf("%s: mii_read(%d, %d): %#x data %#x\n", |
---|
2174 | sc->sc_dev.dv_xname, phy, reg, |
---|
2175 | data, ETH_ESMIR_Value_GET(data)); |
---|
2176 | #endif |
---|
2177 | *pval = ETH_ESMIR_Value_GET(data); |
---|
2178 | return 0; |
---|
2179 | } |
---|
2180 | |
---|
2181 | int |
---|
2182 | gfe_mii_write(int phy, void *arg, unsigned reg, uint32_t value) |
---|
2183 | { |
---|
2184 | struct gfe_softc *sc = arg; |
---|
2185 | uint32_t data; |
---|
2186 | int count = 10000; |
---|
2187 | |
---|
2188 | if ( 0 != phy ) |
---|
2189 | return -1; /* invalid index */ |
---|
2190 | |
---|
2191 | phy = sc->sc_phyaddr; |
---|
2192 | |
---|
2193 | do { |
---|
2194 | DELAY(10); |
---|
2195 | data = GT_READ(sc, ETH_ESMIR); |
---|
2196 | } while ((data & ETH_ESMIR_Busy) && count-- > 0); |
---|
2197 | |
---|
2198 | if (count == 0) { |
---|
2199 | fprintf(stderr, "%s: mii write for phy %d reg %d busied out (busy)\n", |
---|
2200 | sc->sc_dev.dv_xname, phy, reg); |
---|
2201 | return -1; |
---|
2202 | } |
---|
2203 | |
---|
2204 | GT_WRITE(sc, ETH_ESMIR, |
---|
2205 | ETH_ESMIR_WRITE(phy, reg, value)); |
---|
2206 | |
---|
2207 | count = 10000; |
---|
2208 | do { |
---|
2209 | DELAY(10); |
---|
2210 | data = GT_READ(sc, ETH_ESMIR); |
---|
2211 | } while ((data & ETH_ESMIR_Busy) && count-- > 0); |
---|
2212 | |
---|
2213 | if (count == 0) |
---|
2214 | printf("%s: mii write for phy %d reg %d timed out\n", |
---|
2215 | sc->sc_dev.dv_xname, phy, reg); |
---|
2216 | #if defined(GTMIIDEBUG) |
---|
2217 | printf("%s: mii_write(%d, %d, %#x)\n", |
---|
2218 | sc->sc_dev.dv_xname, phy, reg, value); |
---|
2219 | #endif |
---|
2220 | return 0; |
---|
2221 | } |
---|
2222 | |
---|
2223 | #endif |
---|
/*
 * Master start/stop/reconfigure entry point ("whack" the interface).
 *
 * op selects the action; the switch deliberately falls through so that
 * RESTART performs a Tx stop and then the full START sequence, and
 * START falls into CHANGE to program the enable bits and kick the
 * transmit path.  STOP breaks out to the shutdown code at the bottom.
 *
 * Returns 0 on success or the first error encountered; on error (and
 * for STOP) the device is disabled and Rx/Tx/hash resources are torn
 * down.
 */
int
gfe_whack(struct gfe_softc *sc, enum gfe_whack_op op)
{
	int error = 0;
	GE_FUNC_ENTER(sc, "gfe_whack");

	switch (op) {
	case GE_WHACK_RESTART:
#ifndef GE_NOTX
		gfe_tx_stop(sc, op);
#endif
		/* sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING; */
		/* FALLTHROUGH */
	case GE_WHACK_START:
#ifndef GE_NOHASH
		/* lazily allocate and (on first start) populate the
		 * address-filter hash table */
		if (error == 0 && sc->sc_hashtable == NULL) {
			error = gfe_hash_alloc(sc);
			if (error)
				break;
		}
		if (op != GE_WHACK_RESTART)
			gfe_hash_fill(sc);
#endif
#ifndef GE_NORX
		/* a restart keeps the existing Rx ring; a fresh start
		 * must prime it */
		if (op != GE_WHACK_RESTART) {
			error = gfe_rx_prime(sc);
			if (error)
				break;
		}
#endif
#ifndef GE_NOTX
		error = gfe_tx_start(sc, GE_TXPRIO_HI);
		if (error)
			break;
#endif
		/* enable the port and unmask interrupts */
		sc->sc_ec.ec_if.if_flags |= IFF_RUNNING;
		GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN);
		GE_WRITE(sc, EPCXR, sc->sc_pcxr);
		GE_WRITE(sc, EICR, 0);
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
#ifndef GE_NOHASH
		GE_WRITE(sc, EHTPR, sc->sc_hash_mem.gdm_map->dm_segs->ds_addr);
#endif
#ifndef GE_NORX
		/* start Rx DMA */
		GE_WRITE(sc, ESDCMR, ETH_ESDCMR_ERD);
		sc->sc_flags |= GE_RXACTIVE;
#endif
		/* FALLTHROUGH */
	case GE_WHACK_CHANGE:
		GE_DPRINTF(sc, ("(pcr=%#x,imr=%#x)",
		    GE_READ(sc, EPCR), GE_READ(sc, EIMR)));
		GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN);
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
		gfe_ifstart(&sc->sc_ec.ec_if);
		GE_DPRINTF(sc, ("(ectdp0=%#x, ectdp1=%#x)",
		    GE_READ(sc, ECTDP0), GE_READ(sc, ECTDP1)));
		GE_FUNC_EXIT(sc, "");
		return error;
	case GE_WHACK_STOP:
		break;
	}

	/* shutdown path: reached on GE_WHACK_STOP or on any error above */
#ifdef GE_DEBUG
	if (error)
		GE_DPRINTF(sc, (" failed: %d\n", error));
#endif
	GE_WRITE(sc, EPCR, sc->sc_pcr);
	GE_WRITE(sc, EIMR, 0);
	sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING;
#ifndef GE_NOTX
	gfe_tx_stop(sc, GE_WHACK_STOP);
#endif
#ifndef GE_NORX
	gfe_rx_stop(sc, GE_WHACK_STOP);
#endif
#ifndef GE_NOHASH
	if ((sc->sc_flags & GE_NOFREE) == 0) {
		gfe_dmamem_free(sc, &sc->sc_hash_mem);
		sc->sc_hashtable = NULL;
	}
#endif

	GE_FUNC_EXIT(sc, "");
	return error;
}
---|
2309 | |
---|
/*
 * Compute the GT address-filter hash-table index for an Ethernet MAC
 * address, mirroring the hardware's hash function.
 *
 * The hardware first nibble-swaps every byte of the 48-bit address
 * (0x123456789abc -> 0x21436587a9cb); the bit-shuffle cascades below
 * implement that swap on the two 24-bit halves (add0 = low three bytes,
 * add1 = high three bytes).  Then one of two hash functions is applied
 * depending on the hash-mode bit (ETH_EPCR_HM) in the port config.
 *
 * Returns the table index, masked to 11 or 15 bits depending on the
 * configured table size (ETH_EPCR_HS_512).
 */
int
gfe_hash_compute(struct gfe_softc *sc, const uint8_t eaddr[ETHER_ADDR_LEN])
{
	uint32_t w0, add0, add1;
	uint32_t result;
#ifdef __rtems__
	SPRINTFVARDECL;
#endif

	GE_FUNC_ENTER(sc, "gfe_hash_compute");
	/* low half: bytes 5..3, then nibble-swap each byte via three
	 * swap-in-place steps (4-bit, 2-bit, 1-bit exchanges) */
	add0 = ((uint32_t) eaddr[5] << 0) |
	    ((uint32_t) eaddr[4] << 8) |
	    ((uint32_t) eaddr[3] << 16);

	add0 = ((add0 & 0x00f0f0f0) >> 4) | ((add0 & 0x000f0f0f) << 4);
	add0 = ((add0 & 0x00cccccc) >> 2) | ((add0 & 0x00333333) << 2);
	add0 = ((add0 & 0x00aaaaaa) >> 1) | ((add0 & 0x00555555) << 1);

	/* high half: bytes 2..0, same nibble swap */
	add1 = ((uint32_t) eaddr[2] << 0) |
	    ((uint32_t) eaddr[1] << 8) |
	    ((uint32_t) eaddr[0] << 16);

	add1 = ((add1 & 0x00f0f0f0) >> 4) | ((add1 & 0x000f0f0f) << 4);
	add1 = ((add1 & 0x00cccccc) >> 2) | ((add1 & 0x00333333) << 2);
	add1 = ((add1 & 0x00aaaaaa) >> 1) | ((add1 & 0x00555555) << 1);

	GE_DPRINTF(sc, ("%s=", ether_sprintf(eaddr)));
	/*
	 * hashResult is the 15 bits Hash entry address.
	 * ethernetADD is a 48 bit number, which is derived from the Ethernet
	 * MAC address, by nibble swapping in every byte (i.e MAC address
	 * of 0x123456789abc translates to ethernetADD of 0x21436587a9cb).
	 */

	if ((sc->sc_pcr & ETH_EPCR_HM) == 0) {
		/*
		 * hashResult[14:0] = hashFunc0(ethernetADD[47:0])
		 *
		 * hashFunc0 calculates the hashResult in the following manner:
		 *   hashResult[ 8:0] = ethernetADD[14:8,1,0]
		 *	XOR ethernetADD[23:15] XOR ethernetADD[32:24]
		 */
		result = (add0 & 3) | ((add0 >> 6) & ~3);
		result ^= (add0 >> 15) ^ (add1 >> 0);
		result &= 0x1ff;
		/*
		 *   hashResult[14:9] = ethernetADD[7:2]
		 */
		result |= (add0 & ~3) << 7;	/* excess bits will be masked */
		GE_DPRINTF(sc, ("0(%#"PRIx32")", result & 0x7fff));
	} else {
#define	TRIBITFLIP	073516240	/* yes its in octal */
		/*
		 * hashResult[14:0] = hashFunc1(ethernetADD[47:0])
		 *
		 * hashFunc1 calculates the hashResult in the following manner:
		 *   hashResult[08:00] = ethernetADD[06:14]
		 *	XOR ethernetADD[15:23] XOR ethernetADD[24:32]
		 */
		w0 = ((add0 >> 6) ^ (add0 >> 15) ^ (add1)) & 0x1ff;
		/*
		 * Now bitswap those 9 bits (TRIBITFLIP is a 3-bit-reversal
		 * lookup table packed into one octal constant)
		 */
		result = 0;
		result |= ((TRIBITFLIP >> (((w0 >> 0) & 7) * 3)) & 7) << 6;
		result |= ((TRIBITFLIP >> (((w0 >> 3) & 7) * 3)) & 7) << 3;
		result |= ((TRIBITFLIP >> (((w0 >> 6) & 7) * 3)) & 7) << 0;

		/*
		 *   hashResult[14:09] = ethernetADD[00:05]
		 */
		result |= ((TRIBITFLIP >> (((add0 >> 0) & 7) * 3)) & 7) << 12;
		result |= ((TRIBITFLIP >> (((add0 >> 3) & 7) * 3)) & 7) << 9;
		GE_DPRINTF(sc, ("1(%#"PRIx32")", result));
	}
	GE_FUNC_EXIT(sc, "");
	return result & ((sc->sc_pcr & ETH_EPCR_HS_512) ? 0x7ff : 0x7fff);
}
---|
2388 | |
---|
2389 | int |
---|
2390 | gfe_hash_entry_op(struct gfe_softc *sc, enum gfe_hash_op op, |
---|
2391 | enum gfe_rxprio prio, const uint8_t eaddr[ETHER_ADDR_LEN]) |
---|
2392 | { |
---|
2393 | uint64_t he; |
---|
2394 | uint64_t *maybe_he_p = NULL; |
---|
2395 | int limit; |
---|
2396 | int hash; |
---|
2397 | int maybe_hash = 0; |
---|
2398 | |
---|
2399 | GE_FUNC_ENTER(sc, "gfe_hash_entry_op"); |
---|
2400 | |
---|
2401 | hash = gfe_hash_compute(sc, eaddr); |
---|
2402 | |
---|
2403 | if (sc->sc_hashtable == NULL) { |
---|
2404 | panic("%s:%d: hashtable == NULL!", sc->sc_dev.dv_xname, |
---|
2405 | __LINE__); |
---|
2406 | } |
---|
2407 | |
---|
2408 | /* |
---|
2409 | * Assume we are going to insert so create the hash entry we |
---|
2410 | * are going to insert. We also use it to match entries we |
---|
2411 | * will be removing. |
---|
2412 | */ |
---|
2413 | he = ((uint64_t) eaddr[5] << 43) | |
---|
2414 | ((uint64_t) eaddr[4] << 35) | |
---|
2415 | ((uint64_t) eaddr[3] << 27) | |
---|
2416 | ((uint64_t) eaddr[2] << 19) | |
---|
2417 | ((uint64_t) eaddr[1] << 11) | |
---|
2418 | ((uint64_t) eaddr[0] << 3) | |
---|
2419 | HSH_PRIO_INS(prio) | HSH_V | HSH_R; |
---|
2420 | |
---|
2421 | /* |
---|
2422 | * The GT will search upto 12 entries for a hit, so we must mimic that. |
---|
2423 | */ |
---|
2424 | hash &= sc->sc_hashmask / sizeof(he); |
---|
2425 | for (limit = HSH_LIMIT; limit > 0 ; --limit) { |
---|
2426 | /* |
---|
2427 | * Does the GT wrap at the end, stop at the, or overrun the |
---|
2428 | * end? Assume it wraps for now. Stash a copy of the |
---|
2429 | * current hash entry. |
---|
2430 | */ |
---|
2431 | uint64_t *he_p = &sc->sc_hashtable[hash]; |
---|
2432 | uint64_t thishe = *he_p; |
---|
2433 | |
---|
2434 | /* |
---|
2435 | * If the hash entry isn't valid, that break the chain. And |
---|
2436 | * this entry a good candidate for reuse. |
---|
2437 | */ |
---|
2438 | if ((thishe & HSH_V) == 0) { |
---|
2439 | maybe_he_p = he_p; |
---|
2440 | break; |
---|
2441 | } |
---|
2442 | |
---|
2443 | /* |
---|
2444 | * If the hash entry has the same address we are looking for |
---|
2445 | * then ... if we are removing and the skip bit is set, its |
---|
2446 | * already been removed. if are adding and the skip bit is |
---|
2447 | * clear, then its already added. In either return EBUSY |
---|
2448 | * indicating the op has already been done. Otherwise flip |
---|
2449 | * the skip bit and return 0. |
---|
2450 | */ |
---|
2451 | if (((he ^ thishe) & HSH_ADDR_MASK) == 0) { |
---|
2452 | if (((op == GE_HASH_REMOVE) && (thishe & HSH_S)) || |
---|
2453 | ((op == GE_HASH_ADD) && (thishe & HSH_S) == 0)) |
---|
2454 | return EBUSY; |
---|
2455 | *he_p = thishe ^ HSH_S; |
---|
2456 | bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, |
---|
2457 | hash * sizeof(he), sizeof(he), |
---|
2458 | BUS_DMASYNC_PREWRITE); |
---|
2459 | GE_FUNC_EXIT(sc, "^"); |
---|
2460 | return 0; |
---|
2461 | } |
---|
2462 | |
---|
2463 | /* |
---|
2464 | * If we haven't found a slot for the entry and this entry |
---|
2465 | * is currently being skipped, return this entry. |
---|
2466 | */ |
---|
2467 | if (maybe_he_p == NULL && (thishe & HSH_S)) { |
---|
2468 | maybe_he_p = he_p; |
---|
2469 | maybe_hash = hash; |
---|
2470 | } |
---|
2471 | |
---|
2472 | hash = (hash + 1) & (sc->sc_hashmask / sizeof(he)); |
---|
2473 | } |
---|
2474 | |
---|
2475 | /* |
---|
2476 | * If we got here, then there was no entry to remove. |
---|
2477 | */ |
---|
2478 | if (op == GE_HASH_REMOVE) { |
---|
2479 | GE_FUNC_EXIT(sc, "?"); |
---|
2480 | return ENOENT; |
---|
2481 | } |
---|
2482 | |
---|
2483 | /* |
---|
2484 | * If we couldn't find a slot, return an error. |
---|
2485 | */ |
---|
2486 | if (maybe_he_p == NULL) { |
---|
2487 | GE_FUNC_EXIT(sc, "!"); |
---|
2488 | return ENOSPC; |
---|
2489 | } |
---|
2490 | |
---|
2491 | /* Update the entry. |
---|
2492 | */ |
---|
2493 | *maybe_he_p = he; |
---|
2494 | bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, |
---|
2495 | maybe_hash * sizeof(he), sizeof(he), BUS_DMASYNC_PREWRITE); |
---|
2496 | GE_FUNC_EXIT(sc, "+"); |
---|
2497 | return 0; |
---|
2498 | } |
---|
2499 | |
---|
2500 | #ifndef __rtems__ |
---|
/*
 * Multicast list change hook (NetBSD build).  Updates the hardware hash
 * filter for one SIOCADDMULTI/SIOCDELMULTI request.
 *
 * Returns 0 when the filter was updated in place, ENETRESET when the
 * caller must reinitialize the interface to regenerate the whole
 * filter (range removal, or a full table on add).
 */
int
gfe_hash_multichg(struct ethercom *ec, const struct ether_multi *enm, u_long cmd)
{
	struct gfe_softc * const sc = ec->ec_if.if_softc;
	int error;
	enum gfe_hash_op op;
	enum gfe_rxprio prio;
#ifdef __rtems__
	SPRINTFVARDECL;
#endif

	GE_FUNC_ENTER(sc, "hash_multichg");
	/*
	 * Is this a wildcard entry?  If so and its being removed, recompute.
	 * (a range of addresses cannot be represented by single hash
	 * entries, so fall back to promiscuous-multicast mode)
	 */
	if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) {
		if (cmd == SIOCDELMULTI) {
			GE_FUNC_EXIT(sc, "");
			return ENETRESET;
		}

		/*
		 * Switch in promiscuous mode for the range add.
		 */
		sc->sc_flags |= GE_ALLMULTI;
		if ((sc->sc_pcr & ETH_EPCR_PM) == 0) {
			sc->sc_pcr |= ETH_EPCR_PM;
			GE_WRITE(sc, EPCR, sc->sc_pcr);
			GE_FUNC_EXIT(sc, "");
			return 0;
		}
		GE_FUNC_EXIT(sc, "");
		return ENETRESET;
	}

	prio = GE_RXPRIO_MEDLO;
	op = (cmd == SIOCDELMULTI ? GE_HASH_REMOVE : GE_HASH_ADD);

	/* no table yet: nothing to update, it will be filled at start */
	if (sc->sc_hashtable == NULL) {
		GE_FUNC_EXIT(sc, "");
		return 0;
	}

	error = gfe_hash_entry_op(sc, op, prio, enm->enm_addrlo);
	if (error == EBUSY) {
		/* already in the requested state; harmless */
		printf("%s: multichg: tried to %s %s again\n",
		       sc->sc_dev.dv_xname,
		       cmd == SIOCDELMULTI ? "remove" : "add",
		       ether_sprintf(enm->enm_addrlo));
		GE_FUNC_EXIT(sc, "");
		return 0;
	}

	if (error == ENOENT) {
		printf("%s: multichg: failed to remove %s: not in table\n",
		       sc->sc_dev.dv_xname,
		       ether_sprintf(enm->enm_addrlo));
		GE_FUNC_EXIT(sc, "");
		return 0;
	}

	if (error == ENOSPC) {
		/* table exhausted: force a reinit to rebuild it from scratch */
		printf("%s: multichg: failed to add %s: no space; regenerating table\n",
		       sc->sc_dev.dv_xname,
		       ether_sprintf(enm->enm_addrlo));
		GE_FUNC_EXIT(sc, "");
		return ENETRESET;
	}
	GE_DPRINTF(sc, ("%s: multichg: %s: %s succeeded\n",
	    sc->sc_dev.dv_xname,
	    cmd == SIOCDELMULTI ? "remove" : "add",
	    ether_sprintf(enm->enm_addrlo)));
	GE_FUNC_EXIT(sc, "");
	return 0;
}
---|
2576 | #endif |
---|
2577 | |
---|
2578 | int |
---|
2579 | gfe_hash_fill(struct gfe_softc *sc) |
---|
2580 | { |
---|
2581 | struct ether_multistep step; |
---|
2582 | struct ether_multi *enm; |
---|
2583 | int error; |
---|
2584 | |
---|
2585 | GE_FUNC_ENTER(sc, "gfe_hash_fill"); |
---|
2586 | |
---|
2587 | #ifndef __rtems__ |
---|
2588 | error = gfe_hash_entry_op(sc, GE_HASH_ADD, GE_RXPRIO_HI, |
---|
2589 | LLADDR(sc->sc_ec.ec_if.if_sadl)); |
---|
2590 | #else |
---|
2591 | error = gfe_hash_entry_op(sc, GE_HASH_ADD, GE_RXPRIO_HI, sc->sc_ec.ac_enaddr); |
---|
2592 | #endif |
---|
2593 | if (error) { |
---|
2594 | GE_FUNC_EXIT(sc, "!"); |
---|
2595 | return error; |
---|
2596 | } |
---|
2597 | |
---|
2598 | sc->sc_flags &= ~GE_ALLMULTI; |
---|
2599 | if ((sc->sc_ec.ec_if.if_flags & IFF_PROMISC) == 0) |
---|
2600 | sc->sc_pcr &= ~ETH_EPCR_PM; |
---|
2601 | else |
---|
2602 | sc->sc_pcr |= ETH_EPCR_PM; |
---|
2603 | ETHER_FIRST_MULTI(step, &sc->sc_ec, enm); |
---|
2604 | while (enm != NULL) { |
---|
2605 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { |
---|
2606 | sc->sc_flags |= GE_ALLMULTI; |
---|
2607 | sc->sc_pcr |= ETH_EPCR_PM; |
---|
2608 | } else { |
---|
2609 | error = gfe_hash_entry_op(sc, GE_HASH_ADD, |
---|
2610 | GE_RXPRIO_MEDLO, enm->enm_addrlo); |
---|
2611 | if (error == ENOSPC) |
---|
2612 | break; |
---|
2613 | } |
---|
2614 | ETHER_NEXT_MULTI(step, enm); |
---|
2615 | } |
---|
2616 | |
---|
2617 | GE_FUNC_EXIT(sc, ""); |
---|
2618 | return error; |
---|
2619 | } |
---|
2620 | |
---|
2621 | int |
---|
2622 | gfe_hash_alloc(struct gfe_softc *sc) |
---|
2623 | { |
---|
2624 | int error; |
---|
2625 | GE_FUNC_ENTER(sc, "gfe_hash_alloc"); |
---|
2626 | sc->sc_hashmask = (sc->sc_pcr & ETH_EPCR_HS_512 ? 16 : 256)*1024 - 1; |
---|
2627 | error = gfe_dmamem_alloc(sc, &sc->sc_hash_mem, 1, sc->sc_hashmask + 1, |
---|
2628 | BUS_DMA_NOCACHE); |
---|
2629 | if (error) { |
---|
2630 | printf("%s: failed to allocate %d bytes for hash table: %d\n", |
---|
2631 | sc->sc_dev.dv_xname, sc->sc_hashmask + 1, error); |
---|
2632 | GE_FUNC_EXIT(sc, ""); |
---|
2633 | return error; |
---|
2634 | } |
---|
2635 | sc->sc_hashtable = (uint64_t *) sc->sc_hash_mem.gdm_kva; |
---|
2636 | memset(sc->sc_hashtable, 0, sc->sc_hashmask + 1); |
---|
2637 | bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, |
---|
2638 | 0, sc->sc_hashmask + 1, BUS_DMASYNC_PREWRITE); |
---|
2639 | GE_FUNC_EXIT(sc, ""); |
---|
2640 | return 0; |
---|
2641 | } |
---|