1 | /* $NetBSD: if_gfe.c,v 1.13.8.1 2005/04/29 11:28:56 kent Exp $ */ |
---|
2 | |
---|
3 | /* |
---|
4 | * Copyright (c) 2002 Allegro Networks, Inc., Wasabi Systems, Inc. |
---|
5 | * All rights reserved. |
---|
6 | * |
---|
7 | * Copyright 2004: Enable hardware cache snooping. Kate Feng <feng1@bnl.gov> |
---|
8 | * |
---|
9 | * Redistribution and use in source and binary forms, with or without |
---|
10 | * modification, are permitted provided that the following conditions |
---|
11 | * are met: |
---|
12 | * 1. Redistributions of source code must retain the above copyright |
---|
13 | * notice, this list of conditions and the following disclaimer. |
---|
14 | * 2. Redistributions in binary form must reproduce the above copyright |
---|
15 | * notice, this list of conditions and the following disclaimer in the |
---|
16 | * documentation and/or other materials provided with the distribution. |
---|
17 | * 3. All advertising materials mentioning features or use of this software |
---|
18 | * must display the following acknowledgement: |
---|
19 | * This product includes software developed for the NetBSD Project by |
---|
20 | * Allegro Networks, Inc., and Wasabi Systems, Inc. |
---|
21 | * 4. The name of Allegro Networks, Inc. may not be used to endorse |
---|
22 | * or promote products derived from this software without specific prior |
---|
23 | * written permission. |
---|
24 | * 5. The name of Wasabi Systems, Inc. may not be used to endorse |
---|
25 | * or promote products derived from this software without specific prior |
---|
26 | * written permission. |
---|
27 | * |
---|
28 | * THIS SOFTWARE IS PROVIDED BY ALLEGRO NETWORKS, INC. AND |
---|
29 | * WASABI SYSTEMS, INC. ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, |
---|
30 | * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY |
---|
31 | * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. |
---|
32 | * IN NO EVENT SHALL EITHER ALLEGRO NETWORKS, INC. OR WASABI SYSTEMS, INC. |
---|
33 | * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
---|
34 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
---|
35 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
---|
36 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
---|
37 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
---|
38 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
---|
39 | * POSSIBILITY OF SUCH DAMAGE. |
---|
40 | */ |
---|
41 | |
---|
42 | /* |
---|
43 | * if_gfe.c -- GT ethernet MAC driver |
---|
44 | */ |
---|
45 | |
---|
46 | /* Enable hardware cache snooping; |
---|
47 | * Copyright Shuchen K. Feng <feng1@bnl.gov>, 2004 |
---|
48 | */ |
---|
49 | |
---|
50 | #ifdef __rtems__ |
---|
51 | #include "rtemscompat_defs.h" |
---|
52 | #include "../porting/rtemscompat.h" |
---|
53 | #include <string.h> |
---|
54 | #include <stdio.h> |
---|
55 | #include <inttypes.h> |
---|
56 | #endif |
---|
57 | |
---|
58 | #include <sys/cdefs.h> |
---|
59 | #ifndef __rtems__ |
---|
60 | __KERNEL_RCSID(0, "$NetBSD: if_gfe.c,v 1.13.8.1 2005/04/29 11:28:56 kent Exp $"); |
---|
61 | |
---|
62 | #include "opt_inet.h" |
---|
63 | #include "bpfilter.h" |
---|
64 | #endif |
---|
65 | |
---|
66 | #include <sys/param.h> |
---|
67 | #include <sys/types.h> |
---|
68 | #ifndef __rtems__ |
---|
69 | #include <sys/inttypes.h> |
---|
70 | #include <sys/queue.h> |
---|
71 | #endif |
---|
72 | |
---|
73 | #ifndef __rtems__ |
---|
74 | #include <uvm/uvm_extern.h> |
---|
75 | |
---|
76 | #include <sys/callout.h> |
---|
77 | #include <sys/device.h> |
---|
78 | #endif |
---|
79 | #include <sys/errno.h> |
---|
80 | #include <sys/mbuf.h> |
---|
81 | #include <sys/socket.h> |
---|
82 | #include <sys/sockio.h> |
---|
83 | |
---|
84 | #ifndef __rtems__ |
---|
85 | #include <machine/bus.h> |
---|
86 | #endif |
---|
87 | |
---|
88 | #include <net/if.h> |
---|
89 | #include <net/if_dl.h> |
---|
90 | #include <net/if_media.h> |
---|
91 | #ifndef __rtems__ |
---|
92 | #include <net/if_ether.h> |
---|
93 | #else |
---|
94 | #include <netinet/in.h> |
---|
95 | #include <netinet/if_ether.h> |
---|
96 | #include <net/ethernet.h> |
---|
97 | #include <rtems/rtems_mii_ioctl.h> |
---|
98 | #endif |
---|
99 | |
---|
100 | #ifdef INET |
---|
101 | #include <netinet/in.h> |
---|
102 | #ifndef __rtems__ |
---|
103 | #include <netinet/if_inarp.h> |
---|
104 | #endif |
---|
105 | #endif |
---|
106 | #if NBPFILTER > 0 |
---|
107 | #include <net/bpf.h> |
---|
108 | #endif |
---|
109 | |
---|
110 | #ifndef __rtems__ |
---|
111 | #include <dev/mii/miivar.h> |
---|
112 | |
---|
113 | #include <dev/marvell/gtintrreg.h> |
---|
114 | #include <dev/marvell/gtethreg.h> |
---|
115 | |
---|
116 | #include <dev/marvell/gtvar.h> |
---|
117 | #include <dev/marvell/if_gfevar.h> |
---|
118 | #else |
---|
119 | #include <bsp/gtintrreg.h> |
---|
120 | #include <bsp/gtreg.h> |
---|
121 | #include "gtethreg.h" |
---|
122 | |
---|
123 | #include "gtvar.h" |
---|
124 | #include "if_gfevar.h" |
---|
125 | #include "../porting/rtemscompat1.h" |
---|
126 | #define ether_sprintf ether_sprintf_macro |
---|
127 | #endif |
---|
128 | |
---|
/*
 * Per-MAC Ethernet register access: ETH__<reg> offsets are taken
 * relative to this MAC's register window (sc_memh).
 */
#define GE_READ(sc, reg) \
	bus_space_read_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg)
#define GE_WRITE(sc, reg, v) \
	bus_space_write_4((sc)->sc_gt_memt, (sc)->sc_memh, ETH__ ## reg, (v))

/*
 * Whole-chip GT register access: 'reg' is an absolute offset into the
 * full system-controller register window (sc_gt_memh).
 */
#define GT_READ(sc, reg) \
	bus_space_read_4((sc)->sc_gt_memt, (sc)->sc_gt_memh, reg)
#define GT_WRITE(sc, reg, v) \
	bus_space_write_4((sc)->sc_gt_memt, (sc)->sc_gt_memh, reg, (v))
---|
138 | |
---|
#define GE_DEBUG
#if 0
#define GE_NOHASH
#define GE_NORX
#endif

#ifdef GE_DEBUG
/*
 * Debug trace helpers, active only while the interface has IFF_DEBUG
 * set.  'a' is a fully parenthesized printf argument list, e.g.
 * GE_DPRINTF(sc, ("x=%d", x)).  ENTER/EXIT bracket a function's trace
 * output with "[func" ... "str]".
 */
#define GE_DPRINTF(sc, a) do \
	if ((sc)->sc_ec.ec_if.if_flags & IFF_DEBUG) \
		printf a; \
	while (0)
#define GE_FUNC_ENTER(sc, func)	GE_DPRINTF(sc, ("[" func))
#define GE_FUNC_EXIT(sc, str)	GE_DPRINTF(sc, (str "]"))
#else
#define GE_DPRINTF(sc, a)	do { } while (0)
#define GE_FUNC_ENTER(sc, func)	do { } while (0)
#define GE_FUNC_EXIT(sc, str)	do { } while (0)
#endif
---|
/* Operations understood by gfe_whack(), the interface state machine. */
enum gfe_whack_op {
	GE_WHACK_START, GE_WHACK_RESTART,
	GE_WHACK_CHANGE, GE_WHACK_STOP
};

/* Operations on the hardware address-filter hash table. */
enum gfe_hash_op {
	GE_HASH_ADD, GE_HASH_REMOVE,
};
---|
165 | |
---|
166 | |
---|
/*
 * Byte order of the descriptors shared with the GT: big-endian here
 * (the #else branch would select little-endian layouts).
 */
#if 1
#define	htogt32(a)		htobe32(a)
#define	gt32toh(a)		be32toh(a)
#else
#define	htogt32(a)		htole32(a)
#define	gt32toh(a)		le32toh(a)
#endif

#ifdef __rtems__
/*
 * Map the NetBSD names onto htonl/ntohl, which are byte-for-byte
 * equivalent since network order is big-endian.  Defining them after
 * the htogt32/gt32toh macros above is fine: expansion happens at the
 * use site.
 */
#define htobe32 htonl
#define be32toh ntohl
#endif
---|
179 | |
---|
/* Sync a single RX descriptor (index n) between CPU and device views. */
#define GE_RXDSYNC(sc, rxq, n, ops) \
	bus_dmamap_sync((sc)->sc_dmat, (rxq)->rxq_desc_mem.gdm_map, \
	    (n) * sizeof((rxq)->rxq_descs[0]), sizeof((rxq)->rxq_descs[0]), \
	    (ops))
#define	GE_RXDPRESYNC(sc, rxq, n) \
	GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define	GE_RXDPOSTSYNC(sc, rxq, n) \
	GE_RXDSYNC(sc, rxq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

/* Sync a single TX descriptor (index n) between CPU and device views. */
#define GE_TXDSYNC(sc, txq, n, ops) \
	bus_dmamap_sync((sc)->sc_dmat, (txq)->txq_desc_mem.gdm_map, \
	    (n) * sizeof((txq)->txq_descs[0]), sizeof((txq)->txq_descs[0]), \
	    (ops))
#define	GE_TXDPRESYNC(sc, txq, n) \
	GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)
#define	GE_TXDPOSTSYNC(sc, txq, n) \
	GE_TXDSYNC(sc, txq, n, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)

/*
 * Expands to nothing so the driver internals keep external linkage
 * (presumably to aid debugging); define to 'static' to hide them.
 */
#define STATIC
---|
199 | |
---|
200 | #ifndef __rtems__ |
---|
201 | STATIC int gfe_match (struct device *, struct cfdata *, void *); |
---|
202 | STATIC void gfe_attach (struct device *, struct device *, void *); |
---|
203 | #else |
---|
204 | STATIC int gfe_probe (device_t); |
---|
205 | STATIC int gfe_attach (device_t); |
---|
206 | STATIC void gfe_init (void*); |
---|
207 | #endif |
---|
208 | |
---|
209 | STATIC int gfe_dmamem_alloc(struct gfe_softc *, struct gfe_dmamem *, int, |
---|
210 | size_t, int); |
---|
211 | STATIC void gfe_dmamem_free(struct gfe_softc *, struct gfe_dmamem *); |
---|
212 | |
---|
213 | #ifndef __rtems__ |
---|
214 | STATIC int gfe_ifioctl (struct ifnet *, u_long, caddr_t); |
---|
215 | #else |
---|
216 | STATIC int gfe_ifioctl (struct ifnet *, ioctl_command_t, caddr_t); |
---|
217 | #endif |
---|
218 | STATIC void gfe_ifstart (struct ifnet *); |
---|
219 | STATIC void gfe_ifwatchdog (struct ifnet *); |
---|
220 | |
---|
221 | #ifndef __rtems__ |
---|
222 | STATIC int gfe_mii_mediachange (struct ifnet *); |
---|
223 | STATIC void gfe_mii_mediastatus (struct ifnet *, struct ifmediareq *); |
---|
224 | STATIC int gfe_mii_read (struct device *, int, int); |
---|
225 | STATIC void gfe_mii_write (struct device *, int, int, int); |
---|
226 | STATIC void gfe_mii_statchg (struct device *); |
---|
227 | #endif |
---|
228 | |
---|
229 | STATIC void gfe_tick(void *arg); |
---|
230 | |
---|
231 | STATIC void gfe_tx_restart(void *); |
---|
232 | STATIC void gfe_assign_desc(volatile struct gt_eth_desc *, struct mbuf *, |
---|
233 | uint32_t); |
---|
234 | STATIC int gfe_tx_enqueue(struct gfe_softc *, enum gfe_txprio); |
---|
235 | STATIC uint32_t gfe_tx_done(struct gfe_softc *, enum gfe_txprio, uint32_t); |
---|
236 | STATIC void gfe_tx_cleanup(struct gfe_softc *, enum gfe_txprio, int); |
---|
237 | STATIC int gfe_tx_txqalloc(struct gfe_softc *, enum gfe_txprio); |
---|
238 | STATIC int gfe_tx_start(struct gfe_softc *, enum gfe_txprio); |
---|
239 | STATIC void gfe_tx_stop(struct gfe_softc *, enum gfe_whack_op); |
---|
240 | |
---|
241 | STATIC void gfe_rx_cleanup(struct gfe_softc *, enum gfe_rxprio); |
---|
242 | STATIC void gfe_rx_get(struct gfe_softc *, enum gfe_rxprio); |
---|
243 | STATIC int gfe_rx_prime(struct gfe_softc *); |
---|
244 | STATIC uint32_t gfe_rx_process(struct gfe_softc *, uint32_t, uint32_t); |
---|
245 | STATIC int gfe_rx_rxqalloc(struct gfe_softc *, enum gfe_rxprio); |
---|
246 | STATIC int gfe_rx_rxqinit(struct gfe_softc *, enum gfe_rxprio); |
---|
247 | STATIC void gfe_rx_stop(struct gfe_softc *, enum gfe_whack_op); |
---|
248 | |
---|
249 | STATIC int gfe_intr(void *); |
---|
250 | |
---|
251 | STATIC int gfe_whack(struct gfe_softc *, enum gfe_whack_op); |
---|
252 | |
---|
253 | STATIC int gfe_hash_compute(struct gfe_softc *, const uint8_t [ETHER_ADDR_LEN]); |
---|
254 | STATIC int gfe_hash_entry_op(struct gfe_softc *, enum gfe_hash_op, |
---|
255 | enum gfe_rxprio, const uint8_t [ETHER_ADDR_LEN]); |
---|
256 | #ifndef __rtems__ |
---|
257 | STATIC int gfe_hash_multichg(struct ethercom *, const struct ether_multi *, |
---|
258 | u_long); |
---|
259 | #endif |
---|
260 | STATIC int gfe_hash_fill(struct gfe_softc *); |
---|
261 | STATIC int gfe_hash_alloc(struct gfe_softc *); |
---|
262 | |
---|
#ifndef __rtems__
/* Linkup to the rest of the kernel (NetBSD autoconf attachment). */
CFATTACH_DECL(gfe, sizeof(struct gfe_softc),
	gfe_match, gfe_attach, NULL, NULL);
#else
/* Driver method table consumed by the RTEMS networking compat glue. */
net_drv_tbl_t METHODS = {
	n_probe  : gfe_probe,
	n_attach : gfe_attach,
	n_detach : 0,
	n_intr   : (void (*)(void*))gfe_intr,
};

int
gfe_mii_read(int phy, void *arg, unsigned reg, uint32_t *pval);
int
gfe_mii_write(int phy, void *arg, unsigned reg, uint32_t val);

/* MDIO accessors handed to rtems_mii_ioctl() for media ioctls. */
struct rtems_mdio_info
gfe_mdio_access = {
	mdio_r:   gfe_mii_read,
	mdio_w:   gfe_mii_write,
	has_gmii: 0
};

#endif
---|
288 | |
---|
289 | extern struct cfdriver gfe_cd; |
---|
290 | |
---|
291 | #ifndef __rtems__ |
---|
292 | int |
---|
293 | gfe_match(struct device *parent, struct cfdata *cf, void *aux) |
---|
294 | { |
---|
295 | struct gt_softc *gt = (struct gt_softc *) parent; |
---|
296 | struct gt_attach_args *ga = aux; |
---|
297 | uint8_t enaddr[6]; |
---|
298 | |
---|
299 | if (!GT_ETHEROK(gt, ga, &gfe_cd)) |
---|
300 | return 0; |
---|
301 | |
---|
302 | if (gtget_macaddr(gt, ga->ga_unit, enaddr) < 0) |
---|
303 | return 0; |
---|
304 | |
---|
305 | if (enaddr[0] == 0 && enaddr[1] == 0 && enaddr[2] == 0 && |
---|
306 | enaddr[3] == 0 && enaddr[4] == 0 && enaddr[5] == 0) |
---|
307 | return 0; |
---|
308 | |
---|
309 | return 1; |
---|
310 | } |
---|
311 | #else |
---|
312 | int |
---|
313 | gfe_probe(device_t dev) |
---|
314 | { |
---|
315 | switch ( BSP_getDiscoveryVersion(0) ) { |
---|
316 | case GT_64260_A: |
---|
317 | case GT_64260_B: |
---|
318 | return 0; |
---|
319 | default: |
---|
320 | break; |
---|
321 | } |
---|
322 | return -1; |
---|
323 | } |
---|
324 | |
---|
325 | void |
---|
326 | gfe_init(void *arg) |
---|
327 | { |
---|
328 | struct gfe_softc *sc = arg; |
---|
329 | if ( sc->sc_ec.ec_if.if_flags & IFF_RUNNING ) |
---|
330 | gfe_whack(sc, GE_WHACK_RESTART); |
---|
331 | else |
---|
332 | gfe_whack(sc, GE_WHACK_START); |
---|
333 | } |
---|
334 | #endif |
---|
335 | |
---|
336 | /* |
---|
337 | * Attach this instance, and then all the sub-devices |
---|
338 | */ |
---|
#ifndef __rtems__
void
gfe_attach(struct device *parent, struct device *self, void *aux)
#else
int
gfe_attach(device_t dev)
#endif
{
#ifndef __rtems__
	struct gt_attach_args * const ga = aux;
	struct gt_softc * const gt = (struct gt_softc *) parent;
	struct gfe_softc * const sc = (struct gfe_softc *) self;
#else
	struct gfe_softc * const sc = device_get_softc(dev);
#endif
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	uint32_t data;
	uint8_t enaddr[6];
	int phyaddr;
	uint32_t sdcr;
	int error;
#ifdef __rtems__
	SPRINTFVARDECL;
#endif

#ifndef __rtems__
	GT_ETHERFOUND(gt, ga);

	sc->sc_gt_memt = ga->ga_memt;
	sc->sc_gt_memh = ga->ga_memh;
	sc->sc_dmat = ga->ga_dmat;
	sc->sc_macno = ga->ga_unit;

	/* Carve this MAC's register window out of the GT register space. */
	if (bus_space_subregion(sc->sc_gt_memt, sc->sc_gt_memh,
	    ETH_BASE(sc->sc_macno), ETH_SIZE, &sc->sc_memh)) {
		aprint_error(": failed to map registers\n");
	}

	callout_init(&sc->sc_co);
#else
	/* sc_macno, irq_no and sc_gt_memh must be filled in by 'setup' */

	/* make ring sizes even numbers so that we have always multiple
	 * cache lines (paranoia)
	 */
	if ( (sc->num_rxdesc = dev->d_ifconfig->rbuf_count) & 1 )
		sc->num_rxdesc++;
	if ( 0 == sc->num_rxdesc )
		sc->num_rxdesc = 64;	/* default RX ring size */

	if ( (sc->num_txdesc = dev->d_ifconfig->xbuf_count) & 1 )
		sc->num_txdesc++;
	if ( 0 == sc->num_txdesc )
		sc->num_txdesc = 256;	/* default TX ring size */

	/* Enable hardware cache snooping;
	 * Copyright Shuchen K. Feng <feng1@bnl.gov>, 2004
	 */
	/* regs are eth0: 0xf200/0xf204, eth1 0xf220/0xf224, eth2: 0xf240/0xf244 */
	{
		uint32_t v;
		/* each MAC's register pair is 0x20 bytes apart (macno<<5) */
		v  = GT_READ(sc, ETH_ACTL_0_LO + (sc->sc_macno<<5));
		v |= RxBSnoopEn|TxBSnoopEn|RxDSnoopEn|TxDSnoopEn;
		GT_WRITE(sc, ETH_ACTL_0_LO + (sc->sc_macno<<5), v);

		v  = GT_READ(sc, ETH_ACTL_0_HI + (sc->sc_macno<<5));
		v |= HashSnoopEn;
		GT_WRITE(sc, ETH_ACTL_0_HI + (sc->sc_macno<<5), v);
	}

#endif

	/* Extract this MAC's PHY address from the PHY-address register. */
	data = bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh, ETH_EPAR);
#ifdef __rtems__
	sc->sc_phyaddr =
#endif
	phyaddr = ETH_EPAR_PhyAD_GET(data, sc->sc_macno);

#ifndef __rtems__
	gtget_macaddr(gt, sc->sc_macno, enaddr);
#else
	/* An all-zero arpcom address means 'setup' never supplied one. */
	memset( enaddr, 0, ETHER_ADDR_LEN );
	if ( !memcmp(enaddr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN) ) {
		aprint_error(": MAC address not set (pass to rtems_gfe_setup())\n");
		return -1;
	}
	/* mac address needs to be provided by 'setup' */
	memcpy(enaddr, sc->arpcom.ac_enaddr, ETHER_ADDR_LEN);
#endif

	/* Snapshot port config, extended config and interrupt mask. */
	sc->sc_pcr = GE_READ(sc, EPCR);
	sc->sc_pcxr = GE_READ(sc, EPCXR);
	sc->sc_intrmask = GE_READ(sc, EIMR) | ETH_IR_MIIPhySTC;

	aprint_normal(": address %s", ether_sprintf(enaddr));

#if defined(DEBUG)
	aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr);
#endif

	sc->sc_pcxr &= ~ETH_EPCXR_PRIOrx_Override;
#ifndef __rtems__
	/* cf_flags bit 0 selects RMII mode instead of MII */
	if (sc->sc_dev.dv_cfdata->cf_flags & 1) {
		aprint_normal(", phy %d (rmii)", phyaddr);
		sc->sc_pcxr |= ETH_EPCXR_RMIIEn;
	} else
#endif
	{
		aprint_normal(", phy %d (mii)", phyaddr);
		sc->sc_pcxr &= ~ETH_EPCXR_RMIIEn;
	}
#ifndef __rtems__
	/* cf_flags bit 1: never free rings/hash memory (GE_NOFREE) */
	if (sc->sc_dev.dv_cfdata->cf_flags & 2)
		sc->sc_flags |= GE_NOFREE;
#endif
	/* Set the max-frame-length field (bits 15:14) to 1536 bytes. */
	sc->sc_pcxr &= ~(3 << 14);
	sc->sc_pcxr |= (ETH_EPCXR_MFL_1536 << 14);

	if (sc->sc_pcr & ETH_EPCR_EN) {
		int tries = 1000;
		/*
		 * Abort transmitter and receiver and wait for them
		 * to quiesce
		 */
		GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR|ETH_ESDCMR_AT);
		do {
			delay(100);
		} while (tries-- > 0 && (GE_READ(sc, ESDCMR) & (ETH_ESDCMR_AR|ETH_ESDCMR_AT)));
	}

	sc->sc_pcr &= ~(ETH_EPCR_EN | ETH_EPCR_RBM | ETH_EPCR_PM | ETH_EPCR_PBF);

#if defined(DEBUG)
	aprint_normal(", pcr %#x, pcxr %#x", sc->sc_pcr, sc->sc_pcxr);
#endif

	/*
	 * Now turn off the GT.  If it didn't quiesce, too bad.
	 */
	GE_WRITE(sc, EPCR, sc->sc_pcr);
#ifndef __rtems__
	GE_WRITE(sc, EIMR, sc->sc_intrmask);
#else
	/* Leave interrupts cleared and fully masked until started. */
	GE_WRITE(sc, EICR, 0);
	GE_WRITE(sc, EIMR, 0);
#endif
	/* DMA config: burst size 4 (ETH_ESDCR_BSZ_4) plus RIFB. */
	sdcr = GE_READ(sc, ESDCR);
	ETH_ESDCR_BSZ_SET(sdcr, ETH_ESDCR_BSZ_4);
	sdcr |= ETH_ESDCR_RIFB;
	GE_WRITE(sc, ESDCR, sdcr);
	sc->sc_max_frame_length = 1536;

	aprint_normal("\n");
#ifndef __rtems__
	/* Hook up the MII layer and ifmedia. */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = gfe_mii_read;
	sc->sc_mii.mii_writereg = gfe_mii_write;
	sc->sc_mii.mii_statchg = gfe_mii_statchg;

	ifmedia_init(&sc->sc_mii.mii_media, 0, gfe_mii_mediachange,
		gfe_mii_mediastatus);

	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, phyaddr,
		MII_OFFSET_ANY, MIIF_NOISOLATE);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found: expose only a 'none' medium. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else {
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
#else
	if_initname(ifp, device_get_name(dev), device_get_unit(dev));
	ifp->if_mtu = ETHERMTU;
	ifp->if_output = ether_output;
	ifp->if_init = gfe_init;
	ifp->if_snd.ifq_maxlen = GE_TXDESC_MAX - 1;
	ifp->if_baudrate = 10000000;
#endif
	ifp->if_softc = sc;
	/* ifp->if_mowner = &sc->sc_mowner; */
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
#if 0
	ifp->if_flags |= IFF_DEBUG;
#endif
	ifp->if_ioctl = gfe_ifioctl;
	ifp->if_start = gfe_ifstart;
	ifp->if_watchdog = gfe_ifwatchdog;

	/* With GE_NOFREE, allocate all rings and the hash table once now. */
	if (sc->sc_flags & GE_NOFREE) {
		error = gfe_rx_rxqalloc(sc, GE_RXPRIO_HI);
		if (!error)
			error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDHI);
		if (!error)
			error = gfe_rx_rxqalloc(sc, GE_RXPRIO_MEDLO);
		if (!error)
			error = gfe_rx_rxqalloc(sc, GE_RXPRIO_LO);
		if (!error)
			error = gfe_tx_txqalloc(sc, GE_TXPRIO_HI);
		if (!error)
			error = gfe_hash_alloc(sc);
		if (error)
			aprint_error(
			    "%s: failed to allocate resources: %d\n",
			    ifp->if_xname, error);
	}

	if_attach(ifp);
#ifndef __rtems__
	ether_ifattach(ifp, enaddr);
#else
	ether_ifattach(ifp);
#endif
#if NBPFILTER > 0
	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
#endif
#if NRND > 0
	rnd_attach_source(&sc->sc_rnd_source, self->dv_xname, RND_TYPE_NET, 0);
#endif
#ifndef __rtems__
	intr_establish(IRQ_ETH0 + sc->sc_macno, IST_LEVEL, IPL_NET,
	    gfe_intr, sc);
#else
	/* interrupt hookup is done by the RTEMS compat glue via n_intr */
	return 0;
#endif
}
---|
565 | |
---|
566 | int |
---|
567 | gfe_dmamem_alloc(struct gfe_softc *sc, struct gfe_dmamem *gdm, int maxsegs, |
---|
568 | size_t size, int flags) |
---|
569 | { |
---|
570 | int error = 0; |
---|
571 | GE_FUNC_ENTER(sc, "gfe_dmamem_alloc"); |
---|
572 | |
---|
573 | KASSERT(gdm->gdm_kva == NULL); |
---|
574 | gdm->gdm_size = size; |
---|
575 | gdm->gdm_maxsegs = maxsegs; |
---|
576 | |
---|
577 | #ifndef __rtems__ |
---|
578 | error = bus_dmamem_alloc(sc->sc_dmat, gdm->gdm_size, PAGE_SIZE, |
---|
579 | gdm->gdm_size, gdm->gdm_segs, gdm->gdm_maxsegs, &gdm->gdm_nsegs, |
---|
580 | BUS_DMA_NOWAIT); |
---|
581 | if (error) |
---|
582 | goto fail; |
---|
583 | |
---|
584 | error = bus_dmamem_map(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs, |
---|
585 | gdm->gdm_size, &gdm->gdm_kva, flags | BUS_DMA_NOWAIT); |
---|
586 | if (error) |
---|
587 | goto fail; |
---|
588 | |
---|
589 | error = bus_dmamap_create(sc->sc_dmat, gdm->gdm_size, gdm->gdm_nsegs, |
---|
590 | gdm->gdm_size, 0, BUS_DMA_ALLOCNOW|BUS_DMA_NOWAIT, &gdm->gdm_map); |
---|
591 | if (error) |
---|
592 | goto fail; |
---|
593 | |
---|
594 | error = bus_dmamap_load(sc->sc_dmat, gdm->gdm_map, gdm->gdm_kva, |
---|
595 | gdm->gdm_size, NULL, BUS_DMA_NOWAIT); |
---|
596 | if (error) |
---|
597 | goto fail; |
---|
598 | #else |
---|
599 | gdm->gdm_segs[0].ds_len = size; |
---|
600 | |
---|
601 | /* FIXME: probably we can relax the alignment */ |
---|
602 | if ( ! ( gdm->gdm_unaligned_buf = malloc( size + PAGE_SIZE - 1, M_DEVBUF, M_NOWAIT ) ) ) |
---|
603 | goto fail; |
---|
604 | |
---|
605 | gdm->gdm_map = gdm; |
---|
606 | gdm->gdm_nsegs = 1; |
---|
607 | gdm->gdm_kva = (caddr_t)(gdm->gdm_segs[0].ds_addr = _DO_ALIGN(gdm->gdm_unaligned_buf, PAGE_SIZE)); |
---|
608 | #endif |
---|
609 | |
---|
610 | /* invalidate from cache */ |
---|
611 | bus_dmamap_sync(sc->sc_dmat, gdm->gdm_map, 0, gdm->gdm_size, |
---|
612 | BUS_DMASYNC_PREREAD); |
---|
613 | fail: |
---|
614 | if (error) { |
---|
615 | gfe_dmamem_free(sc, gdm); |
---|
616 | GE_DPRINTF(sc, (":err=%d", error)); |
---|
617 | } |
---|
618 | GE_DPRINTF(sc, (":kva=%p/%#x,map=%p,nsegs=%d,pa=%" PRIx32 "/%" PRIx32, |
---|
619 | gdm->gdm_kva, gdm->gdm_size, gdm->gdm_map, gdm->gdm_map->dm_nsegs, |
---|
620 | gdm->gdm_map->dm_segs->ds_addr, gdm->gdm_map->dm_segs->ds_len)); |
---|
621 | GE_FUNC_EXIT(sc, ""); |
---|
622 | return error; |
---|
623 | } |
---|
624 | |
---|
/*
 * Release memory obtained with gfe_dmamem_alloc() and reset the
 * tracking structure for reuse.  Safe on a partially initialized gdm
 * (as left behind by a failed allocation): each teardown step is
 * guarded by the corresponding field.
 */
void
gfe_dmamem_free(struct gfe_softc *sc, struct gfe_dmamem *gdm)
{
	GE_FUNC_ENTER(sc, "gfe_dmamem_free");
#ifndef __rtems__
	if (gdm->gdm_map)
		bus_dmamap_destroy(sc->sc_dmat, gdm->gdm_map);
	if (gdm->gdm_kva)
		bus_dmamem_unmap(sc->sc_dmat, gdm->gdm_kva, gdm->gdm_size);
	if (gdm->gdm_nsegs > 0)
		bus_dmamem_free(sc->sc_dmat, gdm->gdm_segs, gdm->gdm_nsegs);
#else
	/* on RTEMS the whole chunk came from a single malloc() */
	if (gdm->gdm_nsegs > 0)
		free(gdm->gdm_unaligned_buf, M_DEVBUF);
#endif
	gdm->gdm_map = NULL;
	gdm->gdm_kva = NULL;
	gdm->gdm_nsegs = 0;
	GE_FUNC_EXIT(sc, "");
}
---|
645 | |
---|
646 | #ifndef __rtems__ |
---|
647 | int |
---|
648 | gfe_ifioctl(struct ifnet *ifp, u_long cmd, caddr_t data) |
---|
649 | #else |
---|
650 | int |
---|
651 | gfe_ifioctl(struct ifnet *ifp, ioctl_command_t cmd, caddr_t data) |
---|
652 | #endif |
---|
653 | { |
---|
654 | struct gfe_softc * const sc = ifp->if_softc; |
---|
655 | struct ifreq *ifr = (struct ifreq *) data; |
---|
656 | #ifndef __rtems__ |
---|
657 | struct ifaddr *ifa = (struct ifaddr *) data; |
---|
658 | #endif |
---|
659 | int s, error = 0; |
---|
660 | |
---|
661 | GE_FUNC_ENTER(sc, "gfe_ifioctl"); |
---|
662 | s = splnet(); |
---|
663 | |
---|
664 | switch (cmd) { |
---|
665 | #ifndef __rtems__ |
---|
666 | case SIOCSIFADDR: |
---|
667 | ifp->if_flags |= IFF_UP; |
---|
668 | switch (ifa->ifa_addr->sa_family) { |
---|
669 | #ifdef INET |
---|
670 | case AF_INET: |
---|
671 | error = gfe_whack(sc, GE_WHACK_START); |
---|
672 | if (error == 0) |
---|
673 | arp_ifinit(ifp, ifa); |
---|
674 | break; |
---|
675 | #endif |
---|
676 | default: |
---|
677 | error = gfe_whack(sc, GE_WHACK_START); |
---|
678 | break; |
---|
679 | } |
---|
680 | break; |
---|
681 | #endif |
---|
682 | |
---|
683 | case SIOCSIFFLAGS: |
---|
684 | if ((sc->sc_ec.ec_if.if_flags & IFF_PROMISC) == 0) |
---|
685 | sc->sc_pcr &= ~ETH_EPCR_PM; |
---|
686 | else |
---|
687 | sc->sc_pcr |= ETH_EPCR_PM; |
---|
688 | switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) { |
---|
689 | case IFF_UP|IFF_RUNNING:/* active->active, update */ |
---|
690 | error = gfe_whack(sc, GE_WHACK_CHANGE); |
---|
691 | break; |
---|
692 | case IFF_RUNNING: /* not up, so we stop */ |
---|
693 | error = gfe_whack(sc, GE_WHACK_STOP); |
---|
694 | break; |
---|
695 | case IFF_UP: /* not running, so we start */ |
---|
696 | error = gfe_whack(sc, GE_WHACK_START); |
---|
697 | break; |
---|
698 | case 0: /* idle->idle: do nothing */ |
---|
699 | break; |
---|
700 | } |
---|
701 | break; |
---|
702 | |
---|
703 | case SIOCADDMULTI: |
---|
704 | case SIOCDELMULTI: |
---|
705 | error = (cmd == SIOCADDMULTI) |
---|
706 | ? ether_addmulti(ifr, &sc->sc_ec) |
---|
707 | : ether_delmulti(ifr, &sc->sc_ec); |
---|
708 | if (error == ENETRESET) { |
---|
709 | if (ifp->if_flags & IFF_RUNNING) |
---|
710 | #if !defined(__rtems__) |
---|
711 | error = gfe_whack(sc, GE_WHACK_CHANGE); |
---|
712 | #else |
---|
713 | /* doing GE_WHACK_CHANGE seems wrong - that |
---|
714 | * doesn't do anything to the hash table. |
---|
715 | * Therefore we perform a stop/start sequence. |
---|
716 | */ |
---|
717 | { |
---|
718 | error = gfe_whack(sc, GE_WHACK_STOP); |
---|
719 | if ( error ) |
---|
720 | break; |
---|
721 | error = gfe_whack(sc, GE_WHACK_START); |
---|
722 | } |
---|
723 | #endif |
---|
724 | else |
---|
725 | error = 0; |
---|
726 | } |
---|
727 | break; |
---|
728 | |
---|
729 | case SIOCSIFMTU: |
---|
730 | if (ifr->ifr_mtu > ETHERMTU || ifr->ifr_mtu < ETHERMIN) { |
---|
731 | error = EINVAL; |
---|
732 | break; |
---|
733 | } |
---|
734 | ifp->if_mtu = ifr->ifr_mtu; |
---|
735 | break; |
---|
736 | |
---|
737 | case SIOCSIFMEDIA: |
---|
738 | case SIOCGIFMEDIA: |
---|
739 | #ifndef __rtems__ |
---|
740 | error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd); |
---|
741 | #else |
---|
742 | error = rtems_mii_ioctl(&gfe_mdio_access, sc, cmd, &ifr->ifr_media); |
---|
743 | #endif |
---|
744 | break; |
---|
745 | |
---|
746 | default: |
---|
747 | #ifndef __rtems__ |
---|
748 | error = EINVAL; |
---|
749 | #else |
---|
750 | error = ether_ioctl(ifp, cmd, data); |
---|
751 | #endif |
---|
752 | break; |
---|
753 | } |
---|
754 | splx(s); |
---|
755 | GE_FUNC_EXIT(sc, ""); |
---|
756 | return error; |
---|
757 | } |
---|
758 | |
---|
/*
 * if_start hook: drain if_snd into the high-priority TX pending queue
 * and kick the transmitter.  If the pending queue fills up, the
 * current mbuf is pushed back onto if_snd and IFF_OACTIVE is set so
 * the stack stops handing us packets until TX completion resumes us.
 */
void
gfe_ifstart(struct ifnet *ifp)
{
	struct gfe_softc * const sc = ifp->if_softc;
	struct mbuf *m;

	GE_FUNC_ENTER(sc, "gfe_ifstart");

	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		GE_FUNC_EXIT(sc, "$");
		return;
	}

	for (;;) {
		IF_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL) {
			/* everything queued; we are not flow-blocked */
			ifp->if_flags &= ~IFF_OACTIVE;
			GE_FUNC_EXIT(sc, "");
			return;
		}

		/*
		 * No space in the pending queue?  try later.
		 */
		if (IF_QFULL(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq))
			break;

		/*
		 * Try to enqueue a mbuf to the device. If that fails, we
		 * can always try to map the next mbuf.
		 */
		IF_ENQUEUE(&sc->sc_txq[GE_TXPRIO_HI].txq_pendq, m);
		GE_DPRINTF(sc, (">"));
#ifndef GE_NOTX
		(void) gfe_tx_enqueue(sc, GE_TXPRIO_HI);
#endif
	}

	/*
	 * Attempt to queue the mbuf for send failed: put it back on
	 * if_snd and mark ourselves flow-blocked.
	 */
	IF_PREPEND(&ifp->if_snd, m);
	ifp->if_flags |= IFF_OACTIVE;
	GE_FUNC_EXIT(sc, "%%");
}
---|
804 | |
---|
/*
 * Watchdog: the TX side timed out.  Dump the TX ring state (first
 * uncompleted descriptor vs. the descriptor the MAC is currently on),
 * count an output error, and fully restart the interface.
 */
void
gfe_ifwatchdog(struct ifnet *ifp)
{
	struct gfe_softc * const sc = ifp->if_softc;
	struct gfe_txqueue * const txq = &sc->sc_txq[GE_TXPRIO_HI];

	GE_FUNC_ENTER(sc, "gfe_ifwatchdog");
	printf("%s: device timeout", sc->sc_dev.dv_xname);
	if (ifp->if_flags & IFF_RUNNING) {
		/* convert the MAC's current-TX-descriptor bus address
		 * back into a ring index */
		uint32_t curtxdnum = (bus_space_read_4(sc->sc_gt_memt, sc->sc_gt_memh, txq->txq_ectdp) - txq->txq_desc_busaddr) / sizeof(txq->txq_descs[0]);
		GE_TXDPOSTSYNC(sc, txq, txq->txq_fi);
		GE_TXDPOSTSYNC(sc, txq, curtxdnum);
		printf(" (fi=%d(%#" PRIx32 "),lo=%d,cur=%" PRIx32 "(%#" PRIx32 "),icm=%#" PRIx32 ") ",
		    txq->txq_fi, txq->txq_descs[txq->txq_fi].ed_cmdsts,
		    txq->txq_lo, curtxdnum, txq->txq_descs[curtxdnum].ed_cmdsts,
		    GE_READ(sc, EICR));
		GE_TXDPRESYNC(sc, txq, txq->txq_fi);
		GE_TXDPRESYNC(sc, txq, curtxdnum);
	}
	printf("\n");
	ifp->if_oerrors++;
	(void) gfe_whack(sc, GE_WHACK_RESTART);
	GE_FUNC_EXIT(sc, "");
}
---|
829 | |
---|
#ifdef __rtems__
/*
 * Produce a cluster mbuf ready for RX use.  When 'm' is NULL a fresh
 * header+cluster pair is allocated; otherwise 'm' is recycled by
 * rewinding its data pointer to the start of its cluster.  In either
 * case the lengths are reset to a full MCLBYTES.  Returns NULL when
 * allocation fails.
 */
static struct mbuf *
gfe_newbuf(struct mbuf *m)
{
	if ( m ) {
		/* recycle: point back at the beginning of the cluster */
		m->m_data = m->m_ext.ext_buf;
	} else {
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if ( !m )
			return 0;
		MCLGET(m, M_DONTWAIT);
		if ( !(M_EXT & m->m_flags) ) {
			/* no cluster available; give the header back */
			m_freem(m);
			return 0;
		}
	}
	m->m_len = m->m_pkthdr.len = MCLBYTES;
#if 0
	m_adj(m, 2); /* so payload is 16-byte aligned */
#endif
	return m;
}
#endif
---|
853 | |
---|
/*
 * Allocate the DMA resources for one receive queue: the descriptor
 * ring (uncached, since the GT reads it directly) and the receive
 * buffers.  On RTEMS the buffers are an array of cluster mbufs, one
 * per descriptor, instead of a flat DMA buffer region.
 * Returns 0 on success, non-zero on allocation failure.
 */
int
gfe_rx_rxqalloc(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
	int error;

	GE_FUNC_ENTER(sc, "gfe_rx_rxqalloc");
	GE_DPRINTF(sc, ("(%d)", rxprio));

	error = gfe_dmamem_alloc(sc, &rxq->rxq_desc_mem, 1,
	    GE_RXDESC_MEMSIZE, BUS_DMA_NOCACHE);
	if (error) {
		GE_FUNC_EXIT(sc, "!!");
		return error;
	}

#ifndef __rtems__
	error = gfe_dmamem_alloc(sc, &rxq->rxq_buf_mem, GE_RXBUF_NSEGS,
	    GE_RXBUF_MEMSIZE, 0);
#else
	/* One cluster mbuf per RX descriptor; 'error' is still 0 here
	 * from the descriptor allocation above. */
	if ( ! (rxq->rxq_bufs = malloc( sizeof(*rxq->rxq_bufs) * GE_RXDESC_MAX, M_DEVBUF, M_NOWAIT ) ) ) {
		error = -1;
	} else {
		int i;
		for ( i = 0; i<GE_RXDESC_MAX; i++ ) {
			if ( !(rxq->rxq_bufs[i] = gfe_newbuf(0)) ) {
				fprintf(stderr,"gfe: Not enough mbuf clusters to initialize RX ring!\n");
				/* unwind the partially built ring */
				while (--i >=0 ) {
					m_freem(rxq->rxq_bufs[i]);
				}
				free(rxq->rxq_bufs, M_DEVBUF);
				rxq->rxq_bufs = 0;
				error = -1;
				break;
			}
		}
	}
#endif
	if (error) {
		GE_FUNC_EXIT(sc, "!!!");
		return error;
	}
	GE_FUNC_EXIT(sc, "");
	return error;
}
---|
899 | |
---|
/*
 * Initialize one receive queue: (re)allocate its resources unless
 * GE_NOFREE says they persist across restarts, build the circular
 * descriptor ring (last descriptor links back to the first), flush
 * the ring to memory and record the queue's interrupt bits and
 * first/current RX descriptor pointer registers.
 * Returns 0 on success or the allocation error.
 */
int
gfe_rx_rxqinit(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
	volatile struct gt_eth_desc *rxd;
#ifndef __rtems__
	const bus_dma_segment_t *ds;
#endif
	int idx;
	bus_addr_t nxtaddr;
#ifndef __rtems__
	bus_size_t boff;
#endif

	GE_FUNC_ENTER(sc, "gfe_rx_rxqinit");
	GE_DPRINTF(sc, ("(%d)", rxprio));

	if ((sc->sc_flags & GE_NOFREE) == 0) {
		int error = gfe_rx_rxqalloc(sc, rxprio);
		if (error) {
			GE_FUNC_EXIT(sc, "!");
			return error;
		}
	} else {
		/* resources were kept from a previous init */
		KASSERT(rxq->rxq_desc_mem.gdm_kva != NULL);
#ifndef __rtems__
		KASSERT(rxq->rxq_buf_mem.gdm_kva != NULL);
#else
		KASSERT(rxq->rxq_bufs != NULL);
#endif
	}

	memset(rxq->rxq_desc_mem.gdm_kva, 0, GE_RXDESC_MEMSIZE);

	rxq->rxq_descs =
	    (volatile struct gt_eth_desc *) rxq->rxq_desc_mem.gdm_kva;
	rxq->rxq_desc_busaddr = rxq->rxq_desc_mem.gdm_map->dm_segs[0].ds_addr;
#ifndef __rtems__
	rxq->rxq_bufs = (struct gfe_rxbuf *) rxq->rxq_buf_mem.gdm_kva;
#endif
	rxq->rxq_fi = 0;
	rxq->rxq_active = GE_RXDESC_MAX;
	/*
	 * Build the ring: each descriptor is handed to the hardware
	 * (RX_CMD_O) with first/last flags and interrupt-enable set,
	 * pointing at its buffer and chained to the next descriptor.
	 */
	for (idx = 0, rxd = rxq->rxq_descs,
#ifndef __rtems__
	    boff = 0, ds = rxq->rxq_buf_mem.gdm_map->dm_segs,
#endif
	    nxtaddr = rxq->rxq_desc_busaddr + sizeof(*rxd);
	    idx < GE_RXDESC_MAX;
	    idx++, rxd++, nxtaddr += sizeof(*rxd)) {
#ifndef __rtems__
		rxd->ed_lencnt = htogt32(GE_RXBUF_SIZE << 16);
#else
		rxd->ed_lencnt = htogt32(MCLBYTES << 16);
#endif
		rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI);
#ifndef __rtems__
		rxd->ed_bufptr = htogt32(ds->ds_addr + boff);
#else
		/* NOTE(review): uses the mbuf's virtual address as the
		 * bus address -- assumes a 1:1 mapping on this board. */
		rxd->ed_bufptr = htogt32(mtod(rxq->rxq_bufs[idx], uint32_t));
#endif
		/*
		 * update the nxtptr to point to the next txd.
		 */
		if (idx == GE_RXDESC_MAX - 1)
			nxtaddr = rxq->rxq_desc_busaddr;	/* close the ring */
		rxd->ed_nxtptr = htogt32(nxtaddr);
#ifndef __rtems__
		boff += GE_RXBUF_SIZE;
		if (boff == ds->ds_len) {
			/* buffer segment exhausted; move to the next one */
			ds++;
			boff = 0;
		}
#endif
	}
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map, 0,
	    rxq->rxq_desc_mem.gdm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#ifndef __rtems__
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 0,
	    rxq->rxq_buf_mem.gdm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
#else
	/* FIXME: we leave this call in here so compilation fails
	 * if bus_dmamap_sync() is ever fleshed-out to implement
	 * software cache coherency...
	 */
	bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map, 0,
	    rxq->rxq_buf_mem.gdm_map->dm_mapsize,
	    BUS_DMASYNC_PREREAD);
#endif

	/* Record the interrupt bits and the RX descriptor-pointer
	 * registers that belong to this priority queue. */
	rxq->rxq_intrbits = ETH_IR_RxBuffer|ETH_IR_RxError;
	switch (rxprio) {
	case GE_RXPRIO_HI:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_3|ETH_IR_RxError_3;
		rxq->rxq_efrdp = ETH_EFRDP3(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP3(sc->sc_macno);
		break;
	case GE_RXPRIO_MEDHI:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_2|ETH_IR_RxError_2;
		rxq->rxq_efrdp = ETH_EFRDP2(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP2(sc->sc_macno);
		break;
	case GE_RXPRIO_MEDLO:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_1|ETH_IR_RxError_1;
		rxq->rxq_efrdp = ETH_EFRDP1(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP1(sc->sc_macno);
		break;
	case GE_RXPRIO_LO:
		rxq->rxq_intrbits |= ETH_IR_RxBuffer_0|ETH_IR_RxError_0;
		rxq->rxq_efrdp = ETH_EFRDP0(sc->sc_macno);
		rxq->rxq_ecrdp = ETH_ECRDP0(sc->sc_macno);
		break;
	}
	GE_FUNC_EXIT(sc, "");
	return 0;
}
---|
1017 | |
---|
/*
 * Drain completed receive descriptors for one priority queue.
 * For each descriptor the CPU owns: validate the frame, pass it up
 * the stack (NetBSD copies into a fresh mbuf; RTEMS swaps the ring's
 * cluster mbuf with a newly allocated one), then re-arm the
 * descriptor and hand it back to the hardware.
 */
void
gfe_rx_get(struct gfe_softc *sc, enum gfe_rxprio rxprio)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_rxqueue * const rxq = &sc->sc_rxq[rxprio];
#ifndef __rtems__
	struct mbuf *m = rxq->rxq_curpkt;	/* mbuf kept from last call, if any */
#else
	struct mbuf *m;
#endif

	GE_FUNC_ENTER(sc, "gfe_rx_get");
	GE_DPRINTF(sc, ("(%d)", rxprio));

	while (rxq->rxq_active > 0) {
		volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[rxq->rxq_fi];
#ifndef __rtems__
		struct gfe_rxbuf *rxb = &rxq->rxq_bufs[rxq->rxq_fi];
#else
		struct mbuf **rxb = &rxq->rxq_bufs[rxq->rxq_fi];
#endif
		const struct ether_header *eh;
		unsigned int cmdsts;
		size_t buflen;

		GE_RXDPOSTSYNC(sc, rxq, rxq->rxq_fi);
		cmdsts = gt32toh(rxd->ed_cmdsts);
		GE_DPRINTF(sc, (":%d=%#x", rxq->rxq_fi, cmdsts));
		rxq->rxq_cmdsts = cmdsts;
		/*
		 * Sometimes the GE "forgets" to reset the ownership bit.
		 * But if the length has been rewritten, the packet is ours
		 * so pretend the O bit is set.
		 */
		buflen = gt32toh(rxd->ed_lencnt) & 0xffff;
		if ((cmdsts & RX_CMD_O) && buflen == 0) {
			/* still owned by hardware; restore sync state and stop */
			GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi);
			break;
		}

		/*
		 * If this is not a single buffer packet with no errors
		 * or for some reason it's bigger than our frame size,
		 * ignore it and go to the next packet.
		 */
		if ((cmdsts & (RX_CMD_F|RX_CMD_L|RX_STS_ES)) !=
		    (RX_CMD_F|RX_CMD_L) ||
		    buflen > sc->sc_max_frame_length) {
			GE_DPRINTF(sc, ("!"));
			--rxq->rxq_active;
			ifp->if_ipackets++;
			ifp->if_ierrors++;

			/* reset the buffer and recycle the descriptor */
			*rxb = gfe_newbuf(*rxb);
			goto give_it_back;
		}

		/* CRC is included with the packet; trim it off. */
		buflen -= ETHER_CRC_LEN;

#ifndef __rtems__
		if (m == NULL) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				GE_DPRINTF(sc, ("?"));
				break;
			}
		}
		if ((m->m_flags & M_EXT) == 0 && buflen > MHLEN - 2) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				GE_DPRINTF(sc, ("?"));
				break;
			}
		}
		m->m_data += 2;		/* 16-byte align the IP header */
		m->m_len = 0;
		m->m_pkthdr.len = 0;
		m->m_pkthdr.rcvif = ifp;
#else
		if ( ! (m=gfe_newbuf(0)) ) {
			/* recycle old buffer */
			*rxb = gfe_newbuf(*rxb);
			goto give_it_back;
		}
		/* swap mbufs: hand the fresh one to the ring, keep the
		 * filled one for the stack */
		{
		struct mbuf *tmp = *rxb;
			*rxb = m;
			m    = tmp;
			rxd->ed_bufptr = htogt32(mtod(*rxb, uint32_t));
		}
#endif
		rxq->rxq_cmdsts = cmdsts;
		--rxq->rxq_active;

#ifdef __rtems__
		/* FIXME: we leave this call in here so compilation fails
		 * if bus_dmamap_sync() is ever fleshed-out to implement
		 * software cache coherency...
		 */
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map,
		    rxq->rxq_fi * sizeof(*rxb), buflen, BUS_DMASYNC_POSTREAD);
#else
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_buf_mem.gdm_map,
		    rxq->rxq_fi * sizeof(*rxb), buflen, BUS_DMASYNC_POSTREAD);

		KASSERT(m->m_len == 0 && m->m_pkthdr.len == 0);
		memcpy(m->m_data + m->m_len, rxb->rb_data, buflen);
#endif

		m->m_len = buflen;
		m->m_pkthdr.len = buflen;

		ifp->if_ipackets++;
#if NBPFILTER > 0
		if (ifp->if_bpf != NULL)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/*
		 * Software address filter: accept the frame if we are
		 * promiscuous, if the hardware did not flag it as a
		 * (hash-miss) multicast, or if it is multicast/broadcast
		 * or addressed to our unicast address.
		 */
		eh = (const struct ether_header *) m->m_data;
		if ((ifp->if_flags & IFF_PROMISC) ||
		    (rxq->rxq_cmdsts & RX_STS_M) == 0 ||
		    (rxq->rxq_cmdsts & RX_STS_HE) ||
		    (eh->ether_dhost[0] & 1) != 0 ||
		    memcmp(eh->ether_dhost,
#ifndef __rtems__
		    LLADDR(ifp->if_sadl),
#else
		    sc->sc_ec.ac_enaddr,
#endif
		    ETHER_ADDR_LEN) == 0) {
#ifndef __rtems__
			(*ifp->if_input)(ifp, m);
			m = NULL;
#else
			DO_ETHER_INPUT_SKIPPING_ETHER_HEADER(ifp,m);
#endif
			GE_DPRINTF(sc, (">"));
		} else {
			/* drop the frame; NetBSD keeps the mbuf for reuse */
#ifndef __rtems__
			m->m_len = 0;
			m->m_pkthdr.len = 0;
#else
			m_freem(m);
#endif
			GE_DPRINTF(sc, ("+"));
		}
		rxq->rxq_cmdsts = 0;

	   give_it_back:
		/* re-arm the descriptor and return it to the hardware */
		rxd->ed_lencnt &= ~0xffff;	/* zero out length */
		rxd->ed_cmdsts = htogt32(RX_CMD_F|RX_CMD_L|RX_CMD_O|RX_CMD_EI);
#if 0
		GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)",
		    rxq->rxq_fi,
		    ((unsigned long *)rxd)[0], ((unsigned long *)rxd)[1],
		    ((unsigned long *)rxd)[2], ((unsigned long *)rxd)[3]));
#endif
		GE_RXDPRESYNC(sc, rxq, rxq->rxq_fi);
		if (++rxq->rxq_fi == GE_RXDESC_MAX)
			rxq->rxq_fi = 0;
		rxq->rxq_active++;
	}
#ifndef __rtems__
	rxq->rxq_curpkt = m;	/* remember any unused mbuf for next time */
#endif
	GE_FUNC_EXIT(sc, "");
}
---|
1187 | |
---|
/*
 * Service receive interrupts.  First drain every queue whose RxBuffer
 * bit is set (highest priority first, via the RXPRIO_DECODER lookup
 * table that maps a 4-bit cause to the highest set priority).  Then
 * for each queue with an RxError (out-of-descriptors) condition, mask
 * its interrupt bits, remember them in sc_idlemask and schedule
 * gfe_tick() to restart the queue once descriptors are available.
 * Returns the possibly reduced interrupt mask.
 */
uint32_t
gfe_rx_process(struct gfe_softc *sc, uint32_t cause, uint32_t intrmask)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_rxqueue *rxq;
	uint32_t rxbits;
	/* 2-bit entries: for each 4-bit "pending" pattern, the number
	 * of the highest-priority queue that is pending. */
#define	RXPRIO_DECODER	0xffffaa50
	GE_FUNC_ENTER(sc, "gfe_rx_process");

	rxbits = ETH_IR_RxBuffer_GET(cause);
	while (rxbits) {
		enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3;
		GE_DPRINTF(sc, ("%1" PRIx32, rxbits));
		rxbits &= ~(1 << rxprio);
		gfe_rx_get(sc, rxprio);
	}

	rxbits = ETH_IR_RxError_GET(cause);
	while (rxbits) {
		enum gfe_rxprio rxprio = (RXPRIO_DECODER >> (rxbits * 2)) & 3;
		uint32_t masks[(GE_RXDESC_MAX + 31) / 32];
		int idx;
		rxbits &= ~(1 << rxprio);
		rxq = &sc->sc_rxq[rxprio];
		/* silence this queue until gfe_tick() restarts it */
		sc->sc_idlemask |= (rxq->rxq_intrbits & ETH_IR_RxBits);
		intrmask &= ~(rxq->rxq_intrbits & ETH_IR_RxBits);
		if ((sc->sc_tickflags & GE_TICK_RX_RESTART) == 0) {
			sc->sc_tickflags |= GE_TICK_RX_RESTART;
			callout_reset(&sc->sc_co, 1, gfe_tick, sc);
		}
		ifp->if_ierrors++;
		GE_DPRINTF(sc, ("%s: rx queue %d filled at %u\n",
		    sc->sc_dev.dv_xname, rxprio, rxq->rxq_fi));
		/* snapshot which descriptors the hardware still owns
		 * (diagnostic only; reported below under DEBUG) */
		memset(masks, 0, sizeof(masks));
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
		    0, rxq->rxq_desc_mem.gdm_size,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
		for (idx = 0; idx < GE_RXDESC_MAX; idx++) {
			volatile struct gt_eth_desc *rxd = &rxq->rxq_descs[idx];

			if (RX_CMD_O & gt32toh(rxd->ed_cmdsts))
				masks[idx/32] |= 1 << (idx & 31);
		}
		bus_dmamap_sync(sc->sc_dmat, rxq->rxq_desc_mem.gdm_map,
		    0, rxq->rxq_desc_mem.gdm_size,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
#if defined(DEBUG)
		printf("%s: rx queue %d filled at %u=%#x(%#x/%#x)\n",
		    sc->sc_dev.dv_xname, rxprio, rxq->rxq_fi,
		    rxq->rxq_cmdsts, masks[0], masks[1]);
#endif
	}
	/* if every per-queue RX source is masked, drop the summary bits too */
	if ((intrmask & ETH_IR_RxBits) == 0)
		intrmask &= ~(ETH_IR_RxBuffer|ETH_IR_RxError);

	GE_FUNC_EXIT(sc, "");
	return intrmask;
}
---|
1246 | |
---|
/*
 * Initialize all four receive priority queues and, when the receiver
 * is not already active, point the hardware's first/current RX
 * descriptor registers at each ring.
 *
 * NOTE(review): for the HI queue sc_intrmask is updated
 * unconditionally, while for the other three it only happens when
 * GE_RXACTIVE is clear.  This asymmetry matches the upstream NetBSD
 * driver (the HI queue is the one actually used) -- confirm before
 * "fixing".
 */
int
gfe_rx_prime(struct gfe_softc *sc)
{
	struct gfe_rxqueue *rxq;
	int error;

	GE_FUNC_ENTER(sc, "gfe_rx_prime");

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_HI);
	if (error)
		goto bail;
	rxq = &sc->sc_rxq[GE_RXPRIO_HI];
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr);
	}
	sc->sc_intrmask |= rxq->rxq_intrbits;

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDHI);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI];
		GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_MEDLO);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO];
		GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

	error = gfe_rx_rxqinit(sc, GE_RXPRIO_LO);
	if (error)
		goto bail;
	if ((sc->sc_flags & GE_RXACTIVE) == 0) {
		rxq = &sc->sc_rxq[GE_RXPRIO_LO];
		GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr);
		GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr);
		sc->sc_intrmask |= rxq->rxq_intrbits;
	}

  bail:
	GE_FUNC_EXIT(sc, "");
	return error;
}
---|
1299 | |
---|
1300 | void |
---|
1301 | gfe_rx_cleanup(struct gfe_softc *sc, enum gfe_rxprio rxprio) |
---|
1302 | { |
---|
1303 | struct gfe_rxqueue *rxq = &sc->sc_rxq[rxprio]; |
---|
1304 | GE_FUNC_ENTER(sc, "gfe_rx_cleanup"); |
---|
1305 | if (rxq == NULL) { |
---|
1306 | GE_FUNC_EXIT(sc, ""); |
---|
1307 | return; |
---|
1308 | } |
---|
1309 | |
---|
1310 | #ifndef __rtems__ |
---|
1311 | if (rxq->rxq_curpkt) |
---|
1312 | m_freem(rxq->rxq_curpkt); |
---|
1313 | #endif |
---|
1314 | if ((sc->sc_flags & GE_NOFREE) == 0) { |
---|
1315 | gfe_dmamem_free(sc, &rxq->rxq_desc_mem); |
---|
1316 | #ifndef __rtems__ |
---|
1317 | gfe_dmamem_free(sc, &rxq->rxq_buf_mem); |
---|
1318 | #else |
---|
1319 | if ( rxq->rxq_bufs ) { |
---|
1320 | int i; |
---|
1321 | for ( i=0; i<GE_RXDESC_MAX; i++ ) { |
---|
1322 | if ( rxq->rxq_bufs[i] ) { |
---|
1323 | m_freem(rxq->rxq_bufs[i]); |
---|
1324 | } |
---|
1325 | } |
---|
1326 | free(rxq->rxq_bufs, M_DEVBUF); |
---|
1327 | } |
---|
1328 | #endif |
---|
1329 | } |
---|
1330 | GE_FUNC_EXIT(sc, ""); |
---|
1331 | } |
---|
1332 | |
---|
/*
 * Stop the receiver: mask all RX interrupt sources, issue the
 * abort-receive command (ETH_ESDCMR_AR) and poll until the SDMA
 * engine acknowledges it by clearing the bit, then tear down all
 * four receive queues.  'op' is currently unused here.
 */
void
gfe_rx_stop(struct gfe_softc *sc, enum gfe_whack_op op)
{
	GE_FUNC_ENTER(sc, "gfe_rx_stop");
	sc->sc_flags &= ~GE_RXACTIVE;
	sc->sc_idlemask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
	sc->sc_intrmask &= ~(ETH_IR_RxBits|ETH_IR_RxBuffer|ETH_IR_RxError);
	GE_WRITE(sc, EIMR, sc->sc_intrmask);
	GE_WRITE(sc, ESDCMR, ETH_ESDCMR_AR);	/* abort receive */
	do {
		delay(10);
	} while (GE_READ(sc, ESDCMR) & ETH_ESDCMR_AR);	/* wait for abort to complete */
	gfe_rx_cleanup(sc, GE_RXPRIO_HI);
	gfe_rx_cleanup(sc, GE_RXPRIO_MEDHI);
	gfe_rx_cleanup(sc, GE_RXPRIO_MEDLO);
	gfe_rx_cleanup(sc, GE_RXPRIO_LO);
	GE_FUNC_EXIT(sc, "");
}
---|
1351 | |
---|
/*
 * Deferred-work callout.  Runs at splnet and handles the flags set by
 * the interrupt path: restart a stalled transmit queue
 * (GE_TICK_TX_IFSTART) and re-enable/re-prime receive queues that ran
 * out of descriptors (GE_TICK_RX_RESTART, using the interrupt bits
 * parked in sc_idlemask).  Finishes by calling gfe_intr() to pick up
 * anything that happened while the sources were masked.
 */
void
gfe_tick(void *arg)
{
	struct gfe_softc * const sc = arg;
	uint32_t intrmask;
	unsigned int tickflags;
	int s;

	GE_FUNC_ENTER(sc, "gfe_tick");

	s = splnet();

	/* consume the pending flags atomically w.r.t. the interrupt path */
	tickflags = sc->sc_tickflags;
	sc->sc_tickflags = 0;
	intrmask = sc->sc_intrmask;
	if (tickflags & GE_TICK_TX_IFSTART)
		gfe_ifstart(&sc->sc_ec.ec_if);
	if (tickflags & GE_TICK_RX_RESTART) {
		/* re-enable the RX interrupt bits that were parked */
		intrmask |= sc->sc_idlemask;
		/* for each stalled queue, rewind to descriptor 0 and
		 * repoint the hardware's first/current RX registers */
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_3|ETH_IR_RxError_3)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_HI];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP3, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP3, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_2|ETH_IR_RxError_2)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDHI];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP2, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP2, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_1|ETH_IR_RxError_1)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_MEDLO];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP1, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP1, rxq->rxq_desc_busaddr);
		}
		if (sc->sc_idlemask & (ETH_IR_RxBuffer_0|ETH_IR_RxError_0)) {
			struct gfe_rxqueue *rxq = &sc->sc_rxq[GE_RXPRIO_LO];
			rxq->rxq_fi = 0;
			GE_WRITE(sc, EFRDP0, rxq->rxq_desc_busaddr);
			GE_WRITE(sc, ECRDP0, rxq->rxq_desc_busaddr);
		}
		sc->sc_idlemask = 0;
	}
	if (intrmask != sc->sc_intrmask) {
		sc->sc_intrmask = intrmask;
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
	}
	gfe_intr(sc);
	splx(s);

	GE_FUNC_EXIT(sc, "");
}
---|
1406 | |
---|
/*
 * Reclaim completed transmit descriptors starting at txq_fi.  On
 * RTEMS a packet may occupy several descriptors (one per mbuf
 * fragment), so the loop runs until the descriptor carrying
 * TX_CMD_LAST and then frees the corresponding mbuf chain from
 * txq_sentq.  Returns the number of slots reclaimed, or -1 when the
 * hardware still owns the current descriptor.
 *
 * NOTE(review): inside the RTEMS do/while, 'fi' and 'txd' advance
 * without wrapping to the start of the ring; only the final
 * txq_fi adjustment wraps.  A fragment chain that crosses the ring
 * boundary would index past txq_descs[GE_TXDESC_MAX-1] -- confirm
 * that enqueued chains never straddle the wrap point.
 */
static int
gfe_free_slots(struct gfe_softc *sc, struct gfe_txqueue *const txq)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
#ifndef __rtems__
	const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
#endif
	int got = 0;
	int fi = txq->txq_fi;
	volatile struct gt_eth_desc *txd = &txq->txq_descs[fi];
	uint32_t cmdsts;
#ifndef __rtems__
	size_t pktlen;
#endif

	GE_FUNC_ENTER(sc, "gfe_free_slots");

#ifdef __rtems__
	do {
#endif
	GE_TXDPOSTSYNC(sc, txq, fi);
	if ((cmdsts = gt32toh(txd->ed_cmdsts)) & TX_CMD_O) {
		int nextin;

		/* only one packet outstanding: nothing reclaimable yet */
		if (txq->txq_nactive == 1) {
			GE_TXDPRESYNC(sc, txq, fi);
			GE_FUNC_EXIT(sc, "");
			return -1;
		}
		/*
		 * Sometimes the Discovery forgets to update the
		 * ownership bit in the descriptor.  See if we own the
		 * descriptor after it (since we know we've turned
		 * that to the Discovery and if we own it now then the
		 * Discovery gave it back).  If we do, we know the
		 * Discovery gave back this one but forgot to mark it
		 * as ours.
		 */
		nextin = fi + 1;
		if (nextin == GE_TXDESC_MAX)
			nextin = 0;
		GE_TXDPOSTSYNC(sc, txq, nextin);
		if (gt32toh(txq->txq_descs[nextin].ed_cmdsts) & TX_CMD_O) {
			GE_TXDPRESYNC(sc, txq, fi);
			GE_TXDPRESYNC(sc, txq, nextin);
			GE_FUNC_EXIT(sc, "");
			return -1;
		}
#ifdef DEBUG
		printf("%s: gfe_free_slots: transmitter resynced at %d\n",
		    sc->sc_dev.dv_xname, fi);
#endif
	}
	got++;
#ifdef __rtems__
	txd++;
	fi++;
	} while ( ! ( TX_CMD_LAST & cmdsts ) );

	/* the whole chain completed; free the mbufs we kept aside */
	{ struct mbuf *m;
	IF_DEQUEUE(&txq->txq_sentq, m);
	m_freem(m);
	}
#endif
#if 0
	GE_DPRINTF(sc, ("([%d]<-%08lx.%08lx.%08lx.%08lx)",
	    txq->txq_lo,
	    ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
	    ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
#endif
	GE_DPRINTF(sc, ("(%d)", fi));
	/* NOTE(review): in the non-RTEMS build 'fi' is never advanced
	 * above, so txq_fi would not move -- that path appears unused
	 * in this port; verify before enabling it. */
	txq->txq_fi = fi;
	if ( txq->txq_fi >= GE_TXDESC_MAX)
		txq->txq_fi -= GE_TXDESC_MAX;
#ifndef __rtems__
	txq->txq_inptr = gt32toh(txd->ed_bufptr) - txq->txq_buf_busaddr;
	pktlen = (gt32toh(txd->ed_lencnt) >> 16) & 0xffff;
	bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map,
	    txq->txq_inptr, pktlen, BUS_DMASYNC_POSTWRITE);
	txq->txq_inptr += roundup(pktlen, dcache_line_size);
#endif

	/* statistics */
	ifp->if_opackets++;
#ifdef __rtems__
	/* FIXME: should we check errors on every fragment? */
#endif
	if (cmdsts & TX_STS_ES)
		ifp->if_oerrors++;

	/* txd->ed_bufptr = 0; */

	txq->txq_nactive -= got;

	GE_FUNC_EXIT(sc, "");

	return got;
}
---|
1505 | |
---|
1506 | #ifndef __rtems__ |
---|
/*
 * (NetBSD variant.)  Move one packet from the pending queue into the
 * transmit ring.  The packet data is copied into the queue's
 * contiguous DMA transmit buffer (txq_outptr is the producer offset,
 * txq_inptr the consumer offset maintained by gfe_free_slots), a
 * single descriptor is filled and the SDMA engine is kicked.
 * Returns 1 when a packet was enqueued, 0 when nothing could be done
 * (empty queue, no descriptors or no buffer space).
 */
int
gfe_tx_enqueue(struct gfe_softc *sc, enum gfe_txprio txprio)
{
#ifndef __rtems__
	const int dcache_line_size = curcpu()->ci_ci.dcache_line_size;
#else
#ifndef PPC_CACHE_ALIGNMENT
#error	"Unknown cache alignment for your CPU"
#endif
	const int dcache_line_size = PPC_CACHE_ALIGNMENT;
#endif
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	volatile struct gt_eth_desc * const txd = &txq->txq_descs[txq->txq_lo];
	uint32_t intrmask = sc->sc_intrmask;
	size_t buflen;
	struct mbuf *m;

	GE_FUNC_ENTER(sc, "gfe_tx_enqueue");

	/*
	 * Anything in the pending queue to enqueue?  if not, punt. Likewise
	 * if the txq is not yet created.
	 * otherwise grab its dmamap.
	 */
	if (txq == NULL || (m = txq->txq_pendq.ifq_head) == NULL) {
		GE_FUNC_EXIT(sc, "-");
		return 0;
	}

	/*
	 * Have we [over]consumed our limit of descriptors?
	 * Do we have enough free descriptors?
	 */
	if (GE_TXDESC_MAX == txq->txq_nactive + 2) {
		if ( gfe_free_slots(sc, txq) <= 0 )
			return 0;
	}

	/* each packet occupies a cache-line-aligned slice of the buffer */
	buflen = roundup(m->m_pkthdr.len, dcache_line_size);

	/*
	 * If this packet would wrap around the end of the buffer, reset back
	 * to the beginning.
	 */
	if (txq->txq_outptr + buflen > GE_TXBUF_SIZE) {
		txq->txq_ei_gapcount += GE_TXBUF_SIZE - txq->txq_outptr;
		txq->txq_outptr = 0;
	}

	/*
	 * Make sure the output packet doesn't run over the beginning of
	 * what we've already given the GT.
	 */
	if (txq->txq_nactive > 0 && txq->txq_outptr <= txq->txq_inptr &&
	    txq->txq_outptr + buflen > txq->txq_inptr) {
		/* no room: ask for a TX-buffer interrupt so we retry later */
		intrmask |= txq->txq_intrbits &
		    (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow);
		if (sc->sc_intrmask != intrmask) {
			sc->sc_intrmask = intrmask;
			GE_WRITE(sc, EIMR, sc->sc_intrmask);
		}
		GE_FUNC_EXIT(sc, "#");
		return 0;
	}

	/*
	 * The end-of-list descriptor we put on last time is the starting point
	 * for this packet.  The GT is supposed to terminate list processing on
	 * a NULL nxtptr but that currently is broken so a CPU-owned descriptor
	 * must terminate the list.
	 */
	intrmask = sc->sc_intrmask;

	/* copy the packet into the DMA buffer and fill the descriptor */
	m_copydata(m, 0, m->m_pkthdr.len,
	    txq->txq_buf_mem.gdm_kva + txq->txq_outptr);
	bus_dmamap_sync(sc->sc_dmat, txq->txq_buf_mem.gdm_map,
	    txq->txq_outptr, buflen, BUS_DMASYNC_PREWRITE);
	txd->ed_bufptr = htogt32(txq->txq_buf_busaddr + txq->txq_outptr);
	txd->ed_lencnt = htogt32(m->m_pkthdr.len << 16);
	GE_TXDPRESYNC(sc, txq, txq->txq_lo);

	/*
	 * Request a buffer interrupt every 2/3 of the way thru the transmit
	 * buffer.
	 */
	txq->txq_ei_gapcount += buflen;
	if (txq->txq_ei_gapcount > 2 * GE_TXBUF_SIZE / 3) {
		txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST|TX_CMD_EI);
		txq->txq_ei_gapcount = 0;
	} else {
		txd->ed_cmdsts = htogt32(TX_CMD_FIRST|TX_CMD_LAST);
	}
#if 0
	GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", txq->txq_lo,
	    ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
	    ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
#endif
	GE_TXDPRESYNC(sc, txq, txq->txq_lo);

	txq->txq_outptr += buflen;
	/*
	 * Tell the SDMA engine to "Fetch!"
	 */
	GE_WRITE(sc, ESDCMR,
	    txq->txq_esdcmrbits & (ETH_ESDCMR_TXDH|ETH_ESDCMR_TXDL));

	GE_DPRINTF(sc, ("(%d)", txq->txq_lo));

	/*
	 * Update the last out appropriately.
	 */
	txq->txq_nactive++;
	if (++txq->txq_lo == GE_TXDESC_MAX)
		txq->txq_lo = 0;

	/*
	 * Move mbuf from the pending queue to the snd queue.
	 */
	IF_DEQUEUE(&txq->txq_pendq, m);
#if NBPFILTER > 0
	if (ifp->if_bpf != NULL)
		bpf_mtap(ifp->if_bpf, m);
#endif
	/* data was copied out above, so the mbuf can be freed now */
	m_freem(m);
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Since we have put an item into the packet queue, we now want
	 * an interrupt when the transmit queue finishes processing the
	 * list.  But only update the mask if needs changing.
	 */
	intrmask |= txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow);
	if (sc->sc_intrmask != intrmask) {
		sc->sc_intrmask = intrmask;
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
	}
	if (ifp->if_timer == 0)
		ifp->if_timer = 5;	/* arm the watchdog */
	GE_FUNC_EXIT(sc, "*");
	return 1;
}
---|
1649 | |
---|
1650 | #else |
---|
1651 | |
---|
#ifdef __PPC__
/*
 * Full memory barrier (PowerPC "sync"): forces descriptor writes to
 * complete before ownership is handed to the DMA engine.
 */
static inline void membarrier(void)
{
	asm volatile("sync":::"memory");
}
#else
#error "memory synchronization for your CPU not implemented"
#endif
---|
1660 | |
---|
1661 | |
---|
/*
 * Fill a TX descriptor from one mbuf fragment: command/status flags
 * (always OR-ing in TX_CMD_GC and TX_CMD_P), the buffer pointer taken
 * from the mbuf's data address, and the fragment length.
 * NOTE(review): ed_cmdsts is written before ed_bufptr/ed_lencnt; if
 * 'flags' ever carries the ownership bit, the caller must order the
 * writes (e.g. via membarrier()) -- confirm against callers.
 */
void
gfe_assign_desc(volatile struct gt_eth_desc *const d, struct mbuf *m, uint32_t flags)
{
	d->ed_cmdsts = htogt32(flags | TX_CMD_GC | TX_CMD_P);
	/* assumes mbuf data virtual address == bus address on this board */
	d->ed_bufptr = htogt32(mtod(m, uint32_t));
	d->ed_lencnt = htogt32(m->m_len << 16);
}
---|
1669 | |
---|
/*
 * Map the mbuf chain at the head of txq's pending queue onto a chain of
 * GT TX descriptors and tell the SDMA engine to fetch it.
 *
 * Descriptor protocol (order matters):
 *   1. fill all descriptors after the first, each already marked owned
 *      (TX_CMD_O);
 *   2. fill the first descriptor (TX_CMD_F) but leave it CPU-owned;
 *   3. mark the last descriptor L|EI and zero the descriptor after it
 *      (a CPU-owned terminator -- the GT's NULL-nxtptr stop is broken);
 *   4. only then flip TX_CMD_O on the first descriptor, so the chip can
 *      never see a partially built chain.
 *
 * Returns 1 if a packet was handed to the hardware, 0 otherwise
 * (nothing pending, or not enough free descriptors right now).
 */
int
gfe_tx_enqueue(struct gfe_softc *sc, enum gfe_txprio txprio)
{
	struct ifnet * const ifp = &sc->sc_ec.ec_if;
	struct gfe_txqueue * const txq = &sc->sc_txq[txprio];
	/* slot that will hold the first descriptor of this packet */
	volatile struct gt_eth_desc * const txd = &txq->txq_descs[txq->txq_lo];
/* successor slot in the ring, wrapping at GE_TXDESC_MAX */
#define NEXT_TXD(d) ((d)+1 < &txq->txq_descs[GE_TXDESC_MAX] ? (d)+1 : txq->txq_descs)
	volatile struct gt_eth_desc *l,*d;
	uint32_t intrmask = sc->sc_intrmask;
	struct mbuf *m_head,*m,*m1;
	int avail, used;

	GE_FUNC_ENTER(sc, "gfe_tx_enqueue");

	/*
	 * Anything in the pending queue to enqueue? if not, punt. Likewise
	 * if the txq is not yet created.
	 * otherwise grab its dmamap.
	 */
	if (txq == NULL || (m_head = txq->txq_pendq.ifq_head) == NULL) {
		GE_FUNC_EXIT(sc, "-");
		return 0;
	}

	/* find 1st mbuf with actual data; m_head is not NULL at this point */
	for ( m1=m_head; 0 == m1->m_len; ) {
		if ( ! (m1=m1->m_next) ) {
			/* nothing to send -- drop the empty chain */
			/* NOTE(review): early return without GE_FUNC_EXIT */
			IF_DEQUEUE(&txq->txq_pendq, m_head);
			m_freem(m_head);
			return 0;
		}
	}

	/* one ring slot is always reserved for the CPU-owned terminator */
	avail = GE_TXDESC_MAX - 1 - txq->txq_nactive;

	/* try to reclaim completed slots before giving up */
	if ( avail < 1 && (avail += gfe_free_slots(sc, txq)) < 1 )
		return 0;

	avail--;

	l = txd;
	d = NEXT_TXD(txd);

	/* map every non-empty mbuf after m1; the first slot is filled last */
	for ( m=m1->m_next, used = 1; m; m=m->m_next ) {
		if ( 0 == m->m_len )
			continue; /* skip empty mbufs */

		if ( avail < 1 && (avail += gfe_free_slots(sc, txq)) < 1 ) {
			/* not enough descriptors; cleanup */
			/* un-own everything we filled so far (after txd) */
			for ( l = NEXT_TXD(txd); l!=d; l = NEXT_TXD(l) ) {
				l->ed_cmdsts = 0;
				avail++;
			}
			avail++;
			if ( used >= GE_TXDESC_MAX-1 )
				panic("mbuf chain (#%i) longer than TX ring (#%i); configuration error!",
					used, GE_TXDESC_MAX-1);
			return 0;
		}
		used++;
		avail--;

		/* fill this slot */
		gfe_assign_desc(d, m, TX_CMD_O);

		/* FIXME: dmamap argument is missing here -- this only
		 * builds if bus_dmamap_sync is a macro that tolerates an
		 * empty argument; supply the real map and verify. */
		bus_dmamap_sync(sc->sc_dmat, /* TODO */,
			mtod(m, uint32_t), m->m_len, BUS_DMASYNC_PREWRITE);

		l = d;
		d = NEXT_TXD(d);

		GE_TXDPRESYNC(sc, txq, l - txq->txq_descs);
	}

	/* fill first slot (frame start); still CPU-owned at this point */
	gfe_assign_desc(txd, m1, TX_CMD_F);

	/* FIXME: same missing dmamap argument as above */
	bus_dmamap_sync(sc->sc_dmat, /* TODO */,
		mtod(m1, uint32_t), m1->m_len, BUS_DMASYNC_PREWRITE);

	/* tag last slot; this covers where 1st = last */
	/* NOTE(review): htonl here vs htogt32 elsewhere -- equivalent only
	 * if the GT is programmed big-endian; confirm. */
	l->ed_cmdsts |= htonl(TX_CMD_L | TX_CMD_EI);

	GE_TXDPRESYNC(sc, txq, l - txq->txq_descs);

	/*
	 * The end-of-list descriptor we put on last time is the starting point
	 * for this packet. The GT is supposed to terminate list processing on
	 * a NULL nxtptr but that currently is broken so a CPU-owned descriptor
	 * must terminate the list.
	 */
	d = NEXT_TXD(l);

	out_be32((uint32_t*)&d->ed_cmdsts,0);

	GE_TXDPRESYNC(sc, txq, d - txq->txq_descs);

	/* order all descriptor stores before the ownership flip below */
	membarrier();

	/* turn over the whole chain by flipping the ownership of the first desc */
	txd->ed_cmdsts |= htonl(TX_CMD_O);

	GE_TXDPRESYNC(sc, txq, txq->txq_lo);


	/* (redundant: intrmask was already initialized from sc_intrmask) */
	intrmask = sc->sc_intrmask;

#if 0
	GE_DPRINTF(sc, ("([%d]->%08lx.%08lx.%08lx.%08lx)", txq->txq_lo,
	    ((unsigned long *)txd)[0], ((unsigned long *)txd)[1],
	    ((unsigned long *)txd)[2], ((unsigned long *)txd)[3]));
#endif

	/* make the ownership flip visible before poking the SDMA engine */
	membarrier();

	/*
	 * Tell the SDMA engine to "Fetch!"
	 */
	GE_WRITE(sc, ESDCMR,
		 txq->txq_esdcmrbits & (ETH_ESDCMR_TXDH|ETH_ESDCMR_TXDL));

	GE_DPRINTF(sc, ("(%d)", txq->txq_lo));

	/*
	 * Update the last out appropriately.
	 */
	txq->txq_nactive += used;
	txq->txq_lo += used;
	if ( txq->txq_lo >= GE_TXDESC_MAX )
		txq->txq_lo -= GE_TXDESC_MAX;

	/*
	 * Move mbuf from the pending queue to the snd queue.
	 */
	IF_DEQUEUE(&txq->txq_pendq, m_head);

	IF_ENQUEUE(&txq->txq_sentq, m_head);

#if NBPFILTER > 0
	if (ifp->if_bpf != NULL)
		bpf_mtap(ifp->if_bpf, m_head);
#endif
	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Since we have put an item into the packet queue, we now want
	 * an interrupt when the transmit queue finishes processing the
	 * list. But only update the mask if needs changing.
	 */
	intrmask |= txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow);
	if (sc->sc_intrmask != intrmask) {
		sc->sc_intrmask = intrmask;
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
	}
	/* arm the watchdog while a transmit is outstanding */
	if (ifp->if_timer == 0)
		ifp->if_timer = 5;
	GE_FUNC_EXIT(sc, "*");
	return 1;
}
---|
1830 | #endif |
---|
1831 | |
---|
1832 | uint32_t |
---|
1833 | gfe_tx_done(struct gfe_softc *sc, enum gfe_txprio txprio, uint32_t intrmask) |
---|
1834 | { |
---|
1835 | struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; |
---|
1836 | struct ifnet * const ifp = &sc->sc_ec.ec_if; |
---|
1837 | |
---|
1838 | GE_FUNC_ENTER(sc, "gfe_tx_done"); |
---|
1839 | |
---|
1840 | if (txq == NULL) { |
---|
1841 | GE_FUNC_EXIT(sc, ""); |
---|
1842 | return intrmask; |
---|
1843 | } |
---|
1844 | |
---|
1845 | while (txq->txq_nactive > 0) { |
---|
1846 | if ( gfe_free_slots(sc, txq) < 0 ) |
---|
1847 | return intrmask; |
---|
1848 | ifp->if_timer = 5; |
---|
1849 | } |
---|
1850 | if (txq->txq_nactive != 0) |
---|
1851 | panic("%s: transmit fifo%d empty but active count (%d) not 0!", |
---|
1852 | sc->sc_dev.dv_xname, txprio, txq->txq_nactive); |
---|
1853 | ifp->if_timer = 0; |
---|
1854 | intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxEndHigh|ETH_IR_TxEndLow)); |
---|
1855 | intrmask &= ~(txq->txq_intrbits & (ETH_IR_TxBufferHigh|ETH_IR_TxBufferLow)); |
---|
1856 | GE_FUNC_EXIT(sc, ""); |
---|
1857 | return intrmask; |
---|
1858 | } |
---|
1859 | |
---|
1860 | int |
---|
1861 | gfe_tx_txqalloc(struct gfe_softc *sc, enum gfe_txprio txprio) |
---|
1862 | { |
---|
1863 | struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; |
---|
1864 | int error; |
---|
1865 | |
---|
1866 | GE_FUNC_ENTER(sc, "gfe_tx_txqalloc"); |
---|
1867 | |
---|
1868 | error = gfe_dmamem_alloc(sc, &txq->txq_desc_mem, 1, |
---|
1869 | GE_TXDESC_MEMSIZE, BUS_DMA_NOCACHE); |
---|
1870 | if (error) { |
---|
1871 | GE_FUNC_EXIT(sc, ""); |
---|
1872 | return error; |
---|
1873 | } |
---|
1874 | #ifndef __rtems__ |
---|
1875 | error = gfe_dmamem_alloc(sc, &txq->txq_buf_mem, 1, GE_TXBUF_SIZE, 0); |
---|
1876 | if (error) { |
---|
1877 | gfe_dmamem_free(sc, &txq->txq_desc_mem); |
---|
1878 | GE_FUNC_EXIT(sc, ""); |
---|
1879 | return error; |
---|
1880 | } |
---|
1881 | #endif |
---|
1882 | GE_FUNC_EXIT(sc, ""); |
---|
1883 | return 0; |
---|
1884 | } |
---|
1885 | |
---|
1886 | int |
---|
1887 | gfe_tx_start(struct gfe_softc *sc, enum gfe_txprio txprio) |
---|
1888 | { |
---|
1889 | struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; |
---|
1890 | volatile struct gt_eth_desc *txd; |
---|
1891 | unsigned int i; |
---|
1892 | bus_addr_t addr; |
---|
1893 | |
---|
1894 | GE_FUNC_ENTER(sc, "gfe_tx_start"); |
---|
1895 | |
---|
1896 | sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh| |
---|
1897 | ETH_IR_TxEndLow |ETH_IR_TxBufferLow); |
---|
1898 | |
---|
1899 | if (sc->sc_flags & GE_NOFREE) { |
---|
1900 | KASSERT(txq->txq_desc_mem.gdm_kva != NULL); |
---|
1901 | #ifndef __rtems__ |
---|
1902 | KASSERT(txq->txq_buf_mem.gdm_kva != NULL); |
---|
1903 | #endif |
---|
1904 | } else { |
---|
1905 | int error = gfe_tx_txqalloc(sc, txprio); |
---|
1906 | if (error) { |
---|
1907 | GE_FUNC_EXIT(sc, "!"); |
---|
1908 | return error; |
---|
1909 | } |
---|
1910 | } |
---|
1911 | |
---|
1912 | txq->txq_descs = |
---|
1913 | (volatile struct gt_eth_desc *) txq->txq_desc_mem.gdm_kva; |
---|
1914 | txq->txq_desc_busaddr = txq->txq_desc_mem.gdm_map->dm_segs[0].ds_addr; |
---|
1915 | #ifndef __rtems__ |
---|
1916 | txq->txq_buf_busaddr = txq->txq_buf_mem.gdm_map->dm_segs[0].ds_addr; |
---|
1917 | #else |
---|
1918 | /* never used */ |
---|
1919 | memset(&txq->txq_pendq,0,sizeof(txq->txq_pendq)); |
---|
1920 | memset(&txq->txq_sentq,0,sizeof(txq->txq_sentq)); |
---|
1921 | txq->txq_sentq.ifq_maxlen = 100000; |
---|
1922 | #endif |
---|
1923 | |
---|
1924 | txq->txq_pendq.ifq_maxlen = 10; |
---|
1925 | #ifndef __rtems__ |
---|
1926 | txq->txq_ei_gapcount = 0; |
---|
1927 | #endif |
---|
1928 | txq->txq_nactive = 0; |
---|
1929 | txq->txq_fi = 0; |
---|
1930 | txq->txq_lo = 0; |
---|
1931 | #ifndef __rtems__ |
---|
1932 | txq->txq_ei_gapcount = 0; |
---|
1933 | txq->txq_inptr = GE_TXBUF_SIZE; |
---|
1934 | txq->txq_outptr = 0; |
---|
1935 | #endif |
---|
1936 | for (i = 0, txd = txq->txq_descs, |
---|
1937 | addr = txq->txq_desc_busaddr + sizeof(*txd); |
---|
1938 | i < GE_TXDESC_MAX - 1; |
---|
1939 | i++, txd++, addr += sizeof(*txd)) { |
---|
1940 | /* |
---|
1941 | * update the nxtptr to point to the next txd. |
---|
1942 | */ |
---|
1943 | txd->ed_cmdsts = 0; |
---|
1944 | txd->ed_nxtptr = htogt32(addr); |
---|
1945 | } |
---|
1946 | txq->txq_descs[GE_TXDESC_MAX-1].ed_nxtptr = |
---|
1947 | htogt32(txq->txq_desc_busaddr); |
---|
1948 | bus_dmamap_sync(sc->sc_dmat, txq->txq_desc_mem.gdm_map, 0, |
---|
1949 | GE_TXDESC_MEMSIZE, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); |
---|
1950 | |
---|
1951 | switch (txprio) { |
---|
1952 | case GE_TXPRIO_HI: |
---|
1953 | txq->txq_intrbits = ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh; |
---|
1954 | txq->txq_esdcmrbits = ETH_ESDCMR_TXDH; |
---|
1955 | txq->txq_epsrbits = ETH_EPSR_TxHigh; |
---|
1956 | txq->txq_ectdp = ETH_ECTDP1(sc->sc_macno); |
---|
1957 | GE_WRITE(sc, ECTDP1, txq->txq_desc_busaddr); |
---|
1958 | break; |
---|
1959 | |
---|
1960 | case GE_TXPRIO_LO: |
---|
1961 | txq->txq_intrbits = ETH_IR_TxEndLow|ETH_IR_TxBufferLow; |
---|
1962 | txq->txq_esdcmrbits = ETH_ESDCMR_TXDL; |
---|
1963 | txq->txq_epsrbits = ETH_EPSR_TxLow; |
---|
1964 | txq->txq_ectdp = ETH_ECTDP0(sc->sc_macno); |
---|
1965 | GE_WRITE(sc, ECTDP0, txq->txq_desc_busaddr); |
---|
1966 | break; |
---|
1967 | |
---|
1968 | case GE_TXPRIO_NONE: |
---|
1969 | break; |
---|
1970 | } |
---|
1971 | #if 0 |
---|
1972 | GE_DPRINTF(sc, ("(ectdp=%#x", txq->txq_ectdp)); |
---|
1973 | gt_write(sc->sc_dev.dv_parent, txq->txq_ectdp, txq->txq_desc_busaddr); |
---|
1974 | GE_DPRINTF(sc, (")")); |
---|
1975 | #endif |
---|
1976 | |
---|
1977 | /* |
---|
1978 | * If we are restarting, there may be packets in the pending queue |
---|
1979 | * waiting to be enqueued. Try enqueuing packets from both priority |
---|
1980 | * queues until the pending queue is empty or there no room for them |
---|
1981 | * on the device. |
---|
1982 | */ |
---|
1983 | while (gfe_tx_enqueue(sc, txprio)) |
---|
1984 | continue; |
---|
1985 | |
---|
1986 | GE_FUNC_EXIT(sc, ""); |
---|
1987 | return 0; |
---|
1988 | } |
---|
1989 | |
---|
1990 | void |
---|
1991 | gfe_tx_cleanup(struct gfe_softc *sc, enum gfe_txprio txprio, int flush) |
---|
1992 | { |
---|
1993 | struct gfe_txqueue * const txq = &sc->sc_txq[txprio]; |
---|
1994 | |
---|
1995 | GE_FUNC_ENTER(sc, "gfe_tx_cleanup"); |
---|
1996 | if (txq == NULL) { |
---|
1997 | GE_FUNC_EXIT(sc, ""); |
---|
1998 | return; |
---|
1999 | } |
---|
2000 | |
---|
2001 | if (!flush) { |
---|
2002 | GE_FUNC_EXIT(sc, ""); |
---|
2003 | return; |
---|
2004 | } |
---|
2005 | |
---|
2006 | #ifdef __rtems__ |
---|
2007 | /* reclaim mbufs that were never sent */ |
---|
2008 | { |
---|
2009 | struct mbuf *m; |
---|
2010 | while ( txq->txq_sentq.ifq_head ) { |
---|
2011 | IF_DEQUEUE(&txq->txq_sentq, m); |
---|
2012 | m_freem(m); |
---|
2013 | } |
---|
2014 | } |
---|
2015 | #endif |
---|
2016 | |
---|
2017 | if ((sc->sc_flags & GE_NOFREE) == 0) { |
---|
2018 | gfe_dmamem_free(sc, &txq->txq_desc_mem); |
---|
2019 | #ifndef __rtems__ |
---|
2020 | gfe_dmamem_free(sc, &txq->txq_buf_mem); |
---|
2021 | #endif |
---|
2022 | } |
---|
2023 | GE_FUNC_EXIT(sc, "-F"); |
---|
2024 | } |
---|
2025 | |
---|
/*
 * Stop both TX priority queues: abort the SDMA transmit channels,
 * reclaim whatever completed, mask all TX interrupt bits, and -- only
 * for a full stop (GE_WHACK_STOP) -- release the queue resources.
 */
void
gfe_tx_stop(struct gfe_softc *sc, enum gfe_whack_op op)
{
	GE_FUNC_ENTER(sc, "gfe_tx_stop");

	/* abort high- and low-priority TX DMA */
	GE_WRITE(sc, ESDCMR, ETH_ESDCMR_STDH|ETH_ESDCMR_STDL);

	sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, sc->sc_intrmask);
	sc->sc_intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, sc->sc_intrmask);
	sc->sc_intrmask &= ~(ETH_IR_TxEndHigh|ETH_IR_TxBufferHigh|
	    ETH_IR_TxEndLow |ETH_IR_TxBufferLow);

	/* flush (free resources) only on a full stop */
	gfe_tx_cleanup(sc, GE_TXPRIO_HI, op == GE_WHACK_STOP);
	gfe_tx_cleanup(sc, GE_TXPRIO_LO, op == GE_WHACK_STOP);

	sc->sc_ec.ec_if.if_timer = 0;
	GE_FUNC_EXIT(sc, "");
}
---|
2044 | |
---|
/*
 * Interrupt handler.  Loops up to four times reading and acknowledging
 * the interrupt-cause register, dispatching RX and TX completions, and
 * noting PHY status changes.  After the cause bits are drained it tries
 * to push out any packets that were waiting for free TX slots.
 * Returns non-zero iff the interrupt was ours.
 */
int
gfe_intr(void *arg)
{
	struct gfe_softc * const sc = arg;
	uint32_t cause;
	uint32_t intrmask = sc->sc_intrmask;
	int claim = 0;
	int cnt;

	GE_FUNC_ENTER(sc, "gfe_intr");

	/* bounded loop: service at most 4 rounds of cause bits */
	for (cnt = 0; cnt < 4; cnt++) {
		/* flush any mask change from the previous round to the chip */
		if (sc->sc_intrmask != intrmask) {
			sc->sc_intrmask = intrmask;
			GE_WRITE(sc, EIMR, sc->sc_intrmask);
		}
		cause = GE_READ(sc, EICR);
		cause &= sc->sc_intrmask;
		GE_DPRINTF(sc, (".%#" PRIx32, cause));
		if (cause == 0)
			break;

		claim = 1;

		/* acknowledge only the bits we observed */
		GE_WRITE(sc, EICR, ~cause);
#ifndef GE_NORX
		if (cause & (ETH_IR_RxBuffer|ETH_IR_RxError))
			intrmask = gfe_rx_process(sc, cause, intrmask);
#endif

#ifndef GE_NOTX
		if (cause & (ETH_IR_TxBufferHigh|ETH_IR_TxEndHigh))
			intrmask = gfe_tx_done(sc, GE_TXPRIO_HI, intrmask);
		if (cause & (ETH_IR_TxBufferLow|ETH_IR_TxEndLow))
			intrmask = gfe_tx_done(sc, GE_TXPRIO_LO, intrmask);
#endif
		/* just record PHY status changes; polled later via
		 * gfe_mii_mediastatus() */
		if (cause & ETH_IR_MIIPhySTC) {
			sc->sc_flags |= GE_PHYSTSCHG;
			/* intrmask &= ~ETH_IR_MIIPhySTC; */
		}
	}

	/* TX slots may have freed up -- drain the pending queues */
	while (gfe_tx_enqueue(sc, GE_TXPRIO_HI))
		continue;
	while (gfe_tx_enqueue(sc, GE_TXPRIO_LO))
		continue;

	GE_FUNC_EXIT(sc, "");
	return claim;
}
---|
2095 | |
---|
2096 | #ifndef __rtems__ |
---|
2097 | int |
---|
2098 | gfe_mii_mediachange (struct ifnet *ifp) |
---|
2099 | { |
---|
2100 | struct gfe_softc *sc = ifp->if_softc; |
---|
2101 | |
---|
2102 | if (ifp->if_flags & IFF_UP) |
---|
2103 | mii_mediachg(&sc->sc_mii); |
---|
2104 | |
---|
2105 | return (0); |
---|
2106 | } |
---|
2107 | void |
---|
2108 | gfe_mii_mediastatus (struct ifnet *ifp, struct ifmediareq *ifmr) |
---|
2109 | { |
---|
2110 | struct gfe_softc *sc = ifp->if_softc; |
---|
2111 | |
---|
2112 | if (sc->sc_flags & GE_PHYSTSCHG) { |
---|
2113 | sc->sc_flags &= ~GE_PHYSTSCHG; |
---|
2114 | mii_pollstat(&sc->sc_mii); |
---|
2115 | } |
---|
2116 | ifmr->ifm_status = sc->sc_mii.mii_media_status; |
---|
2117 | ifmr->ifm_active = sc->sc_mii.mii_media_active; |
---|
2118 | } |
---|
2119 | |
---|
/*
 * MII read accessor: delegate to the parent GT device's MDIO routine.
 */
int
gfe_mii_read (struct device *self, int phy, int reg)
{
	return gt_mii_read(self, self->dv_parent, phy, reg);
}
---|
2125 | |
---|
/*
 * MII write accessor: delegate to the parent GT device's MDIO routine.
 */
void
gfe_mii_write (struct device *self, int phy, int reg, int value)
{
	gt_mii_write(self, self->dv_parent, phy, reg, value);
}
---|
2131 | |
---|
/*
 * MII status-change callback.  Deliberately empty as far as the
 * original author left it ("do nothing?") -- the MAC apparently needs
 * no reprogramming on a PHY status change; confirm before relying on
 * this for new media types.
 */
void
gfe_mii_statchg (struct device *self)
{
	/* struct gfe_softc *sc = (struct gfe_softc *) self; */
	/* do nothing? */
}
---|
2138 | |
---|
2139 | #else |
---|
2140 | int |
---|
2141 | gfe_mii_read(int phy, void *arg, unsigned reg, uint32_t *pval) |
---|
2142 | { |
---|
2143 | struct gfe_softc *sc = arg; |
---|
2144 | uint32_t data; |
---|
2145 | int count = 10000; |
---|
2146 | |
---|
2147 | if ( 0 != phy ) |
---|
2148 | return -1; /* invalid index */ |
---|
2149 | |
---|
2150 | phy = sc->sc_phyaddr; |
---|
2151 | |
---|
2152 | do { |
---|
2153 | DELAY(10); |
---|
2154 | data = GT_READ(sc, ETH_ESMIR); |
---|
2155 | } while ((data & ETH_ESMIR_Busy) && count-- > 0); |
---|
2156 | |
---|
2157 | if (count == 0) { |
---|
2158 | fprintf(stderr,"%s: mii read for phy %d reg %d busied out\n", |
---|
2159 | sc->sc_dev.dv_xname, phy, reg); |
---|
2160 | *pval = ETH_ESMIR_Value_GET(data); |
---|
2161 | return -1; |
---|
2162 | } |
---|
2163 | |
---|
2164 | GT_WRITE(sc, ETH_ESMIR, ETH_ESMIR_READ(phy, reg)); |
---|
2165 | |
---|
2166 | count = 10000; |
---|
2167 | do { |
---|
2168 | DELAY(10); |
---|
2169 | data = GT_READ(sc, ETH_ESMIR); |
---|
2170 | } while ((data & ETH_ESMIR_ReadValid) == 0 && count-- > 0); |
---|
2171 | |
---|
2172 | if (count == 0) |
---|
2173 | printf("%s: mii read for phy %d reg %d timed out\n", |
---|
2174 | sc->sc_dev.dv_xname, phy, reg); |
---|
2175 | #if defined(GTMIIDEBUG) |
---|
2176 | printf("%s: mii_read(%d, %d): %#x data %#x\n", |
---|
2177 | sc->sc_dev.dv_xname, phy, reg, |
---|
2178 | data, ETH_ESMIR_Value_GET(data)); |
---|
2179 | #endif |
---|
2180 | *pval = ETH_ESMIR_Value_GET(data); |
---|
2181 | return 0; |
---|
2182 | } |
---|
2183 | |
---|
2184 | int |
---|
2185 | gfe_mii_write(int phy, void *arg, unsigned reg, uint32_t value) |
---|
2186 | { |
---|
2187 | struct gfe_softc *sc = arg; |
---|
2188 | uint32_t data; |
---|
2189 | int count = 10000; |
---|
2190 | |
---|
2191 | if ( 0 != phy ) |
---|
2192 | return -1; /* invalid index */ |
---|
2193 | |
---|
2194 | phy = sc->sc_phyaddr; |
---|
2195 | |
---|
2196 | do { |
---|
2197 | DELAY(10); |
---|
2198 | data = GT_READ(sc, ETH_ESMIR); |
---|
2199 | } while ((data & ETH_ESMIR_Busy) && count-- > 0); |
---|
2200 | |
---|
2201 | if (count == 0) { |
---|
2202 | fprintf(stderr, "%s: mii write for phy %d reg %d busied out (busy)\n", |
---|
2203 | sc->sc_dev.dv_xname, phy, reg); |
---|
2204 | return -1; |
---|
2205 | } |
---|
2206 | |
---|
2207 | GT_WRITE(sc, ETH_ESMIR, |
---|
2208 | ETH_ESMIR_WRITE(phy, reg, value)); |
---|
2209 | |
---|
2210 | count = 10000; |
---|
2211 | do { |
---|
2212 | DELAY(10); |
---|
2213 | data = GT_READ(sc, ETH_ESMIR); |
---|
2214 | } while ((data & ETH_ESMIR_Busy) && count-- > 0); |
---|
2215 | |
---|
2216 | if (count == 0) |
---|
2217 | printf("%s: mii write for phy %d reg %d timed out\n", |
---|
2218 | sc->sc_dev.dv_xname, phy, reg); |
---|
2219 | #if defined(GTMIIDEBUG) |
---|
2220 | printf("%s: mii_write(%d, %d, %#x)\n", |
---|
2221 | sc->sc_dev.dv_xname, phy, reg, value); |
---|
2222 | #endif |
---|
2223 | return 0; |
---|
2224 | } |
---|
2225 | |
---|
2226 | #endif |
---|
/*
 * Master state-change routine ("whack" the interface).  The switch uses
 * deliberate fallthroughs: RESTART stops TX then falls into START,
 * which (re)builds the hash table, RX and TX rings and enables the MAC,
 * then falls into CHANGE, which reprograms the port config/interrupt
 * mask and kicks the output queue.  STOP (or any error above) drops
 * through to the quiesce path at the bottom.  Returns 0 or an errno.
 */
int
gfe_whack(struct gfe_softc *sc, enum gfe_whack_op op)
{
	int error = 0;
	GE_FUNC_ENTER(sc, "gfe_whack");

	switch (op) {
	case GE_WHACK_RESTART:
#ifndef GE_NOTX
		gfe_tx_stop(sc, op);
#endif
		/* sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING; */
		/* FALLTHROUGH */
	case GE_WHACK_START:
#ifndef GE_NOHASH
		/* lazily allocate the address-filter hash table */
		if (error == 0 && sc->sc_hashtable == NULL) {
			error = gfe_hash_alloc(sc);
			if (error)
				break;
		}
		if (op != GE_WHACK_RESTART)
			gfe_hash_fill(sc);
#endif
#ifndef GE_NORX
		if (op != GE_WHACK_RESTART) {
			error = gfe_rx_prime(sc);
			if (error)
				break;
		}
#endif
#ifndef GE_NOTX
		error = gfe_tx_start(sc, GE_TXPRIO_HI);
		if (error)
			break;
#endif
		/* enable the port and unmask interrupts */
		sc->sc_ec.ec_if.if_flags |= IFF_RUNNING;
		GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN);
		GE_WRITE(sc, EPCXR, sc->sc_pcxr);
		GE_WRITE(sc, EICR, 0);
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
#ifndef GE_NOHASH
		GE_WRITE(sc, EHTPR, sc->sc_hash_mem.gdm_map->dm_segs->ds_addr);
#endif
#ifndef GE_NORX
		/* start RX DMA */
		GE_WRITE(sc, ESDCMR, ETH_ESDCMR_ERD);
		sc->sc_flags |= GE_RXACTIVE;
#endif
		/* FALLTHROUGH */
	case GE_WHACK_CHANGE:
		GE_DPRINTF(sc, ("(pcr=%#" PRIx32 ",imr=%#" PRIx32 ")",
		    GE_READ(sc, EPCR), GE_READ(sc, EIMR)));
		GE_WRITE(sc, EPCR, sc->sc_pcr | ETH_EPCR_EN);
		GE_WRITE(sc, EIMR, sc->sc_intrmask);
		gfe_ifstart(&sc->sc_ec.ec_if);
		GE_DPRINTF(sc, ("(ectdp0=%#" PRIx32 ", ectdp1=%#" PRIx32 ")",
		    GE_READ(sc, ECTDP0), GE_READ(sc, ECTDP1)));
		GE_FUNC_EXIT(sc, "");
		return error;
	case GE_WHACK_STOP:
		break;
	}

#ifdef GE_DEBUG
	if (error)
		GE_DPRINTF(sc, (" failed: %d\n", error));
#endif
	/* quiesce path: disable the port, mask interrupts, stop DMA,
	 * and free the hash table unless GE_NOFREE keeps it around */
	GE_WRITE(sc, EPCR, sc->sc_pcr);
	GE_WRITE(sc, EIMR, 0);
	sc->sc_ec.ec_if.if_flags &= ~IFF_RUNNING;
#ifndef GE_NOTX
	gfe_tx_stop(sc, GE_WHACK_STOP);
#endif
#ifndef GE_NORX
	gfe_rx_stop(sc, GE_WHACK_STOP);
#endif
#ifndef GE_NOHASH
	if ((sc->sc_flags & GE_NOFREE) == 0) {
		gfe_dmamem_free(sc, &sc->sc_hash_mem);
		sc->sc_hashtable = NULL;
	}
#endif

	GE_FUNC_EXIT(sc, "");
	return error;
}
---|
2312 | |
---|
/*
 * Compute the GT address-filter hash-table index for an ethernet
 * address.  Each address byte is first nibble-swapped/bit-reversed
 * (the swap/mask cascades below), then one of the chip's two hash
 * functions is applied depending on ETH_EPCR_HM in the port config.
 * The result is masked to 11 or 15 bits per ETH_EPCR_HS_512.
 */
int
gfe_hash_compute(struct gfe_softc *sc, const uint8_t eaddr[ETHER_ADDR_LEN])
{
	uint32_t w0, add0, add1;
	uint32_t result;
#ifdef __rtems__
	SPRINTFVARDECL;
#endif

	GE_FUNC_ENTER(sc, "gfe_hash_compute");
	/* low 3 address bytes, then bit-reverse each byte in place */
	add0 = ((uint32_t) eaddr[5] << 0) |
	    ((uint32_t) eaddr[4] << 8) |
	    ((uint32_t) eaddr[3] << 16);

	add0 = ((add0 & 0x00f0f0f0) >> 4) | ((add0 & 0x000f0f0f) << 4);
	add0 = ((add0 & 0x00cccccc) >> 2) | ((add0 & 0x00333333) << 2);
	add0 = ((add0 & 0x00aaaaaa) >> 1) | ((add0 & 0x00555555) << 1);

	/* high 3 address bytes, same per-byte bit reversal */
	add1 = ((uint32_t) eaddr[2] << 0) |
	    ((uint32_t) eaddr[1] << 8) |
	    ((uint32_t) eaddr[0] << 16);

	add1 = ((add1 & 0x00f0f0f0) >> 4) | ((add1 & 0x000f0f0f) << 4);
	add1 = ((add1 & 0x00cccccc) >> 2) | ((add1 & 0x00333333) << 2);
	add1 = ((add1 & 0x00aaaaaa) >> 1) | ((add1 & 0x00555555) << 1);

	GE_DPRINTF(sc, ("%s=", ether_sprintf(eaddr)));
	/*
	 * hashResult is the 15 bits Hash entry address.
	 * ethernetADD is a 48 bit number, which is derived from the Ethernet
	 * MAC address, by nibble swapping in every byte (i.e MAC address
	 * of 0x123456789abc translates to ethernetADD of 0x21436587a9cb).
	 */

	if ((sc->sc_pcr & ETH_EPCR_HM) == 0) {
		/*
		 * hashResult[14:0] = hashFunc0(ethernetADD[47:0])
		 *
		 * hashFunc0 calculates the hashResult in the following manner:
		 * hashResult[ 8:0] = ethernetADD[14:8,1,0]
		 * XOR ethernetADD[23:15] XOR ethernetADD[32:24]
		 */
		result = (add0 & 3) | ((add0 >> 6) & ~3);
		result ^= (add0 >> 15) ^ (add1 >> 0);
		result &= 0x1ff;
		/*
		 * hashResult[14:9] = ethernetADD[7:2]
		 */
		result |= (add0 & ~3) << 7;	/* excess bits will be masked */
		GE_DPRINTF(sc, ("0(%#"PRIx32")", result & 0x7fff));
	} else {
#define TRIBITFLIP	073516240	/* yes its in octal */
		/*
		 * hashResult[14:0] = hashFunc1(ethernetADD[47:0])
		 *
		 * hashFunc1 calculates the hashResult in the following manner:
		 * hashResult[08:00] = ethernetADD[06:14]
		 * XOR ethernetADD[15:23] XOR ethernetADD[24:32]
		 */
		w0 = ((add0 >> 6) ^ (add0 >> 15) ^ (add1)) & 0x1ff;
		/*
		 * Now bitswap those 9 bits
		 */
		result = 0;
		result |= ((TRIBITFLIP >> (((w0 >> 0) & 7) * 3)) & 7) << 6;
		result |= ((TRIBITFLIP >> (((w0 >> 3) & 7) * 3)) & 7) << 3;
		result |= ((TRIBITFLIP >> (((w0 >> 6) & 7) * 3)) & 7) << 0;

		/*
		 * hashResult[14:09] = ethernetADD[00:05]
		 */
		result |= ((TRIBITFLIP >> (((add0 >> 0) & 7) * 3)) & 7) << 12;
		result |= ((TRIBITFLIP >> (((add0 >> 3) & 7) * 3)) & 7) << 9;
		GE_DPRINTF(sc, ("1(%#"PRIx32")", result));
	}
	GE_FUNC_EXIT(sc, "");
	/* table size is 512 or 8K entries depending on ETH_EPCR_HS_512 */
	return result & ((sc->sc_pcr & ETH_EPCR_HS_512) ? 0x7ff : 0x7fff);
}
---|
2391 | |
---|
2392 | int |
---|
2393 | gfe_hash_entry_op(struct gfe_softc *sc, enum gfe_hash_op op, |
---|
2394 | enum gfe_rxprio prio, const uint8_t eaddr[ETHER_ADDR_LEN]) |
---|
2395 | { |
---|
2396 | uint64_t he; |
---|
2397 | uint64_t *maybe_he_p = NULL; |
---|
2398 | int limit; |
---|
2399 | int hash; |
---|
2400 | #ifndef __rtems__ |
---|
2401 | int maybe_hash = 0; |
---|
2402 | #endif /* __rtems__ */ |
---|
2403 | |
---|
2404 | GE_FUNC_ENTER(sc, "gfe_hash_entry_op"); |
---|
2405 | |
---|
2406 | hash = gfe_hash_compute(sc, eaddr); |
---|
2407 | |
---|
2408 | if (sc->sc_hashtable == NULL) { |
---|
2409 | panic("%s:%d: hashtable == NULL!", sc->sc_dev.dv_xname, |
---|
2410 | __LINE__); |
---|
2411 | } |
---|
2412 | |
---|
2413 | /* |
---|
2414 | * Assume we are going to insert so create the hash entry we |
---|
2415 | * are going to insert. We also use it to match entries we |
---|
2416 | * will be removing. |
---|
2417 | */ |
---|
2418 | he = ((uint64_t) eaddr[5] << 43) | |
---|
2419 | ((uint64_t) eaddr[4] << 35) | |
---|
2420 | ((uint64_t) eaddr[3] << 27) | |
---|
2421 | ((uint64_t) eaddr[2] << 19) | |
---|
2422 | ((uint64_t) eaddr[1] << 11) | |
---|
2423 | ((uint64_t) eaddr[0] << 3) | |
---|
2424 | HSH_PRIO_INS(prio) | HSH_V | HSH_R; |
---|
2425 | |
---|
2426 | /* |
---|
2427 | * The GT will search upto 12 entries for a hit, so we must mimic that. |
---|
2428 | */ |
---|
2429 | hash &= sc->sc_hashmask / sizeof(he); |
---|
2430 | for (limit = HSH_LIMIT; limit > 0 ; --limit) { |
---|
2431 | /* |
---|
2432 | * Does the GT wrap at the end, stop at the, or overrun the |
---|
2433 | * end? Assume it wraps for now. Stash a copy of the |
---|
2434 | * current hash entry. |
---|
2435 | */ |
---|
2436 | uint64_t *he_p = &sc->sc_hashtable[hash]; |
---|
2437 | uint64_t thishe = *he_p; |
---|
2438 | |
---|
2439 | /* |
---|
2440 | * If the hash entry isn't valid, that break the chain. And |
---|
2441 | * this entry a good candidate for reuse. |
---|
2442 | */ |
---|
2443 | if ((thishe & HSH_V) == 0) { |
---|
2444 | maybe_he_p = he_p; |
---|
2445 | break; |
---|
2446 | } |
---|
2447 | |
---|
2448 | /* |
---|
2449 | * If the hash entry has the same address we are looking for |
---|
2450 | * then ... if we are removing and the skip bit is set, its |
---|
2451 | * already been removed. if are adding and the skip bit is |
---|
2452 | * clear, then its already added. In either return EBUSY |
---|
2453 | * indicating the op has already been done. Otherwise flip |
---|
2454 | * the skip bit and return 0. |
---|
2455 | */ |
---|
2456 | if (((he ^ thishe) & HSH_ADDR_MASK) == 0) { |
---|
2457 | if (((op == GE_HASH_REMOVE) && (thishe & HSH_S)) || |
---|
2458 | ((op == GE_HASH_ADD) && (thishe & HSH_S) == 0)) |
---|
2459 | return EBUSY; |
---|
2460 | *he_p = thishe ^ HSH_S; |
---|
2461 | bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, |
---|
2462 | hash * sizeof(he), sizeof(he), |
---|
2463 | BUS_DMASYNC_PREWRITE); |
---|
2464 | GE_FUNC_EXIT(sc, "^"); |
---|
2465 | return 0; |
---|
2466 | } |
---|
2467 | |
---|
2468 | /* |
---|
2469 | * If we haven't found a slot for the entry and this entry |
---|
2470 | * is currently being skipped, return this entry. |
---|
2471 | */ |
---|
2472 | if (maybe_he_p == NULL && (thishe & HSH_S)) { |
---|
2473 | maybe_he_p = he_p; |
---|
2474 | #ifndef __rtems__ |
---|
2475 | maybe_hash = hash; |
---|
2476 | #endif /* __rtems__ */ |
---|
2477 | } |
---|
2478 | |
---|
2479 | hash = (hash + 1) & (sc->sc_hashmask / sizeof(he)); |
---|
2480 | } |
---|
2481 | |
---|
2482 | /* |
---|
2483 | * If we got here, then there was no entry to remove. |
---|
2484 | */ |
---|
2485 | if (op == GE_HASH_REMOVE) { |
---|
2486 | GE_FUNC_EXIT(sc, "?"); |
---|
2487 | return ENOENT; |
---|
2488 | } |
---|
2489 | |
---|
2490 | /* |
---|
2491 | * If we couldn't find a slot, return an error. |
---|
2492 | */ |
---|
2493 | if (maybe_he_p == NULL) { |
---|
2494 | GE_FUNC_EXIT(sc, "!"); |
---|
2495 | return ENOSPC; |
---|
2496 | } |
---|
2497 | |
---|
2498 | /* Update the entry. |
---|
2499 | */ |
---|
2500 | *maybe_he_p = he; |
---|
2501 | bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, |
---|
2502 | maybe_hash * sizeof(he), sizeof(he), BUS_DMASYNC_PREWRITE); |
---|
2503 | GE_FUNC_EXIT(sc, "+"); |
---|
2504 | return 0; |
---|
2505 | } |
---|
2506 | |
---|
2507 | #ifndef __rtems__ |
---|
2508 | int |
---|
2509 | gfe_hash_multichg(struct ethercom *ec, const struct ether_multi *enm, u_long cmd) |
---|
2510 | { |
---|
2511 | struct gfe_softc * const sc = ec->ec_if.if_softc; |
---|
2512 | int error; |
---|
2513 | enum gfe_hash_op op; |
---|
2514 | enum gfe_rxprio prio; |
---|
2515 | #ifdef __rtems__ |
---|
2516 | SPRINTFVARDECL; |
---|
2517 | #endif |
---|
2518 | |
---|
2519 | GE_FUNC_ENTER(sc, "hash_multichg"); |
---|
2520 | /* |
---|
2521 | * Is this a wildcard entry? If so and its being removed, recompute. |
---|
2522 | */ |
---|
2523 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN) != 0) { |
---|
2524 | if (cmd == SIOCDELMULTI) { |
---|
2525 | GE_FUNC_EXIT(sc, ""); |
---|
2526 | return ENETRESET; |
---|
2527 | } |
---|
2528 | |
---|
2529 | /* |
---|
2530 | * Switch in |
---|
2531 | */ |
---|
2532 | sc->sc_flags |= GE_ALLMULTI; |
---|
2533 | if ((sc->sc_pcr & ETH_EPCR_PM) == 0) { |
---|
2534 | sc->sc_pcr |= ETH_EPCR_PM; |
---|
2535 | GE_WRITE(sc, EPCR, sc->sc_pcr); |
---|
2536 | GE_FUNC_EXIT(sc, ""); |
---|
2537 | return 0; |
---|
2538 | } |
---|
2539 | GE_FUNC_EXIT(sc, ""); |
---|
2540 | return ENETRESET; |
---|
2541 | } |
---|
2542 | |
---|
2543 | prio = GE_RXPRIO_MEDLO; |
---|
2544 | op = (cmd == SIOCDELMULTI ? GE_HASH_REMOVE : GE_HASH_ADD); |
---|
2545 | |
---|
2546 | if (sc->sc_hashtable == NULL) { |
---|
2547 | GE_FUNC_EXIT(sc, ""); |
---|
2548 | return 0; |
---|
2549 | } |
---|
2550 | |
---|
2551 | error = gfe_hash_entry_op(sc, op, prio, enm->enm_addrlo); |
---|
2552 | if (error == EBUSY) { |
---|
2553 | printf("%s: multichg: tried to %s %s again\n", |
---|
2554 | sc->sc_dev.dv_xname, |
---|
2555 | cmd == SIOCDELMULTI ? "remove" : "add", |
---|
2556 | ether_sprintf(enm->enm_addrlo)); |
---|
2557 | GE_FUNC_EXIT(sc, ""); |
---|
2558 | return 0; |
---|
2559 | } |
---|
2560 | |
---|
2561 | if (error == ENOENT) { |
---|
2562 | printf("%s: multichg: failed to remove %s: not in table\n", |
---|
2563 | sc->sc_dev.dv_xname, |
---|
2564 | ether_sprintf(enm->enm_addrlo)); |
---|
2565 | GE_FUNC_EXIT(sc, ""); |
---|
2566 | return 0; |
---|
2567 | } |
---|
2568 | |
---|
2569 | if (error == ENOSPC) { |
---|
2570 | printf("%s: multichg: failed to add %s: no space; regenerating table\n", |
---|
2571 | sc->sc_dev.dv_xname, |
---|
2572 | ether_sprintf(enm->enm_addrlo)); |
---|
2573 | GE_FUNC_EXIT(sc, ""); |
---|
2574 | return ENETRESET; |
---|
2575 | } |
---|
2576 | GE_DPRINTF(sc, ("%s: multichg: %s: %s succeeded\n", |
---|
2577 | sc->sc_dev.dv_xname, |
---|
2578 | cmd == SIOCDELMULTI ? "remove" : "add", |
---|
2579 | ether_sprintf(enm->enm_addrlo))); |
---|
2580 | GE_FUNC_EXIT(sc, ""); |
---|
2581 | return 0; |
---|
2582 | } |
---|
2583 | #endif |
---|
2584 | |
---|
2585 | int |
---|
2586 | gfe_hash_fill(struct gfe_softc *sc) |
---|
2587 | { |
---|
2588 | struct ether_multistep step; |
---|
2589 | struct ether_multi *enm; |
---|
2590 | int error; |
---|
2591 | |
---|
2592 | GE_FUNC_ENTER(sc, "gfe_hash_fill"); |
---|
2593 | |
---|
2594 | #ifndef __rtems__ |
---|
2595 | error = gfe_hash_entry_op(sc, GE_HASH_ADD, GE_RXPRIO_HI, |
---|
2596 | LLADDR(sc->sc_ec.ec_if.if_sadl)); |
---|
2597 | #else |
---|
2598 | error = gfe_hash_entry_op(sc, GE_HASH_ADD, GE_RXPRIO_HI, sc->sc_ec.ac_enaddr); |
---|
2599 | #endif |
---|
2600 | if (error) { |
---|
2601 | GE_FUNC_EXIT(sc, "!"); |
---|
2602 | return error; |
---|
2603 | } |
---|
2604 | |
---|
2605 | sc->sc_flags &= ~GE_ALLMULTI; |
---|
2606 | if ((sc->sc_ec.ec_if.if_flags & IFF_PROMISC) == 0) |
---|
2607 | sc->sc_pcr &= ~ETH_EPCR_PM; |
---|
2608 | else |
---|
2609 | sc->sc_pcr |= ETH_EPCR_PM; |
---|
2610 | ETHER_FIRST_MULTI(step, &sc->sc_ec, enm); |
---|
2611 | while (enm != NULL) { |
---|
2612 | if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { |
---|
2613 | sc->sc_flags |= GE_ALLMULTI; |
---|
2614 | sc->sc_pcr |= ETH_EPCR_PM; |
---|
2615 | } else { |
---|
2616 | error = gfe_hash_entry_op(sc, GE_HASH_ADD, |
---|
2617 | GE_RXPRIO_MEDLO, enm->enm_addrlo); |
---|
2618 | if (error == ENOSPC) |
---|
2619 | break; |
---|
2620 | } |
---|
2621 | ETHER_NEXT_MULTI(step, enm); |
---|
2622 | } |
---|
2623 | |
---|
2624 | GE_FUNC_EXIT(sc, ""); |
---|
2625 | return error; |
---|
2626 | } |
---|
2627 | |
---|
2628 | int |
---|
2629 | gfe_hash_alloc(struct gfe_softc *sc) |
---|
2630 | { |
---|
2631 | int error; |
---|
2632 | GE_FUNC_ENTER(sc, "gfe_hash_alloc"); |
---|
2633 | sc->sc_hashmask = (sc->sc_pcr & ETH_EPCR_HS_512 ? 16 : 256)*1024 - 1; |
---|
2634 | error = gfe_dmamem_alloc(sc, &sc->sc_hash_mem, 1, sc->sc_hashmask + 1, |
---|
2635 | BUS_DMA_NOCACHE); |
---|
2636 | if (error) { |
---|
2637 | printf("%s: failed to allocate %d bytes for hash table: %d\n", |
---|
2638 | sc->sc_dev.dv_xname, sc->sc_hashmask + 1, error); |
---|
2639 | GE_FUNC_EXIT(sc, ""); |
---|
2640 | return error; |
---|
2641 | } |
---|
2642 | sc->sc_hashtable = (uint64_t *) sc->sc_hash_mem.gdm_kva; |
---|
2643 | memset(sc->sc_hashtable, 0, sc->sc_hashmask + 1); |
---|
2644 | bus_dmamap_sync(sc->sc_dmat, sc->sc_hash_mem.gdm_map, |
---|
2645 | 0, sc->sc_hashmask + 1, BUS_DMASYNC_PREWRITE); |
---|
2646 | GE_FUNC_EXIT(sc, ""); |
---|
2647 | return 0; |
---|
2648 | } |
---|