1 | /* RTEMS driver for the mv643xx gigabit ethernet chip */ |
---|
2 | |
---|
3 | /* Acknowledgement: |
---|
4 | * |
---|
5 | * Valuable information for developing this driver was obtained |
---|
6 | * from the linux open-source driver mv643xx_eth.c which was written |
---|
7 | * by the following people and organizations: |
---|
8 | * |
---|
9 | * Matthew Dharm <mdharm@momenco.com> |
---|
10 | * rabeeh@galileo.co.il |
---|
11 | * PMC-Sierra, Inc., Manish Lachwani |
---|
12 | * Ralf Baechle <ralf@linux-mips.org> |
---|
13 | * MontaVista Software, Inc., Dale Farnsworth <dale@farnsworth.org> |
---|
14 | * Steven J. Hill <sjhill1@rockwellcollins.com>/<sjhill@realitydiluted.com> |
---|
15 | * |
---|
16 | * Note however, that in spite of the identical name of this file |
---|
17 | * (and some of the symbols used herein) this file provides a |
---|
18 | * new implementation and is the original work by the author. |
---|
19 | */ |
---|
20 | |
---|
21 | /* |
---|
22 | * Authorship |
---|
23 | * ---------- |
---|
24 | * This software (mv643xx ethernet driver for RTEMS) was |
---|
25 | * created by Till Straumann <strauman@slac.stanford.edu>, 2005-2007, |
---|
26 | * Stanford Linear Accelerator Center, Stanford University. |
---|
27 | * |
---|
28 | * Acknowledgement of sponsorship |
---|
29 | * ------------------------------ |
---|
30 | * The 'mv643xx ethernet driver for RTEMS' was produced by |
---|
31 | * the Stanford Linear Accelerator Center, Stanford University, |
---|
32 | * under Contract DE-AC03-76SFO0515 with the Department of Energy. |
---|
33 | * |
---|
34 | * Government disclaimer of liability |
---|
35 | * ---------------------------------- |
---|
36 | * Neither the United States nor the United States Department of Energy, |
---|
37 | * nor any of their employees, makes any warranty, express or implied, or |
---|
38 | * assumes any legal liability or responsibility for the accuracy, |
---|
39 | * completeness, or usefulness of any data, apparatus, product, or process |
---|
40 | * disclosed, or represents that its use would not infringe privately owned |
---|
41 | * rights. |
---|
42 | * |
---|
43 | * Stanford disclaimer of liability |
---|
44 | * -------------------------------- |
---|
45 | * Stanford University makes no representations or warranties, express or |
---|
46 | * implied, nor assumes any liability for the use of this software. |
---|
47 | * |
---|
48 | * Stanford disclaimer of copyright |
---|
49 | * -------------------------------- |
---|
50 | * Stanford University, owner of the copyright, hereby disclaims its |
---|
51 | * copyright and all other rights in this software. Hence, anyone may |
---|
52 | * freely use it for any purpose without restriction. |
---|
53 | * |
---|
54 | * Maintenance of notices |
---|
55 | * ---------------------- |
---|
56 | * In the interest of clarity regarding the origin and status of this |
---|
57 | * SLAC software, this and all the preceding Stanford University notices |
---|
58 | * are to remain affixed to any copy or derivative of this software made |
---|
59 | * or distributed by the recipient and are to be affixed to any copy of |
---|
60 | * software made or distributed by the recipient that contains a copy or |
---|
61 | * derivative of this software. |
---|
62 | * |
---|
63 | * ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03 |
---|
64 | */ |
---|
65 | |
---|
66 | /* |
---|
67 | * NOTE: Some register (e.g., the SMI register) are SHARED among the |
---|
68 | * three devices. Concurrent access protection is provided by |
---|
69 | * the global networking semaphore. |
---|
70 | * If other drivers are running on a subset of IFs then proper |
---|
71 | * locking of all shared registers must be implemented! |
---|
72 | * |
---|
73 | * Some things I learned about this hardware can be found |
---|
74 | * further down... |
---|
75 | */ |
---|
76 | |
---|
/* Make sure the kernel-mode feature macros are defined before the BSD
 * networking headers are pulled in below (apparently different headers
 * test different spellings, so define both).
 */
#ifndef KERNEL
#define KERNEL
#endif
#ifndef _KERNEL
#define _KERNEL
#endif
---|
83 | |
---|
84 | #include <rtems.h> |
---|
85 | #include <rtems/bspIo.h> |
---|
86 | #include <rtems/error.h> |
---|
87 | #include <bsp.h> |
---|
88 | #include <bsp/irq.h> |
---|
89 | #include <bsp/gtreg.h> |
---|
90 | #include <libcpu/byteorder.h> |
---|
91 | |
---|
92 | #include <sys/param.h> |
---|
93 | #include <sys/proc.h> |
---|
94 | #include <sys/socket.h> |
---|
95 | #include <sys/sockio.h> |
---|
96 | #include <dev/mii/mii.h> |
---|
97 | #include <net/if_var.h> |
---|
98 | #include <net/if_media.h> |
---|
99 | |
---|
100 | /* Not so nice; would be more elegant not to depend on C library but the |
---|
101 | * RTEMS-specific ioctl for dumping statistics needs stdio anyways. |
---|
102 | */ |
---|
103 | |
---|
104 | /*#define NDEBUG effectively removes all assertions |
---|
105 | * If defining NDEBUG, MAKE SURE assert() EXPRESSION HAVE NO SIDE_EFFECTS!! |
---|
106 | * This driver DOES have side-effects, so DONT DEFINE NDEBUG |
---|
107 | * Performance-critical assertions are removed by undefining MVETH_TESTING. |
---|
108 | */ |
---|
109 | |
---|
110 | #undef NDEBUG |
---|
111 | #include <assert.h> |
---|
112 | #include <stdio.h> |
---|
113 | #include <errno.h> |
---|
114 | #include <inttypes.h> |
---|
115 | |
---|
116 | #include <rtems/rtems_bsdnet.h> |
---|
117 | #include <sys/param.h> |
---|
118 | #include <sys/mbuf.h> |
---|
119 | #include <sys/socket.h> |
---|
120 | #include <sys/sockio.h> |
---|
121 | #include <net/ethernet.h> |
---|
122 | #include <net/if.h> |
---|
123 | #include <netinet/in.h> |
---|
124 | #include <netinet/if_ether.h> |
---|
125 | |
---|
126 | #include <rtems/rtems_mii_ioctl.h> |
---|
127 | #include <bsp/early_enet_link_status.h> |
---|
128 | #include <bsp/if_mve_pub.h> |
---|
129 | |
---|
130 | /* CONFIGURABLE PARAMETERS */ |
---|
131 | |
---|
132 | /* Enable Hardware Snooping; if this is disabled (undefined), |
---|
133 | * cache coherency is maintained by software. |
---|
134 | */ |
---|
135 | #undef ENABLE_HW_SNOOPING |
---|
136 | |
---|
137 | /* Compile-time debugging features */ |
---|
138 | |
---|
139 | /* Enable paranoia assertions and checks; reduce # of descriptors to minimum for stressing */ |
---|
140 | #undef MVETH_TESTING |
---|
141 | |
---|
142 | /* Enable debugging messages and some support routines (dump rings etc.) */ |
---|
143 | #undef MVETH_DEBUG |
---|
144 | |
---|
145 | /* Hack for driver development; rtems bsdnet doesn't implement detaching an interface :-( |
---|
146 | * but this hack allows us to unload/reload the driver module which makes development |
---|
147 | * a lot less painful. |
---|
148 | */ |
---|
149 | #undef MVETH_DETACH_HACK |
---|
150 | |
---|
151 | /* Ring sizes */ |
---|
152 | |
---|
153 | #ifdef MVETH_TESTING |
---|
154 | |
---|
155 | /* hard and small defaults */ |
---|
156 | #undef MV643XX_RX_RING_SIZE |
---|
157 | #define MV643XX_RX_RING_SIZE 2 |
---|
158 | #undef MV643XX_TX_RING_SIZE |
---|
159 | #define MV643XX_TX_RING_SIZE 4 |
---|
160 | |
---|
161 | #else /* MVETH_TESTING */ |
---|
162 | |
---|
163 | /* Define default ring sizes, allow override from bsp.h, Makefile,... and from ifcfg->rbuf_count/xbuf_count */ |
---|
164 | |
---|
165 | #ifndef MV643XX_RX_RING_SIZE |
---|
166 | #define MV643XX_RX_RING_SIZE 40 /* attached buffers are always 2k clusters, i.e., this |
---|
167 | * driver - with a configured ring size of 40 - constantly |
---|
168 | * locks 80k of cluster memory - your app config better |
---|
169 | * provides enough space! |
---|
170 | */ |
---|
171 | #endif |
---|
172 | |
---|
173 | #ifndef MV643XX_TX_RING_SIZE |
---|
174 | /* NOTE: tx ring size MUST be > max. # of fragments / mbufs in a chain; |
---|
175 | * in 'TESTING' mode, special code is compiled in to repackage |
---|
176 | * chains that are longer than the ring size. Normally, this is |
---|
177 | * disabled for sake of speed. |
---|
178 | * I observed chains of >17 entries regularly! |
---|
179 | * |
---|
180 | * Also, TX_NUM_TAG_SLOTS (1) must be left empty as a marker, hence |
---|
181 | * the ring size must be > max. #frags + 1. |
---|
182 | */ |
---|
183 | #define MV643XX_TX_RING_SIZE 200 /* these are smaller fragments and not occupied when |
---|
184 | * the driver is idle. |
---|
185 | */ |
---|
186 | #endif |
---|
187 | |
---|
188 | #endif /* MVETH_TESTING */ |
---|
189 | |
---|
190 | /* How many instances to we support (bsp.h could override) */ |
---|
191 | #ifndef MV643XXETH_NUM_DRIVER_SLOTS |
---|
192 | #define MV643XXETH_NUM_DRIVER_SLOTS 2 |
---|
193 | #endif |
---|
194 | |
---|
195 | #define TX_NUM_TAG_SLOTS 1 /* leave room for tag; must not be 0 */ |
---|
196 | |
---|
197 | /* This is REAL; chip reads from 64-bit down-aligned buffer |
---|
198 | * if the buffer size is < 8 !!! for buffer sizes 8 and upwards |
---|
199 | * alignment is not an issue. This was verified using the |
---|
200 | * 'mve_smallbuf_test.c' |
---|
201 | */ |
---|
202 | #define ENABLE_TX_WORKAROUND_8_BYTE_PROBLEM |
---|
203 | |
---|
204 | /* Chip register configuration values */ |
---|
205 | #define MVETH_PORT_CONFIG_VAL (0 \ |
---|
206 | | MV643XX_ETH_DFLT_RX_Q(0) \ |
---|
207 | | MV643XX_ETH_DFLT_RX_ARP_Q(0) \ |
---|
208 | | MV643XX_ETH_DFLT_RX_TCP_Q(0) \ |
---|
209 | | MV643XX_ETH_DFLT_RX_UDP_Q(0) \ |
---|
210 | | MV643XX_ETH_DFLT_RX_BPDU_Q(0) \ |
---|
211 | ) |
---|
212 | |
---|
213 | |
---|
214 | #define MVETH_PORT_XTEND_CONFIG_VAL 0 |
---|
215 | |
---|
216 | #ifdef OLDCONFIGVAL |
---|
217 | #define MVETH_SERIAL_CTRL_CONFIG_VAL (0 \ |
---|
218 | | MV643XX_ETH_FORCE_LINK_PASS \ |
---|
219 | | MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOWCTL \ |
---|
220 | | MV643XX_ETH_ADVERTISE_SYMMETRIC_FLOWCTL \ |
---|
221 | | MV643XX_ETH_BIT9_UNKNOWN \ |
---|
222 | | MV643XX_ETH_FORCE_LINK_FAIL_DISABLE \ |
---|
223 | | MV643XX_ETH_SC_MAX_RX_1552 \ |
---|
224 | | MV643XX_ETH_SET_FULL_DUPLEX \ |
---|
225 | | MV643XX_ETH_ENBL_FLOWCTL_TX_RX_IN_FD \ |
---|
226 | ) |
---|
227 | #endif |
---|
228 | /* If we enable autoneg (duplex, speed, ...) then it seems |
---|
229 | * that the chip automatically updates link settings |
---|
230 | * (correct link settings are reflected in PORT_STATUS_R). |
---|
231 | * However, when we disable aneg in the PHY then things |
---|
232 | * can get messed up and the port doesn't work anymore. |
---|
233 | * => we follow the linux driver in disabling all aneg |
---|
234 | * in the serial config reg. and manually updating the |
---|
235 | * speed & duplex bits when the phy link status changes. |
---|
236 | * FIXME: don't know what to do about pause/flow-ctrl. |
---|
237 | * It is best to just use ANEG anyways!!! |
---|
238 | */ |
---|
239 | #define MVETH_SERIAL_CTRL_CONFIG_VAL (0 \ |
---|
240 | | MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLEX \ |
---|
241 | | MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOWCTL \ |
---|
242 | | MV643XX_ETH_ADVERTISE_SYMMETRIC_FLOWCTL \ |
---|
243 | | MV643XX_ETH_BIT9_UNKNOWN \ |
---|
244 | | MV643XX_ETH_FORCE_LINK_FAIL_DISABLE \ |
---|
245 | | MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII \ |
---|
246 | | MV643XX_ETH_SC_MAX_RX_1552 \ |
---|
247 | ) |
---|
248 | |
---|
249 | #define MVETH_SERIAL_CTRL_CONFIG_MSK (0 \ |
---|
250 | | MV643XX_ETH_SERIAL_PORT_ENBL \ |
---|
251 | | MV643XX_ETH_FORCE_LINK_PASS \ |
---|
252 | | MV643XX_ETH_SC_MAX_RX_MASK \ |
---|
253 | ) |
---|
254 | |
---|
255 | |
---|
256 | #ifdef __PPC__ |
---|
257 | #define MVETH_SDMA_CONFIG_VAL (0 \ |
---|
258 | | MV643XX_ETH_RX_BURST_SZ_4_64BIT \ |
---|
259 | | MV643XX_ETH_TX_BURST_SZ_4_64BIT \ |
---|
260 | ) |
---|
261 | #else |
---|
262 | #define MVETH_SDMA_CONFIG_VAL (0 \ |
---|
263 | | MV643XX_ETH_RX_BURST_SZ_16_64BIT \ |
---|
264 | | MV643XX_ETH_TX_BURST_SZ_16_64BIT \ |
---|
265 | ) |
---|
266 | #endif |
---|
267 | |
---|
268 | /* minimal frame size we accept */ |
---|
269 | #define MVETH_MIN_FRAMSZ_CONFIG_VAL 40 |
---|
270 | |
---|
271 | /* END OF CONFIGURABLE SECTION */ |
---|
272 | |
---|
273 | /* |
---|
274 | * Here's stuff I learned about this chip: |
---|
275 | * |
---|
276 | * |
---|
277 | * RX interrupt flags: |
---|
278 | * |
---|
279 | * broadcast packet RX: 0x00000005 |
---|
280 | * last buf: 0x00000c05 |
---|
281 | * overrun: 0x00000c00 |
---|
282 | * unicast packet RX: 0x00000005 |
---|
283 | * bad CRC received: 0x00000005 |
---|
284 | * |
---|
285 | * clearing 0x00000004 -> clears 0x00000001 |
---|
286 | * clearing 0x00000400 -> clears 0x00000800 |
---|
287 | * |
---|
288 | * --> 0x0801 are probably some sort of summary bits. |
---|
289 | * |
---|
290 | * TX interrupt flags: |
---|
291 | * |
---|
292 | * broadcast packet in 1 buf: xcause: 0x00000001 (cause 0x00080000) |
---|
293 | * into disconn. link: " " |
---|
294 | * |
---|
295 | * in some cases, I observed xcause: 0x00000101 (reason for 0x100 unknown |
---|
296 | * but the linux driver accepts it also). |
---|
297 | * |
---|
298 | * |
---|
299 | * Here a few more ugly things about this piece of hardware I learned |
---|
300 | * (painfully, painfully; spending many many hours & nights :-() |
---|
301 | * |
---|
302 | * a) Especially in the case of 'chained' descriptors, the DMA keeps |
---|
303 | * clobbering 'cmd_sts' long after it cleared the OWNership flag!!! |
---|
304 | * Only after the whole chain is processed (OWN cleared on the |
---|
305 | * last descriptor) it is safe to change cmd_sts. |
---|
306 | * However, in the case of hardware snooping I found that the |
---|
307 | * last descriptor in chain has its cmd_sts still clobbered *after* |
---|
308 | * checking ownership!, I.e., |
---|
309 | * if ( ! OWN & cmd_sts ) { |
---|
310 | * cmd_sts = 0; |
---|
311 | * } |
---|
312 | * --> sometimes, cmd_sts is STILL != 0 here |
---|
313 | * |
---|
314 | * b) Sometimes, the OWNership flag is *not cleared*. |
---|
315 | * |
---|
316 | * c) Weird things happen if the chip finds a descriptor with 'OWN' |
---|
317 | * still set (i.e., not properly loaded), i.e., corrupted packets |
---|
318 | * are sent [with OK checksum since the chip calculates it]. |
---|
319 | * |
---|
320 | * Combine a+b+c and we end up with a real mess. |
---|
321 | * |
---|
322 | * The fact that the chip doesn't reliably reset OWN and that OTOH, |
---|
323 | * it can't be reliably reset by the driver and still, the chip needs |
---|
324 | * it for proper communication doesn't make things easy... |
---|
325 | * |
---|
326 | * Here the basic workarounds: |
---|
327 | * |
---|
328 | * - In addition to check OWN, the scavenger compares the "currently |
---|
329 | * served desc" register to the descriptor it tries to recover and |
---|
330 | * ignores OWN if they do not match. Hope this is OK. |
---|
331 | * Otherwise, we could scan the list of used descriptors and proceed |
---|
332 | * recycling descriptors if we find a !OWNed one behind the target... |
---|
333 | * |
---|
334 | * - Always keep an empty slot around to mark the end of the list of |
---|
335 | * jobs. The driver clears the descriptor ahead when enqueueing a new |
---|
336 | * packet. |
---|
337 | */ |
---|
338 | |
---|
339 | #define DRVNAME "mve" |
---|
340 | #define MAX_NUM_SLOTS 3 |
---|
341 | |
---|
342 | #if MV643XXETH_NUM_DRIVER_SLOTS > MAX_NUM_SLOTS |
---|
343 | #error "mv643xxeth: only MAX_NUM_SLOTS supported" |
---|
344 | #endif |
---|
345 | |
---|
346 | #ifdef NDEBUG |
---|
347 | #error "Driver uses assert() statements with side-effects; MUST NOT define NDEBUG" |
---|
348 | #endif |
---|
349 | |
---|
350 | #ifdef MVETH_DEBUG |
---|
351 | #define STATIC |
---|
352 | #else |
---|
353 | #define STATIC static |
---|
354 | #endif |
---|
355 | |
---|
356 | #define TX_AVAILABLE_RING_SIZE(mp) ((mp)->xbuf_count - (TX_NUM_TAG_SLOTS)) |
---|
357 | |
---|
358 | /* macros for ring alignment; proper alignment is a hardware req; . */ |
---|
359 | |
---|
360 | #ifdef ENABLE_HW_SNOOPING |
---|
361 | |
---|
362 | #define RING_ALIGNMENT 16 |
---|
363 | /* rx buffers must be 64-bit aligned (chip requirement) */ |
---|
364 | #define RX_BUF_ALIGNMENT 8 |
---|
365 | |
---|
366 | #else /* ENABLE_HW_SNOOPING */ |
---|
367 | |
---|
368 | /* Software cache management */ |
---|
369 | |
---|
370 | #ifndef __PPC__ |
---|
371 | #error "Dont' know how to deal with cache on this CPU architecture" |
---|
372 | #endif |
---|
373 | |
---|
374 | /* Ring entries are 32 bytes; coherency-critical chunks are 16 -> software coherency |
---|
375 | * management works for cache line sizes of 16 and 32 bytes only. If the line size |
---|
376 | * is bigger, the descriptors could be padded... |
---|
377 | */ |
---|
378 | #if PPC_CACHE_ALIGMENT != 16 && PPC_CACHE_ALIGNMENT != 32 |
---|
379 | #error "Cache line size must be 16 or 32" |
---|
380 | #else |
---|
381 | #define RING_ALIGNMENT PPC_CACHE_ALIGNMENT |
---|
382 | #define RX_BUF_ALIGNMENT PPC_CACHE_ALIGNMENT |
---|
383 | #endif |
---|
384 | |
---|
385 | #endif /* ENABLE_HW_SNOOPING */ |
---|
386 | |
---|
387 | |
---|
388 | /* HELPER MACROS */ |
---|
389 | |
---|
/* Round 'b' up to the next multiple of 'a'; 'a' must be a power of two.
 * Used to satisfy the hardware alignment requirements for rings/buffers.
 */
#define MV643XX_ALIGN(b, a)	((((uint32_t)(b)) + (a)-1) & (~((a)-1)))

/* Empty statement; safe wherever a statement is syntactically required */
#define NOOP()			do {} while(0)
---|
394 | |
---|
/* Function like macros */

/* Register accessors: 'off' is a byte offset from the MV64x60 bridge base.
 * ld_le32()/st_le32() (from libcpu/byteorder.h) perform the byte swap for
 * the chip's little-endian registers.
 *
 * NOTE(fix): 'data' is now fully parenthesized in the expansion.  The
 * original expanded to ((unsigned)data), which mis-binds for arguments
 * such as 'a | b' because a cast has higher precedence than '|'.
 */
#define MV_READ(off) \
		ld_le32((volatile uint32_t *)(BSP_MV64x60_BASE + (off)))
#define MV_WRITE(off, data) \
		st_le32((volatile uint32_t *)(BSP_MV64x60_BASE + (off)), ((unsigned)(data)))
---|
400 | |
---|
401 | |
---|
402 | /* ENET window mapped 1:1 to CPU addresses by our BSP/MotLoad |
---|
403 | * -- if this is changed, we should think about caching the 'next' and 'buf' pointers. |
---|
404 | */ |
---|
405 | #define CPUADDR2ENET(a) ((Dma_addr_t)(a)) |
---|
406 | #define ENET2CPUADDR(a) (a) |
---|
407 | |
---|
408 | #if 1 /* Whether to automatically try to reclaim descriptors when enqueueing new packets */ |
---|
409 | #define MVETH_CLEAN_ON_SEND(mp) (BSP_mve_swipe_tx(mp)) |
---|
410 | #else |
---|
411 | #define MVETH_CLEAN_ON_SEND(mp) (-1) |
---|
412 | #endif |
---|
413 | |
---|
/* Step to the next descriptor via the CPU-visible 'next' pointer (not the
 * DMA-visible 'next_desc_ptr').  The expansion is parenthesized so the
 * macros compose safely inside larger expressions.
 */
#define NEXT_TXD(d)	((d)->next)
#define NEXT_RXD(d)	((d)->next)
---|
416 | |
---|
417 | /* REGISTER AND DESCRIPTOR OFFSET AND BIT DEFINITIONS */ |
---|
418 | |
---|
419 | /* Descriptor Definitions */ |
---|
420 | /* Rx descriptor */ |
---|
421 | #define RDESC_ERROR (1<< 0) /* Error summary */ |
---|
422 | |
---|
423 | /* Error code (bit 1&2) is only valid if summary bit is set */ |
---|
424 | #define RDESC_CRC_ERROR ( 1) |
---|
425 | #define RDESC_OVERRUN_ERROR ( 3) |
---|
426 | #define RDESC_MAX_FRAMELENGTH_ERROR ( 5) |
---|
427 | #define RDESC_RESOURCE_ERROR ( 7) |
---|
428 | |
---|
429 | #define RDESC_LAST (1<<26) /* Last Descriptor */ |
---|
430 | #define RDESC_FRST (1<<27) /* First Descriptor */ |
---|
431 | #define RDESC_INT_ENA (1<<29) /* Enable Interrupts */ |
---|
432 | #define RDESC_DMA_OWNED (1<<31) |
---|
433 | |
---|
434 | /* Tx descriptor */ |
---|
435 | #define TDESC_ERROR (1<< 0) /* Error summary */ |
---|
436 | #define TDESC_ZERO_PAD (1<<19) |
---|
437 | #define TDESC_LAST (1<<20) /* Last Descriptor */ |
---|
438 | #define TDESC_FRST (1<<21) /* First Descriptor */ |
---|
439 | #define TDESC_GEN_CRC (1<<22) |
---|
440 | #define TDESC_INT_ENA (1<<23) /* Enable Interrupts */ |
---|
441 | #define TDESC_DMA_OWNED (1<<31) |
---|
442 | |
---|
443 | |
---|
444 | |
---|
445 | /* Register Definitions */ |
---|
446 | #define MV643XX_ETH_PHY_ADDR_R (0x2000) |
---|
447 | #define MV643XX_ETH_SMI_R (0x2004) |
---|
448 | #define MV643XX_ETH_SMI_BUSY (1<<28) |
---|
449 | #define MV643XX_ETH_SMI_VALID (1<<27) |
---|
450 | #define MV643XX_ETH_SMI_OP_WR (0<<26) |
---|
451 | #define MV643XX_ETH_SMI_OP_RD (1<<26) |
---|
452 | |
---|
453 | #define MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_R(port) (0x2448 + ((port)<<10)) |
---|
454 | #define MV643XX_ETH_TX_START(queue) (0x0001<<(queue)) |
---|
455 | #define MV643XX_ETH_TX_STOP(queue) (0x0100<<(queue)) |
---|
456 | #define MV643XX_ETH_TX_START_M(queues) ((queues)&0xff) |
---|
457 | #define MV643XX_ETH_TX_STOP_M(queues) (((queues)&0xff)<<8) |
---|
458 | #define MV643XX_ETH_TX_STOP_ALL (0xff00) |
---|
459 | #define MV643XX_ETH_TX_ANY_RUNNING (0x00ff) |
---|
460 | |
---|
461 | #define MV643XX_ETH_RECEIVE_QUEUE_COMMAND_R(port) (0x2680 + ((port)<<10)) |
---|
462 | #define MV643XX_ETH_RX_START(queue) (0x0001<<(queue)) |
---|
463 | #define MV643XX_ETH_RX_STOP(queue) (0x0100<<(queue)) |
---|
464 | #define MV643XX_ETH_RX_STOP_ALL (0xff00) |
---|
465 | #define MV643XX_ETH_RX_ANY_RUNNING (0x00ff) |
---|
466 | |
---|
467 | #define MV643XX_ETH_CURRENT_SERVED_TX_DESC(port) (0x2684 + ((port)<<10)) |
---|
468 | |
---|
469 | /* The chip puts the ethernet header at offset 2 into the buffer so |
---|
470 | * that the payload is aligned |
---|
471 | */ |
---|
472 | #define ETH_RX_OFFSET 2 |
---|
473 | #define ETH_CRC_LEN 4 /* strip FCS at end of packet */ |
---|
474 | |
---|
475 | |
---|
476 | #define MV643XX_ETH_INTERRUPT_CAUSE_R(port) (0x2460 + ((port)<<10)) |
---|
477 | /* not fully understood; RX seems to raise 0x0005 or 0x0c05 if last buffer is filled and 0x0c00 |
---|
478 | * if there are no buffers |
---|
479 | */ |
---|
480 | #define MV643XX_ETH_ALL_IRQS (0x0007ffff) |
---|
481 | #define MV643XX_ETH_KNOWN_IRQS (0x00000c05) |
---|
482 | #define MV643XX_ETH_IRQ_EXT_ENA (1<<1) |
---|
483 | #define MV643XX_ETH_IRQ_RX_DONE (1<<2) |
---|
484 | #define MV643XX_ETH_IRQ_RX_NO_DESC (1<<10) |
---|
485 | |
---|
486 | #define MV643XX_ETH_INTERRUPT_EXTEND_CAUSE_R(port) (0x2464 + ((port)<<10)) |
---|
487 | /* not fully understood; TX seems to raise 0x0001 and link change is 0x00010000 |
---|
488 | * if there are no buffers |
---|
489 | */ |
---|
490 | #define MV643XX_ETH_ALL_EXT_IRQS (0x0011ffff) |
---|
491 | #define MV643XX_ETH_KNOWN_EXT_IRQS (0x00010101) |
---|
492 | #define MV643XX_ETH_EXT_IRQ_TX_DONE (1<<0) |
---|
493 | #define MV643XX_ETH_EXT_IRQ_LINK_CHG (1<<16) |
---|
494 | #define MV643XX_ETH_INTERRUPT_ENBL_R(port) (0x2468 + ((port)<<10)) |
---|
495 | #define MV643XX_ETH_INTERRUPT_EXTEND_ENBL_R(port) (0x246c + ((port)<<10)) |
---|
496 | |
---|
497 | /* port configuration */ |
---|
498 | #define MV643XX_ETH_PORT_CONFIG_R(port) (0x2400 + ((port)<<10)) |
---|
499 | #define MV643XX_ETH_UNICAST_PROMISC_MODE (1<<0) |
---|
500 | #define MV643XX_ETH_DFLT_RX_Q(q) ((q)<<1) |
---|
501 | #define MV643XX_ETH_DFLT_RX_ARP_Q(q) ((q)<<4) |
---|
502 | #define MV643XX_ETH_REJ_BCAST_IF_NOT_IP_OR_ARP (1<<7) |
---|
503 | #define MV643XX_ETH_REJ_BCAST_IF_IP (1<<8) |
---|
504 | #define MV643XX_ETH_REJ_BCAST_IF_ARP (1<<9) |
---|
505 | #define MV643XX_ETH_TX_AM_NO_UPDATE_ERR_SUMMARY (1<<12) |
---|
506 | #define MV643XX_ETH_CAPTURE_TCP_FRAMES_ENBL (1<<14) |
---|
507 | #define MV643XX_ETH_CAPTURE_UDP_FRAMES_ENBL (1<<15) |
---|
508 | #define MV643XX_ETH_DFLT_RX_TCP_Q(q) ((q)<<16) |
---|
509 | #define MV643XX_ETH_DFLT_RX_UDP_Q(q) ((q)<<19) |
---|
510 | #define MV643XX_ETH_DFLT_RX_BPDU_Q(q) ((q)<<22) |
---|
511 | |
---|
512 | |
---|
513 | |
---|
514 | #define MV643XX_ETH_PORT_CONFIG_XTEND_R(port) (0x2404 + ((port)<<10)) |
---|
515 | #define MV643XX_ETH_CLASSIFY_ENBL (1<<0) |
---|
516 | #define MV643XX_ETH_SPAN_BPDU_PACKETS_AS_NORMAL (0<<1) |
---|
517 | #define MV643XX_ETH_SPAN_BPDU_PACKETS_2_Q7 (1<<1) |
---|
518 | #define MV643XX_ETH_PARTITION_DISBL (0<<2) |
---|
519 | #define MV643XX_ETH_PARTITION_ENBL (1<<2) |
---|
520 | |
---|
521 | #define MV643XX_ETH_SDMA_CONFIG_R(port) (0x241c + ((port)<<10)) |
---|
522 | #define MV643XX_ETH_SDMA_RIFB (1<<0) |
---|
523 | #define MV643XX_ETH_RX_BURST_SZ_1_64BIT (0<<1) |
---|
524 | #define MV643XX_ETH_RX_BURST_SZ_2_64BIT (1<<1) |
---|
525 | #define MV643XX_ETH_RX_BURST_SZ_4_64BIT (2<<1) |
---|
526 | #define MV643XX_ETH_RX_BURST_SZ_8_64BIT (3<<1) |
---|
527 | #define MV643XX_ETH_RX_BURST_SZ_16_64BIT (4<<1) |
---|
528 | #define MV643XX_ETH_SMDA_BLM_RX_NO_SWAP (1<<4) |
---|
529 | #define MV643XX_ETH_SMDA_BLM_TX_NO_SWAP (1<<5) |
---|
530 | #define MV643XX_ETH_SMDA_DESC_BYTE_SWAP (1<<6) |
---|
531 | #define MV643XX_ETH_TX_BURST_SZ_1_64BIT (0<<22) |
---|
532 | #define MV643XX_ETH_TX_BURST_SZ_2_64BIT (1<<22) |
---|
533 | #define MV643XX_ETH_TX_BURST_SZ_4_64BIT (2<<22) |
---|
534 | #define MV643XX_ETH_TX_BURST_SZ_8_64BIT (3<<22) |
---|
535 | #define MV643XX_ETH_TX_BURST_SZ_16_64BIT (4<<22) |
---|
536 | |
---|
537 | #define MV643XX_ETH_RX_MIN_FRAME_SIZE_R(port) (0x247c + ((port)<<10)) |
---|
538 | |
---|
539 | |
---|
540 | #define MV643XX_ETH_SERIAL_CONTROL_R(port) (0x243c + ((port)<<10)) |
---|
541 | #define MV643XX_ETH_SERIAL_PORT_ENBL (1<<0) /* Enable serial port */ |
---|
542 | #define MV643XX_ETH_FORCE_LINK_PASS (1<<1) |
---|
543 | #define MV643XX_ETH_DISABLE_AUTO_NEG_FOR_DUPLEX (1<<2) |
---|
544 | #define MV643XX_ETH_DISABLE_AUTO_NEG_FOR_FLOWCTL (1<<3) |
---|
545 | #define MV643XX_ETH_ADVERTISE_SYMMETRIC_FLOWCTL (1<<4) |
---|
546 | #define MV643XX_ETH_FORCE_FC_MODE_TX_PAUSE_DIS (1<<5) |
---|
547 | #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX (1<<7) |
---|
548 | #define MV643XX_ETH_FORCE_BP_MODE_JAM_TX_ON_RX_ERR (1<<8) |
---|
549 | #define MV643XX_ETH_BIT9_UNKNOWN (1<<9) /* unknown purpose; linux sets this */ |
---|
550 | #define MV643XX_ETH_FORCE_LINK_FAIL_DISABLE (1<<10) |
---|
551 | #define MV643XX_ETH_RETRANSMIT_FOREVER (1<<11) /* limit to 16 attempts if clear */ |
---|
552 | #define MV643XX_ETH_DISABLE_AUTO_NEG_SPEED_GMII (1<<13) |
---|
553 | #define MV643XX_ETH_DTE_ADV_1 (1<<14) |
---|
554 | #define MV643XX_ETH_AUTO_NEG_BYPASS_ENBL (1<<15) |
---|
555 | #define MV643XX_ETH_RESTART_AUTO_NEG (1<<16) |
---|
556 | #define MV643XX_ETH_SC_MAX_RX_1518 (0<<17) /* Limit RX packet size */ |
---|
557 | #define MV643XX_ETH_SC_MAX_RX_1522 (1<<17) /* Limit RX packet size */ |
---|
558 | #define MV643XX_ETH_SC_MAX_RX_1552 (2<<17) /* Limit RX packet size */ |
---|
559 | #define MV643XX_ETH_SC_MAX_RX_9022 (3<<17) /* Limit RX packet size */ |
---|
560 | #define MV643XX_ETH_SC_MAX_RX_9192 (4<<17) /* Limit RX packet size */ |
---|
561 | #define MV643XX_ETH_SC_MAX_RX_9700 (5<<17) /* Limit RX packet size */ |
---|
562 | #define MV643XX_ETH_SC_MAX_RX_MASK (7<<17) /* bitmask */ |
---|
563 | #define MV643XX_ETH_SET_EXT_LOOPBACK (1<<20) |
---|
564 | #define MV643XX_ETH_SET_FULL_DUPLEX (1<<21) |
---|
565 | #define MV643XX_ETH_ENBL_FLOWCTL_TX_RX_IN_FD (1<<22) /* enable flow ctrl on rx and tx in full-duplex */ |
---|
566 | #define MV643XX_ETH_SET_GMII_SPEED_1000 (1<<23) /* 10/100 if clear */ |
---|
567 | #define MV643XX_ETH_SET_MII_SPEED_100 (1<<24) /* 10 if clear */ |
---|
568 | |
---|
569 | #define MV643XX_ETH_PORT_STATUS_R(port) (0x2444 + ((port)<<10)) |
---|
570 | |
---|
571 | #define MV643XX_ETH_PORT_STATUS_MODE_10_BIT (1<<0) |
---|
572 | #define MV643XX_ETH_PORT_STATUS_LINK_UP (1<<1) |
---|
573 | #define MV643XX_ETH_PORT_STATUS_FDX (1<<2) |
---|
574 | #define MV643XX_ETH_PORT_STATUS_FC (1<<3) |
---|
575 | #define MV643XX_ETH_PORT_STATUS_1000 (1<<4) |
---|
576 | #define MV643XX_ETH_PORT_STATUS_100 (1<<5) |
---|
577 | /* PSR bit 6 unknown */ |
---|
578 | #define MV643XX_ETH_PORT_STATUS_TX_IN_PROGRESS (1<<7) |
---|
579 | #define MV643XX_ETH_PORT_STATUS_ANEG_BYPASSED (1<<8) |
---|
580 | #define MV643XX_ETH_PORT_STATUS_PARTITION (1<<9) |
---|
581 | #define MV643XX_ETH_PORT_STATUS_TX_FIFO_EMPTY (1<<10) |
---|
582 | |
---|
583 | #define MV643XX_ETH_MIB_COUNTERS(port) (0x3000 + ((port)<<7)) |
---|
584 | #define MV643XX_ETH_NUM_MIB_COUNTERS 32 |
---|
585 | |
---|
586 | #define MV643XX_ETH_MIB_GOOD_OCTS_RCVD_LO (0) |
---|
587 | #define MV643XX_ETH_MIB_GOOD_OCTS_RCVD_HI (1<<2) |
---|
588 | #define MV643XX_ETH_MIB_BAD_OCTS_RCVD (2<<2) |
---|
589 | #define MV643XX_ETH_MIB_INTERNAL_MAC_TX_ERR (3<<2) |
---|
590 | #define MV643XX_ETH_MIB_GOOD_FRAMES_RCVD (4<<2) |
---|
591 | #define MV643XX_ETH_MIB_BAD_FRAMES_RCVD (5<<2) |
---|
592 | #define MV643XX_ETH_MIB_BCAST_FRAMES_RCVD (6<<2) |
---|
593 | #define MV643XX_ETH_MIB_MCAST_FRAMES_RCVD (7<<2) |
---|
594 | #define MV643XX_ETH_MIB_FRAMES_64_OCTS (8<<2) |
---|
595 | #define MV643XX_ETH_MIB_FRAMES_65_127_OCTS (9<<2) |
---|
596 | #define MV643XX_ETH_MIB_FRAMES_128_255_OCTS (10<<2) |
---|
597 | #define MV643XX_ETH_MIB_FRAMES_256_511_OCTS (11<<2) |
---|
598 | #define MV643XX_ETH_MIB_FRAMES_512_1023_OCTS (12<<2) |
---|
599 | #define MV643XX_ETH_MIB_FRAMES_1024_MAX_OCTS (13<<2) |
---|
600 | #define MV643XX_ETH_MIB_GOOD_OCTS_SENT_LO (14<<2) |
---|
601 | #define MV643XX_ETH_MIB_GOOD_OCTS_SENT_HI (15<<2) |
---|
602 | #define MV643XX_ETH_MIB_GOOD_FRAMES_SENT (16<<2) |
---|
603 | #define MV643XX_ETH_MIB_EXCESSIVE_COLL (17<<2) |
---|
604 | #define MV643XX_ETH_MIB_MCAST_FRAMES_SENT (18<<2) |
---|
605 | #define MV643XX_ETH_MIB_BCAST_FRAMES_SENT (19<<2) |
---|
606 | #define MV643XX_ETH_MIB_UNREC_MAC_CTRL_RCVD (20<<2) |
---|
607 | #define MV643XX_ETH_MIB_FC_SENT (21<<2) |
---|
608 | #define MV643XX_ETH_MIB_GOOD_FC_RCVD (22<<2) |
---|
609 | #define MV643XX_ETH_MIB_BAD_FC_RCVD (23<<2) |
---|
610 | #define MV643XX_ETH_MIB_UNDERSIZE_RCVD (24<<2) |
---|
611 | #define MV643XX_ETH_MIB_FRAGMENTS_RCVD (25<<2) |
---|
612 | #define MV643XX_ETH_MIB_OVERSIZE_RCVD (26<<2) |
---|
613 | #define MV643XX_ETH_MIB_JABBER_RCVD (27<<2) |
---|
614 | #define MV643XX_ETH_MIB_MAC_RX_ERR (28<<2) |
---|
615 | #define MV643XX_ETH_MIB_BAD_CRC_EVENT (29<<2) |
---|
616 | #define MV643XX_ETH_MIB_COLL (30<<2) |
---|
617 | #define MV643XX_ETH_MIB_LATE_COLL (31<<2) |
---|
618 | |
---|
619 | #define MV643XX_ETH_DA_FILTER_SPECL_MCAST_TBL(port) (0x3400+((port)<<10)) |
---|
620 | #define MV643XX_ETH_DA_FILTER_OTHER_MCAST_TBL(port) (0x3500+((port)<<10)) |
---|
621 | #define MV643XX_ETH_DA_FILTER_UNICAST_TBL(port) (0x3600+((port)<<10)) |
---|
622 | #define MV643XX_ETH_NUM_MCAST_ENTRIES 64 |
---|
623 | #define MV643XX_ETH_NUM_UNICAST_ENTRIES 4 |
---|
624 | |
---|
625 | #define MV643XX_ETH_BAR_0 (0x2200) |
---|
626 | #define MV643XX_ETH_SIZE_R_0 (0x2204) |
---|
627 | #define MV643XX_ETH_BAR_1 (0x2208) |
---|
628 | #define MV643XX_ETH_SIZE_R_1 (0x220c) |
---|
629 | #define MV643XX_ETH_BAR_2 (0x2210) |
---|
630 | #define MV643XX_ETH_SIZE_R_2 (0x2214) |
---|
631 | #define MV643XX_ETH_BAR_3 (0x2218) |
---|
632 | #define MV643XX_ETH_SIZE_R_3 (0x221c) |
---|
/* Address-decoding windows 4 and 5 of the ethernet unit (BAR + size pairs) */
#define MV643XX_ETH_BAR_4				(0x2220)
#define MV643XX_ETH_SIZE_R_4				(0x2224)
#define MV643XX_ETH_BAR_5				(0x2228)
#define MV643XX_ETH_SIZE_R_5				(0x222c)
/* Total number of BAR/size register pairs */
#define MV643XX_ETH_NUM_BARS				6

/* Bits in the BAR reg to program cache snooping */
#define MV64360_ENET2MEM_SNOOP_NONE	0x0000
#define MV64360_ENET2MEM_SNOOP_WT	0x1000
#define MV64360_ENET2MEM_SNOOP_WB	0x2000
#define MV64360_ENET2MEM_SNOOP_MSK	0x3000

/* Each set bit in this register disables the corresponding address window */
#define MV643XX_ETH_BAR_ENBL_R				(0x2290)
#define MV643XX_ETH_BAR_DISABLE(bar)			(1<<(bar))
#define MV643XX_ETH_BAR_DISBL_ALL			0x3f

/* Per-port registers are spaced 0x400 bytes apart, hence the '(port)<<10' */
#define MV643XX_ETH_RX_Q0_CURRENT_DESC_PTR(port)	(0x260c+((port)<<10))
#define MV643XX_ETH_RX_Q1_CURRENT_DESC_PTR(port)	(0x261c+((port)<<10))
#define MV643XX_ETH_RX_Q2_CURRENT_DESC_PTR(port)	(0x262c+((port)<<10))
#define MV643XX_ETH_RX_Q3_CURRENT_DESC_PTR(port)	(0x263c+((port)<<10))
#define MV643XX_ETH_RX_Q4_CURRENT_DESC_PTR(port)	(0x264c+((port)<<10))
#define MV643XX_ETH_RX_Q5_CURRENT_DESC_PTR(port)	(0x265c+((port)<<10))
#define MV643XX_ETH_RX_Q6_CURRENT_DESC_PTR(port)	(0x266c+((port)<<10))
#define MV643XX_ETH_RX_Q7_CURRENT_DESC_PTR(port)	(0x267c+((port)<<10))

#define MV643XX_ETH_TX_Q0_CURRENT_DESC_PTR(port)	(0x26c0+((port)<<10))
#define MV643XX_ETH_TX_Q1_CURRENT_DESC_PTR(port)	(0x26c4+((port)<<10))
#define MV643XX_ETH_TX_Q2_CURRENT_DESC_PTR(port)	(0x26c8+((port)<<10))
#define MV643XX_ETH_TX_Q3_CURRENT_DESC_PTR(port)	(0x26cc+((port)<<10))
#define MV643XX_ETH_TX_Q4_CURRENT_DESC_PTR(port)	(0x26d0+((port)<<10))
#define MV643XX_ETH_TX_Q5_CURRENT_DESC_PTR(port)	(0x26d4+((port)<<10))
#define MV643XX_ETH_TX_Q6_CURRENT_DESC_PTR(port)	(0x26d8+((port)<<10))
#define MV643XX_ETH_TX_Q7_CURRENT_DESC_PTR(port)	(0x26dc+((port)<<10))

/* Port MAC address: HI holds the first 4 bytes, LO the last 2
 * (see mveth_write_eaddr() below)
 */
#define MV643XX_ETH_MAC_ADDR_LO(port)			(0x2414+((port)<<10))
#define MV643XX_ETH_MAC_ADDR_HI(port)			(0x2418+((port)<<10))
---|
670 | |
---|
/* TYPE DEFINITIONS */

/* just to make the purpose explicit; vars of this
 * type may need CPU-dependent address translation,
 * endian conversion etc.
 */
typedef uint32_t Dma_addr_t;

/* RX descriptor as laid out for the DMA engine (big-endian only; the
 * field order would have to be swapped within 32-bit words for LE).
 * The first five fields are read/written by the chip; everything after
 * the marker comment is driver bookkeeping the hardware never touches.
 */
typedef volatile struct mveth_rx_desc {
#ifndef __BIG_ENDIAN__
#error	"descriptor declaration not implemented for little endian machines"
#endif
	uint16_t	byte_cnt;		/* byte count of the received frame (written by chip) */
	uint16_t	buf_size;		/* capacity of the attached buffer */
	uint32_t	cmd_sts;		/* control and status */
	Dma_addr_t	next_desc_ptr;		/* next descriptor (as seen from DMA) */
	Dma_addr_t	buf_ptr;		/* data buffer (DMA address) */
	/* fields below here are not used by the chip */
	void		*u_buf;			/* user buffer */
	volatile struct mveth_rx_desc *next;	/* next descriptor (CPU address; next_desc_ptr is a DMA address) */
	uint32_t	pad[2];			/* pad the record out to RING_ALIGNMENT */
} __attribute__(( aligned(RING_ALIGNMENT) )) MvEthRxDescRec, *MvEthRxDesc;

/* TX descriptor; same layout conventions as the RX descriptor above */
typedef volatile struct mveth_tx_desc {
#ifndef __BIG_ENDIAN__
#error	"descriptor declaration not implemented for little endian machines"
#endif
	uint16_t	byte_cnt;		/* length of this buffer fragment */
	uint16_t	l4i_chk;		/* presumably L4 (TCP/UDP) checksum seed for HW offload -- TODO confirm */
	uint32_t	cmd_sts;		/* control and status */
	Dma_addr_t	next_desc_ptr;		/* next descriptor (as seen from DMA) */
	Dma_addr_t	buf_ptr;		/* data buffer (DMA address) */
	/* fields below here are not used by the chip */
	uint32_t	workaround[2];		/* use this space to work around the 8byte problem (is this real?) */
	void		*u_buf;			/* user buffer */
	volatile struct mveth_tx_desc *next;	/* next descriptor (CPU address; next_desc_ptr is a DMA address) */
} __attribute__(( aligned(RING_ALIGNMENT) )) MvEthTxDescRec, *MvEthTxDesc;

/* Reference counters for multicast filter entries; one 16-bit count per
 * byte-slot in the hash tables (multiple addresses may alias one slot).
 * Assume there are never more then 64k aliasing entries.
 */
typedef uint16_t Mc_Refcnt[MV643XX_ETH_NUM_MCAST_ENTRIES*4];
---|
711 | |
---|
/* driver private data and bsdnet interface structure */

/* Per-port driver state; one instance per ethernet unit. The callback
 * members decouple the core from the buffer-management strategy so it
 * can be used with or without the BSD networking stack.
 */
struct mveth_private {
	MvEthRxDesc	rx_ring;		/* pointers to aligned ring area */
	MvEthTxDesc	tx_ring;		/* pointers to aligned ring area */
	MvEthRxDesc	ring_area;		/* allocated ring area */
	int		rbuf_count, xbuf_count;	/* saved ring sizes from ifconfig */
	int		port_num;		/* hardware port served by this instance */
	int		phy;			/* PHY address on the shared SMI bus */
	MvEthRxDesc	d_rx_t;			/* tail of the RX ring; next received packet */
	MvEthTxDesc	d_tx_t, d_tx_h;		/* TX ring tail and head pointers */
	uint32_t	rx_desc_dma, tx_desc_dma; /* ring address as seen by DMA; (1:1 on this BSP) */
	int		avail;			/* number of available TX descriptor slots */
	void		(*isr)(void*);		/* user ISR (used by mveth_isr_1) */
	void		*isr_arg;
	/* Callbacks to handle buffers */
	void		(*cleanup_txbuf)(void*, void*, int);	/* callback to cleanup TX buffer */
	void		*cleanup_txbuf_arg;
	void		*(*alloc_rxbuf)(int *psize, uintptr_t *paddr);	/* allocate RX buffer */
	void		(*consume_rxbuf)(void*, void*, int);	/* callback to consume RX buffer */
	void		*consume_rxbuf_arg;
	rtems_id	tid;			/* task notified by mveth_isr() */
	uint32_t	irq_mask;		/* IRQs we use (main cause register) */
	uint32_t	xirq_mask;		/* IRQs we use (extended cause register) */
	int		promisc;		/* promiscuous-mode flag (see BSP_mve_mcast_filter_clear) */
	struct		{
		unsigned	irqs;		/* interrupts taken */
		unsigned	maxchain;
		unsigned	repack;
		unsigned	packet;
		unsigned	odrops;		/* no counter in core code */
		/* software mirror of the hardware MIB counters; field order
		 * matches the device's counter block (and mibfmt[] below)
		 */
		struct {
			uint64_t	good_octs_rcvd;      /* 64-bit */
			uint32_t	bad_octs_rcvd;
			uint32_t	internal_mac_tx_err;
			uint32_t	good_frames_rcvd;
			uint32_t	bad_frames_rcvd;
			uint32_t	bcast_frames_rcvd;
			uint32_t	mcast_frames_rcvd;
			uint32_t	frames_64_octs;
			uint32_t	frames_65_127_octs;
			uint32_t	frames_128_255_octs;
			uint32_t	frames_256_511_octs;
			uint32_t	frames_512_1023_octs;
			uint32_t	frames_1024_max_octs;
			uint64_t	good_octs_sent;      /* 64-bit */
			uint32_t	good_frames_sent;
			uint32_t	excessive_coll;
			uint32_t	mcast_frames_sent;
			uint32_t	bcast_frames_sent;
			uint32_t	unrec_mac_ctrl_rcvd;
			uint32_t	fc_sent;
			uint32_t	good_fc_rcvd;
			uint32_t	bad_fc_rcvd;
			uint32_t	undersize_rcvd;
			uint32_t	fragments_rcvd;
			uint32_t	oversize_rcvd;
			uint32_t	jabber_rcvd;
			uint32_t	mac_rx_err;
			uint32_t	bad_crc_event;
			uint32_t	coll;
			uint32_t	late_coll;
		}		mib;
	}		stats;
	/* per-entry reference counts of the two multicast hash tables */
	struct {
		Mc_Refcnt	specl, other;
	}		mc_refcnt;
};

/* stuff needed for bsdnet support */
struct mveth_bsdsupp {
	int		oif_flags;		/* old / cached if_flags */
};

struct mveth_softc {
	struct arpcom		arpcom;		/* first member (bsdnet convention) */
	struct mveth_bsdsupp	bsd;
	struct mveth_private	pvt;
};
---|
790 | |
---|
/* GLOBAL VARIABLES */

/* debug knob: non-zero enables dumping of outgoing packets */
#ifdef MVETH_DEBUG_TX_DUMP
int mveth_tx_dump = 0;
#endif

/* THE array of driver/bsdnet structs */

/* If detaching/module unloading is enabled, the main driver data
 * structure must remain in memory; hence it must reside in its own
 * 'dummy' module...
 */
#ifdef MVETH_DETACH_HACK
extern
#else
STATIC
#endif
struct mveth_softc theMvEths[MV643XXETH_NUM_DRIVER_SLOTS]
#ifndef MVETH_DETACH_HACK
= {{{{0}},}}
#endif
;

/* daemon task id */
STATIC rtems_id	mveth_tid = 0;
/* register access protection mutex */
STATIC rtems_id	mveth_mtx = 0;

/* Serialize access to registers shared between ports (e.g., SMI);
 * panics on failure since there is no sensible way to recover.
 */
#define REGLOCK()	do { \
		if ( RTEMS_SUCCESSFUL != rtems_semaphore_obtain(mveth_mtx, RTEMS_WAIT, RTEMS_NO_TIMEOUT) ) \
			rtems_panic(DRVNAME": unable to lock register protection mutex"); \
		} while (0)
#define REGUNLOCK()	rtems_semaphore_release(mveth_mtx)
---|
822 | |
---|
/* Format strings for statistics messages; entry order matches the
 * 'stats.mib' struct (and thus the hardware counter block). The 0
 * placeholders stand for the upper 32-bit halves of the two 64-bit
 * counters, which are consumed together with the preceding PRIu64
 * entry.
 */
static const char *mibfmt[] = {
	"  GOOD_OCTS_RCVD:      %"PRIu64"\n",
	0,
	"  BAD_OCTS_RCVD:       %"PRIu32"\n",
	"  INTERNAL_MAC_TX_ERR: %"PRIu32"\n",
	"  GOOD_FRAMES_RCVD:    %"PRIu32"\n",
	"  BAD_FRAMES_RCVD:     %"PRIu32"\n",
	"  BCAST_FRAMES_RCVD:   %"PRIu32"\n",
	"  MCAST_FRAMES_RCVD:   %"PRIu32"\n",
	"  FRAMES_64_OCTS:      %"PRIu32"\n",
	"  FRAMES_65_127_OCTS:  %"PRIu32"\n",
	"  FRAMES_128_255_OCTS: %"PRIu32"\n",
	"  FRAMES_256_511_OCTS: %"PRIu32"\n",
	"  FRAMES_512_1023_OCTS:%"PRIu32"\n",
	"  FRAMES_1024_MAX_OCTS:%"PRIu32"\n",
	"  GOOD_OCTS_SENT:      %"PRIu64"\n",
	0,
	"  GOOD_FRAMES_SENT:    %"PRIu32"\n",
	"  EXCESSIVE_COLL:      %"PRIu32"\n",
	"  MCAST_FRAMES_SENT:   %"PRIu32"\n",
	"  BCAST_FRAMES_SENT:   %"PRIu32"\n",
	"  UNREC_MAC_CTRL_RCVD: %"PRIu32"\n",
	"  FC_SENT:             %"PRIu32"\n",
	"  GOOD_FC_RCVD:        %"PRIu32"\n",
	"  BAD_FC_RCVD:         %"PRIu32"\n",
	"  UNDERSIZE_RCVD:      %"PRIu32"\n",
	"  FRAGMENTS_RCVD:      %"PRIu32"\n",
	"  OVERSIZE_RCVD:       %"PRIu32"\n",
	"  JABBER_RCVD:         %"PRIu32"\n",
	"  MAC_RX_ERR:          %"PRIu32"\n",
	"  BAD_CRC_EVENT:       %"PRIu32"\n",
	"  COLL:                %"PRIu32"\n",
	"  LATE_COLL:           %"PRIu32"\n",
};
---|
858 | |
---|
/* Interrupt Handler Connection */

/* forward decls + implementation for IRQ API funcs */

static void mveth_isr(rtems_irq_hdl_param unit);
static void mveth_isr_1(rtems_irq_hdl_param unit);
/* do-nothing enable/disable/isOn callbacks for the BSP irq API */
static void noop(const rtems_irq_connect_data *unused)  {}
static int  noop1(const rtems_irq_connect_data *unused) { return 0; }

/* Interrupt-connect templates, one per supported unit; the third
 * member passes the unit number (0/1/2) to the handler as its
 * rtems_irq_hdl_param. NOTE(review): the second member (handler)
 * is 0 here -- presumably filled in at attach time; verify against
 * the setup code.
 */
static rtems_irq_connect_data irq_data[MAX_NUM_SLOTS] = {
	{
		BSP_IRQ_ETH0,
		0,
		(rtems_irq_hdl_param)0,
		noop,
		noop,
		noop1
	},
	{
		BSP_IRQ_ETH1,
		0,
		(rtems_irq_hdl_param)1,
		noop,
		noop,
		noop1
	},
	{
		BSP_IRQ_ETH2,
		0,
		(rtems_irq_hdl_param)2,
		noop,
		noop,
		noop1
	},
};

/* MII Ioctl Interface */

STATIC unsigned
mveth_mii_read(struct mveth_private *mp, unsigned addr);

STATIC unsigned
mveth_mii_write(struct mveth_private *mp, unsigned addr, unsigned v);
---|
902 | |
---|
903 | |
---|
904 | /* mdio / mii interface wrappers for rtems_mii_ioctl API */ |
---|
905 | |
---|
/* rtems_mii_ioctl read wrapper: fetch PHY register 'reg' into *pval.
 * 'uarg' is the struct mveth_private of the interface; only PHY
 * addresses 0 and 1 are accepted.
 */
static int mveth_mdio_r(int phy, void *uarg, unsigned reg, uint32_t *pval)
{
	if ( phy <= 1 ) {
		*pval = mveth_mii_read(uarg, reg);
		return 0;
	}
	/* unsupported PHY address */
	return -1;
}
---|
914 | |
---|
/* rtems_mii_ioctl write wrapper: store 'val' into PHY register 'reg'.
 * 'uarg' is the struct mveth_private of the interface; only PHY
 * addresses 0 and 1 are accepted.
 */
static int mveth_mdio_w(int phy, void *uarg, unsigned reg, uint32_t val)
{
	if ( phy <= 1 ) {
		mveth_mii_write(uarg, reg, val);
		return 0;
	}
	/* unsupported PHY address */
	return -1;
}
---|
922 | |
---|
923 | static struct rtems_mdio_info mveth_mdio = { |
---|
924 | mdio_r: mveth_mdio_r, |
---|
925 | mdio_w: mveth_mdio_w, |
---|
926 | has_gmii: 1, |
---|
927 | }; |
---|
928 | |
---|
/* LOW LEVEL SUPPORT ROUTINES */

/* Software Cache Coherency */
#ifndef ENABLE_HW_SNOOPING
#ifndef __PPC__
#error "Software cache coherency maintenance is not implemented for your CPU architecture"
#endif

/* Invalidate the cache line holding descriptor 'd' ('dcbi') so the
 * next CPU read sees what the chip wrote; the "=m" operand tells gcc
 * that the whole line's contents are clobbered.
 */
static inline unsigned INVAL_DESC(volatile void *d)
{
typedef const char cache_line[PPC_CACHE_ALIGNMENT];
	asm volatile("dcbi 0, %1":"=m"(*(cache_line*)d):"r"(d));
	return (unsigned)d;	/* so this can be used in comma expression */
}

/* Flush the cache line holding descriptor 'd' to memory ('dcbf') so
 * the chip's DMA sees what the CPU wrote.
 */
static inline void FLUSH_DESC(volatile void *d)
{
typedef const char cache_line[PPC_CACHE_ALIGNMENT];
	asm volatile("dcbf 0, %0"::"r"(d),"m"(*(cache_line*)d));
}

/* Order preceding flushes w.r.t. subsequent accesses ('eieio') */
static inline void FLUSH_BARRIER(void)
{
	asm volatile("eieio");
}
---|
954 | |
---|
/* RX buffers are always cache-line aligned
 * ASSUMPTIONS:
 *   - 'addr' is cache aligned
 *   -  len   is a multiple >0 of cache lines
 */
static inline void INVAL_BUF(register uintptr_t addr, register int len)
{
typedef char maxbuf[2048]; /* more than an ethernet packet */
	/* walk the buffer back to front, one cache line per iteration;
	 * 'dcbi %0,%1' invalidates the line at EA = addr + len
	 */
	do {
		len -= RX_BUF_ALIGNMENT;
		asm volatile("dcbi %0, %1"::"b"(addr),"r"(len));
	} while (len > 0);
	/* compiler barrier: force reloads of the invalidated area */
	asm volatile("":"=m"(*(maxbuf*)addr));
}

/* Flushing TX buffers is a little bit trickier; we don't really know their
 * alignment but *assume* adjacent addresses are covering 'ordinary' memory
 * so that flushing them does no harm!
 */
static inline void FLUSH_BUF(register uintptr_t addr, register int len)
{
	asm volatile("":::"memory");
	/* round 'len' up to a cache-line multiple; note the first 'dcbf'
	 * hence touches the line just past the buffer (harmless by the
	 * assumption above), and the last one ('len'==0) flushes 'addr'
	 */
	len = MV643XX_ALIGN(len, RX_BUF_ALIGNMENT);
	do {
		asm volatile("dcbf %0, %1"::"b"(addr),"r"(len));
		len -= RX_BUF_ALIGNMENT;
	} while ( len >= 0 );
}
---|
983 | |
---|
#else /* hardware snooping enabled */

/* inline this to silence compiler warnings */
static inline int INVAL_DESC(volatile void *d)
{ return 0; }

/* with hardware snooping, all cache maintenance degenerates to no-ops */
#define FLUSH_DESC(d)	NOOP()
#define INVAL_BUF(b,l)	NOOP()
#define FLUSH_BUF(b,l)	NOOP()
#define FLUSH_BARRIER()	NOOP()

#endif /* cache coherency support */

/* Synchronize memory access */
#ifdef __PPC__
static inline void membarrier(void)
{
	asm volatile("sync":::"memory");
}
#else
#error "memory barrier instruction not defined (yet) for this CPU"
#endif
---|
1006 | |
---|
/* Enable and disable interrupts at the device */

/* Enable the IRQs given in 'mask', restricted to the ones the driver
 * actually uses (mp->irq_mask / mp->xirq_mask). The read-modify-write
 * of the two enable registers is done with interrupts off since the
 * ISR path also modifies them.
 */
static inline void
mveth_enable_irqs(struct mveth_private *mp, uint32_t mask)
{
rtems_interrupt_level l;
uint32_t val;
	rtems_interrupt_disable(l);

	val = MV_READ(MV643XX_ETH_INTERRUPT_ENBL_R(mp->port_num));
	/* keep EXT_ENA set so extended-cause interrupts get through */
	val = (val | mask | MV643XX_ETH_IRQ_EXT_ENA) & mp->irq_mask;

	MV_WRITE(MV643XX_ETH_INTERRUPT_ENBL_R(mp->port_num), val);

	val = MV_READ(MV643XX_ETH_INTERRUPT_EXTEND_ENBL_R(mp->port_num));
	val = (val | mask) & mp->xirq_mask;
	MV_WRITE(MV643XX_ETH_INTERRUPT_EXTEND_ENBL_R(mp->port_num), val);

	rtems_interrupt_enable(l);
}

/* Disable the IRQs given in 'mask'; returns the OR of the previous
 * contents of both enable registers so the caller can re-enable
 * exactly what had been on before.
 */
static inline uint32_t
mveth_disable_irqs(struct mveth_private *mp, uint32_t mask)
{
rtems_interrupt_level l;
uint32_t val,xval,tmp;
	rtems_interrupt_disable(l);

	val = MV_READ(MV643XX_ETH_INTERRUPT_ENBL_R(mp->port_num));
	tmp = ( (val & ~mask) | MV643XX_ETH_IRQ_EXT_ENA ) & mp->irq_mask;
	MV_WRITE(MV643XX_ETH_INTERRUPT_ENBL_R(mp->port_num), tmp);

	xval = MV_READ(MV643XX_ETH_INTERRUPT_EXTEND_ENBL_R(mp->port_num));
	tmp  = (xval & ~mask) & mp->xirq_mask;
	MV_WRITE(MV643XX_ETH_INTERRUPT_EXTEND_ENBL_R(mp->port_num), tmp);

	rtems_interrupt_enable(l);

	return (val | xval);
}
---|
1046 | |
---|
/* This should be safe even w/o turning off interrupts if multiple
 * threads ack different bits in the cause register (and ignore
 * other ones) since writing 'ones' into the cause register doesn't
 * 'stick'.
 */

/* Acknowledge (clear) the pending IRQs selected by 'mask' and return
 * the pending IRQs the driver cares about; main-cause and extended-
 * cause bits are merged into a single word (their used bit ranges do
 * not overlap).
 */
static inline uint32_t
mveth_ack_irqs(struct mveth_private *mp, uint32_t mask)
{
register uint32_t x,xe,p;

	p  = mp->port_num;
	/* Get cause */
	x  = MV_READ(MV643XX_ETH_INTERRUPT_CAUSE_R(p));

	/* Ack interrupts filtering the ones we're interested in */

	/* Note: EXT_IRQ bit clears by itself if EXT interrupts are cleared */
	MV_WRITE(MV643XX_ETH_INTERRUPT_CAUSE_R(p), ~ (x & mp->irq_mask & mask));

	/* linux driver tests 1<<1 as a summary bit for extended interrupts;
	 * the mv64360 seems to use 1<<19 for that purpose; for the moment,
	 * I just check both.
	 * Update: link status irq (1<<16 in xe) doesn't set (1<<19) in x!
	 */
	if ( 1 /* x & 2 */ )
	{
		xe = MV_READ(MV643XX_ETH_INTERRUPT_EXTEND_CAUSE_R(p));

		MV_WRITE(MV643XX_ETH_INTERRUPT_EXTEND_CAUSE_R(p), ~ (xe & mp->xirq_mask & mask));
	} else {
		xe = 0;
	}
#ifdef MVETH_TESTING
	/* paranoia: catch any IRQ bits we don't know about and shut
	 * everything off so the condition can be debugged
	 */
	if (    ((x  & MV643XX_ETH_ALL_IRQS)     & ~MV643XX_ETH_KNOWN_IRQS)
	     || ((xe & MV643XX_ETH_ALL_EXT_IRQS) & ~MV643XX_ETH_KNOWN_EXT_IRQS) ) {
		fprintf(stderr, "Unknown IRQs detected; leaving all disabled for debugging:\n");
		fprintf(stderr, "Cause reg was 0x%08x, ext cause 0x%08x\n", x, xe);
		mp->irq_mask  = 0;
		mp->xirq_mask = 0;
	}
#endif
	/* luckily, the extended and 'normal' interrupts we use don't overlap so
	 * we can just OR them into a single word
	 */
	return (xe & mp->xirq_mask) | (x & mp->irq_mask);
}
---|
1094 | |
---|
1095 | static void mveth_isr(rtems_irq_hdl_param arg) |
---|
1096 | { |
---|
1097 | unsigned unit = (unsigned)arg; |
---|
1098 | mveth_disable_irqs(&theMvEths[unit].pvt, -1); |
---|
1099 | theMvEths[unit].pvt.stats.irqs++; |
---|
1100 | rtems_bsdnet_event_send( theMvEths[unit].pvt.tid, 1<<unit ); |
---|
1101 | } |
---|
1102 | |
---|
/* Alternate ISR used when a user callback is installed: just count
 * the interrupt and delegate to the user's handler.
 */
static void mveth_isr_1(rtems_irq_hdl_param arg)
{
unsigned              unit = (unsigned)arg;
struct mveth_private *mp   = &theMvEths[unit].pvt;

	mp->stats.irqs++;
	mp->isr(mp->isr_arg);
}
---|
1111 | |
---|
1112 | static void |
---|
1113 | mveth_clear_mib_counters(struct mveth_private *mp) |
---|
1114 | { |
---|
1115 | register int i; |
---|
1116 | register uint32_t b; |
---|
1117 | /* reading the counters resets them */ |
---|
1118 | b = MV643XX_ETH_MIB_COUNTERS(mp->port_num); |
---|
1119 | for (i=0; i< MV643XX_ETH_NUM_MIB_COUNTERS; i++, b+=4) |
---|
1120 | (void)MV_READ(b); |
---|
1121 | } |
---|
1122 | |
---|
/* Reading a MIB register also clears it. Hence we read the lo
 * register first, then the hi one. Correct reading is guaranteed since
 * the 'lo' register cannot overflow after it is read since it had
 * been reset to 0.
 */
static unsigned long long
read_long_mib_counter(int port_num, int idx)
{
unsigned long      lo;
unsigned long long hi;
	lo = MV_READ(MV643XX_ETH_MIB_COUNTERS(port_num)+(idx<<2));
	idx++;
	hi = MV_READ(MV643XX_ETH_MIB_COUNTERS(port_num)+(idx<<2));
	return (hi<<32) | lo;
}

/* Read (and thereby clear) a single 32-bit MIB counter; 'idx' is the
 * 32-bit word index into the port's MIB counter block.
 */
static inline unsigned long
read_mib_counter(int port_num, int idx)
{
	return MV_READ(MV643XX_ETH_MIB_COUNTERS(port_num)+(idx<<2));
}
---|
1144 | |
---|
1145 | |
---|
1146 | /* write ethernet address from buffer to hardware (need to change unicast filter after this) */ |
---|
1147 | static void |
---|
1148 | mveth_write_eaddr(struct mveth_private *mp, unsigned char *eaddr) |
---|
1149 | { |
---|
1150 | int i; |
---|
1151 | uint32_t x; |
---|
1152 | |
---|
1153 | /* build hi word */ |
---|
1154 | for (i=4,x=0; i; i--, eaddr++) { |
---|
1155 | x = (x<<8) | *eaddr; |
---|
1156 | } |
---|
1157 | MV_WRITE(MV643XX_ETH_MAC_ADDR_HI(mp->port_num), x); |
---|
1158 | |
---|
1159 | /* build lo word */ |
---|
1160 | for (i=2,x=0; i; i--, eaddr++) { |
---|
1161 | x = (x<<8) | *eaddr; |
---|
1162 | } |
---|
1163 | MV_WRITE(MV643XX_ETH_MAC_ADDR_LO(mp->port_num), x); |
---|
1164 | } |
---|
1165 | |
---|
/* PHY/MII Interface
 *
 * Read/write a PHY register;
 *
 * NOTE: The SMI register is shared among the three devices.
 *       Protection is provided by the global networking semaphore.
 *       If non-bsd drivers are running on a subset of IFs proper
 *       locking of all shared registers must be implemented!
 */

/* Read PHY register 'addr' (0..31) of mp->phy via the SMI engine.
 * Returns the 16-bit register value in the low half-word; the upper
 * half-word carries the number of busy-wait polls (saturated at
 * 0xffff), useful as a rough timing diagnostic.
 */
STATIC unsigned
mveth_mii_read(struct mveth_private *mp, unsigned addr)
{
unsigned v;
unsigned wc = 0;

	addr &= 0x1f;

	/* wait until not busy */
	do {
		v = MV_READ(MV643XX_ETH_SMI_R);
		wc++;
	} while ( MV643XX_ETH_SMI_BUSY & v );

	/* kick off the read operation */
	MV_WRITE(MV643XX_ETH_SMI_R, (addr <<21 ) | (mp->phy<<16) | MV643XX_ETH_SMI_OP_RD );

	/* wait for completion; NOTE(review): only BUSY is polled here,
	 * not a read-valid bit -- confirm this suffices on this chip
	 */
	do {
		v = MV_READ(MV643XX_ETH_SMI_R);
		wc++;
	} while ( MV643XX_ETH_SMI_BUSY & v );

	/* saturate the poll count so it fits the upper half-word */
	if (wc>0xffff)
		wc = 0xffff;
	return (wc<<16) | (v & 0xffff);
}

/* Write the 16 LSBs of 'v' to PHY register 'addr' (0..31) of mp->phy.
 * Returns the number of busy-wait polls performed before the SMI
 * engine became available (the write itself is not waited for).
 */
STATIC unsigned
mveth_mii_write(struct mveth_private *mp, unsigned addr, unsigned v)
{
unsigned wc = 0;

	addr &= 0x1f;
	v    &= 0xffff;

	/* busywait is ugly but not preventing ISRs or high priority tasks from
	 * preempting us
	 */

	/* wait until not busy */
	while ( MV643XX_ETH_SMI_BUSY & MV_READ(MV643XX_ETH_SMI_R) )
		wc++ /* wait */;

	MV_WRITE(MV643XX_ETH_SMI_R, (addr <<21 ) | (mp->phy<<16) | MV643XX_ETH_SMI_OP_WR | v );

	return wc;
}
---|
1221 | |
---|
/* MID-LAYER SUPPORT ROUTINES */

/* Start TX if descriptors are exhausted */
static __inline__ void
mveth_start_tx(struct mveth_private *mp)
{
uint32_t running;
	if ( mp->avail <= 0 ) {
		/* kick queue 0, but only if it is not already running */
		running = MV_READ(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_R(mp->port_num));
		if ( ! (running & MV643XX_ETH_TX_START(0)) ) {
			MV_WRITE(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_R(mp->port_num), MV643XX_ETH_TX_START(0));
		}
	}
}
---|
1236 | |
---|
1237 | /* Stop TX and wait for the command queues to stop and the fifo to drain */ |
---|
1238 | static uint32_t |
---|
1239 | mveth_stop_tx(int port) |
---|
1240 | { |
---|
1241 | uint32_t active_q; |
---|
1242 | |
---|
1243 | active_q = (MV_READ(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_R(port)) & MV643XX_ETH_TX_ANY_RUNNING); |
---|
1244 | |
---|
1245 | if ( active_q ) { |
---|
1246 | /* Halt TX and wait for activity to stop */ |
---|
1247 | MV_WRITE(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_R(port), MV643XX_ETH_TX_STOP_ALL); |
---|
1248 | while ( MV643XX_ETH_TX_ANY_RUNNING & MV_READ(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_R(port)) ) |
---|
1249 | /* poll-wait */; |
---|
1250 | /* Wait for Tx FIFO to drain */ |
---|
1251 | while ( ! (MV643XX_ETH_PORT_STATUS_R(port) & MV643XX_ETH_PORT_STATUS_TX_FIFO_EMPTY) ) |
---|
1252 | /* poll-wait */; |
---|
1253 | } |
---|
1254 | |
---|
1255 | return active_q; |
---|
1256 | } |
---|
1257 | |
---|
/* update serial port settings from current link status */

/* Program the negotiated speed/duplex ('media', an IFM_* word) into
 * the port's serial control register. When the port is enabled, TX is
 * stopped and the port disabled around the change, then TX is
 * restarted if it had been running.
 */
static void
mveth_update_serial_port(struct mveth_private *mp, int media)
{
int      port = mp->port_num;
uint32_t old, new;

	new = old = MV_READ(MV643XX_ETH_SERIAL_CONTROL_R(port));

	/* mask speed and duplex settings */
	new &= ~(  MV643XX_ETH_SET_GMII_SPEED_1000
	         | MV643XX_ETH_SET_MII_SPEED_100
	         | MV643XX_ETH_SET_FULL_DUPLEX );

	if ( IFM_FDX & media )
		new |= MV643XX_ETH_SET_FULL_DUPLEX;

	switch ( IFM_SUBTYPE(media) ) {
		default: /* treat as 10 */
			break;
		case IFM_100_TX:
			new |= MV643XX_ETH_SET_MII_SPEED_100;
			break;
		case IFM_1000_T:
			new |= MV643XX_ETH_SET_GMII_SPEED_1000;
			break;
	}

	if ( new != old ) {
		if ( ! (MV643XX_ETH_SERIAL_PORT_ENBL & new) ) {
			/* just write */
			MV_WRITE(MV643XX_ETH_SERIAL_CONTROL_R(port), new);
		} else {
			uint32_t were_running;

			were_running = mveth_stop_tx(port);

			/* disable the port before changing its configuration */
			old &= ~MV643XX_ETH_SERIAL_PORT_ENBL;
			MV_WRITE(MV643XX_ETH_SERIAL_CONTROL_R(port), old);
			MV_WRITE(MV643XX_ETH_SERIAL_CONTROL_R(port), new);
			/* linux driver writes twice... */
			MV_WRITE(MV643XX_ETH_SERIAL_CONTROL_R(port), new);

			if ( were_running ) {
				MV_WRITE(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_R(mp->port_num), MV643XX_ETH_TX_START(0));
			}
		}
	}
}
---|
1307 | |
---|
/* Clear multicast filters */

/* Reset both multicast hash tables and their reference counts. In
 * promiscuous mode every table byte is set to 'accept' (0x01) instead
 * of cleared, so all multicast frames still pass.
 */
void
BSP_mve_mcast_filter_clear(struct mveth_private *mp)
{
int               i;
register uint32_t s,o;
uint32_t          v = mp->promisc ? 0x01010101 : 0x00000000;
	s = MV643XX_ETH_DA_FILTER_SPECL_MCAST_TBL(mp->port_num);
	o = MV643XX_ETH_DA_FILTER_OTHER_MCAST_TBL(mp->port_num);
	for (i=0; i<MV643XX_ETH_NUM_MCAST_ENTRIES; i++) {
		MV_WRITE(s,v);
		MV_WRITE(o,v);
		s+=4;
		o+=4;
	}
	/* forget all aliasing reference counts */
	for (i=0; i<sizeof(mp->mc_refcnt.specl)/sizeof(mp->mc_refcnt.specl[0]); i++) {
		mp->mc_refcnt.specl[i] = 0;
		mp->mc_refcnt.other[i] = 0;
	}
}
---|
1328 | |
---|
1329 | void |
---|
1330 | BSP_mve_mcast_filter_accept_all(struct mveth_private *mp) |
---|
1331 | { |
---|
1332 | int i; |
---|
1333 | register uint32_t s,o; |
---|
1334 | s = MV643XX_ETH_DA_FILTER_SPECL_MCAST_TBL(mp->port_num); |
---|
1335 | o = MV643XX_ETH_DA_FILTER_OTHER_MCAST_TBL(mp->port_num); |
---|
1336 | for (i=0; i<MV643XX_ETH_NUM_MCAST_ENTRIES; i++) { |
---|
1337 | MV_WRITE(s,0x01010101); |
---|
1338 | MV_WRITE(o,0x01010101); |
---|
1339 | s+=4; |
---|
1340 | o+=4; |
---|
1341 | /* Not clear what we should do with the reference count. |
---|
1342 | * For now just increment it. |
---|
1343 | */ |
---|
1344 | for (i=0; i<sizeof(mp->mc_refcnt.specl)/sizeof(mp->mc_refcnt.specl[0]); i++) { |
---|
1345 | mp->mc_refcnt.specl[i]++; |
---|
1346 | mp->mc_refcnt.other[i]++; |
---|
1347 | } |
---|
1348 | } |
---|
1349 | } |
---|
1350 | |
---|
1351 | static void add_entry(uint32_t off, uint8_t hash, Mc_Refcnt *refcnt) |
---|
1352 | { |
---|
1353 | uint32_t val; |
---|
1354 | uint32_t slot = hash & 0xfc; |
---|
1355 | |
---|
1356 | if ( 0 == (*refcnt)[hash]++ ) { |
---|
1357 | val = MV_READ(off+slot) | ( 1 << ((hash&3)<<3) ); |
---|
1358 | MV_WRITE(off+slot, val); |
---|
1359 | } |
---|
1360 | } |
---|
1361 | |
---|
1362 | static void del_entry(uint32_t off, uint8_t hash, Mc_Refcnt *refcnt) |
---|
1363 | { |
---|
1364 | uint32_t val; |
---|
1365 | uint32_t slot = hash & 0xfc; |
---|
1366 | |
---|
1367 | if ( (*refcnt)[hash] > 0 && 0 == --(*refcnt)[hash] ) { |
---|
1368 | val = MV_READ(off+slot) & ~( 1 << ((hash&3)<<3) ); |
---|
1369 | MV_WRITE(off+slot, val); |
---|
1370 | } |
---|
1371 | } |
---|
1372 | |
---|
/* Add 'enaddr' to the RX multicast filter. Addresses of the special
 * form 01:00:5e:00:00:xx use the 'special' table, indexed directly by
 * the last address byte; all other multicast addresses are hashed
 * (same algorithm as the linux driver) into the 'other' table. The
 * per-entry reference count handles aliasing addresses. Non-multicast
 * and broadcast addresses are silently ignored.
 */
void
BSP_mve_mcast_filter_accept_add(struct mveth_private *mp, unsigned char *enaddr)
{
uint32_t          hash;
static const char spec[]={0x01,0x00,0x5e,0x00,0x00};
static const char bcst[]={0xff,0xff,0xff,0xff,0xff,0xff};
uint32_t          tabl;
Mc_Refcnt         *refcnt;

	if ( ! (0x01 & enaddr[0]) ) {
		/* not a multicast address; ignore */
		return;
	}

	if ( 0 == memcmp(enaddr, bcst, sizeof(bcst)) ) {
		/* broadcast address; ignore */
		return;
	}

	if ( 0 == memcmp(enaddr, spec, sizeof(spec)) ) {
		hash   = enaddr[5];
		tabl   = MV643XX_ETH_DA_FILTER_SPECL_MCAST_TBL(mp->port_num);
		refcnt = &mp->mc_refcnt.specl;
	} else {
		uint32_t test, mask;
		int      i;
		/* algorithm used by linux driver */
		for ( hash=0, i=0; i<6; i++ ) {
			hash = (hash ^ enaddr[i]) << 8;
			for ( test=0x8000, mask=0x8380; test>0x0080; test>>=1, mask>>=1 ) {
				if ( hash & test )
					hash ^= mask;
			}
		}
		tabl   = MV643XX_ETH_DA_FILTER_OTHER_MCAST_TBL(mp->port_num);
		refcnt = &mp->mc_refcnt.other;
	}
	add_entry(tabl, hash, refcnt);
}

/* Remove 'enaddr' from the RX multicast filter; exact mirror image of
 * BSP_mve_mcast_filter_accept_add() (same table selection and hash).
 * The hardware filter bit is only cleared once the reference count of
 * the entry drops to zero.
 */
void
BSP_mve_mcast_filter_accept_del(struct mveth_private *mp, unsigned char *enaddr)
{
uint32_t          hash;
static const char spec[]={0x01,0x00,0x5e,0x00,0x00};
static const char bcst[]={0xff,0xff,0xff,0xff,0xff,0xff};
uint32_t          tabl;
Mc_Refcnt         *refcnt;

	if ( ! (0x01 & enaddr[0]) ) {
		/* not a multicast address; ignore */
		return;
	}

	if ( 0 == memcmp(enaddr, bcst, sizeof(bcst)) ) {
		/* broadcast address; ignore */
		return;
	}

	if ( 0 == memcmp(enaddr, spec, sizeof(spec)) ) {
		hash   = enaddr[5];
		tabl   = MV643XX_ETH_DA_FILTER_SPECL_MCAST_TBL(mp->port_num);
		refcnt = &mp->mc_refcnt.specl;
	} else {
		uint32_t test, mask;
		int      i;
		/* algorithm used by linux driver */
		for ( hash=0, i=0; i<6; i++ ) {
			hash = (hash ^ enaddr[i]) << 8;
			for ( test=0x8000, mask=0x8380; test>0x0080; test>>=1, mask>>=1 ) {
				if ( hash & test )
					hash ^= mask;
			}
		}
		tabl   = MV643XX_ETH_DA_FILTER_OTHER_MCAST_TBL(mp->port_num);
		refcnt = &mp->mc_refcnt.other;
	}
	del_entry(tabl, hash, refcnt);
}
---|
1452 | |
---|
1453 | /* Clear all address filters (multi- and unicast) */ |
---|
1454 | static void |
---|
1455 | mveth_clear_addr_filters(struct mveth_private *mp) |
---|
1456 | { |
---|
1457 | register int i; |
---|
1458 | register uint32_t u; |
---|
1459 | u = MV643XX_ETH_DA_FILTER_UNICAST_TBL(mp->port_num); |
---|
1460 | for (i=0; i<MV643XX_ETH_NUM_UNICAST_ENTRIES; i++) { |
---|
1461 | MV_WRITE(u,0); |
---|
1462 | u+=4; |
---|
1463 | } |
---|
1464 | BSP_mve_mcast_filter_clear(mp); |
---|
1465 | } |
---|
1466 | |
---|
1467 | /* Setup unicast filter for a given MAC address (least significant nibble) */ |
---|
1468 | static void |
---|
1469 | mveth_ucfilter(struct mveth_private *mp, unsigned char mac_lsbyte, int accept) |
---|
1470 | { |
---|
1471 | unsigned nib, slot, bit; |
---|
1472 | uint32_t val; |
---|
1473 | /* compute slot in table */ |
---|
1474 | nib = mac_lsbyte & 0xf; /* strip nibble */ |
---|
1475 | slot = nib & ~3; /* (nibble/4)*4 */ |
---|
1476 | bit = (nib & 3)<<3; /* 8*(nibble % 4) */ |
---|
1477 | val = MV_READ(MV643XX_ETH_DA_FILTER_UNICAST_TBL(mp->port_num) + slot); |
---|
1478 | if ( accept ) { |
---|
1479 | val |= 0x01 << bit; |
---|
1480 | } else { |
---|
1481 | val &= 0x0e << bit; |
---|
1482 | } |
---|
1483 | MV_WRITE(MV643XX_ETH_DA_FILTER_UNICAST_TBL(mp->port_num) + slot, val); |
---|
1484 | } |
---|
1485 | |
---|
#if defined( ENABLE_TX_WORKAROUND_8_BYTE_PROBLEM ) && 0
/* Currently unused; small unaligned buffers seem to be rare
 * so we just use memcpy()...
 */

/* memcpy for 0..7 bytes; arranged so that gcc
 * optimizes for powerpc...
 *
 * BUGFIX: the original referenced undeclared identifiers 'fro' and 'l'
 * (the parameters were named 'fr' and 'x'); it would not have compiled
 * had this section ever been enabled. Parameter names fixed so the
 * body is consistent.
 */

static inline void memcpy8(void *to, void *fr, unsigned l)
{
register uint8_t *d = to, *s = fr;

	/* copy back-to-front so the odd sizes peel off the tail */
	d+=l; s+=l;
	if ( l & 1 ) {
		*--d=*--s;
	}
	if ( l & 2 ) {
		/* pre-decrementing causes gcc to use auto-decrementing
		 * PPC instructions (lhzu rx, -2(ry))
		 */
		d-=2; s-=2;
		/* use memcpy; don't cast to short -- accessing
		 * misaligned data as short is not portable
		 * (but it works on PPC).
		 */
		__builtin_memcpy(d,s,2);
	}
	if ( l & 4 ) {
		d-=4; s-=4;
		/* see above */
		__builtin_memcpy(d,s,4);
	}
}
#endif
---|
1521 | |
---|
1522 | /* Assign values (buffer + user data) to a tx descriptor slot */ |
---|
1523 | static int |
---|
1524 | mveth_assign_desc(MvEthTxDesc d, struct mbuf *m, unsigned long extra) |
---|
1525 | { |
---|
1526 | int rval = (d->byte_cnt = m->m_len); |
---|
1527 | |
---|
1528 | #ifdef MVETH_TESTING |
---|
1529 | assert( !d->mb ); |
---|
1530 | assert( m->m_len ); |
---|
1531 | #endif |
---|
1532 | |
---|
1533 | /* set CRC on all descriptors; seems to be necessary */ |
---|
1534 | d->cmd_sts = extra | (TDESC_GEN_CRC | TDESC_ZERO_PAD); |
---|
1535 | |
---|
1536 | #ifdef ENABLE_TX_WORKAROUND_8_BYTE_PROBLEM |
---|
1537 | /* The buffer must be 64bit aligned if the payload is <8 (??) */ |
---|
1538 | if ( rval < 8 && ((mtod(m, uintptr_t)) & 7) ) { |
---|
1539 | d->buf_ptr = CPUADDR2ENET( d->workaround ); |
---|
1540 | memcpy((void*)d->workaround, mtod(m, void*), rval); |
---|
1541 | } else |
---|
1542 | #endif |
---|
1543 | { |
---|
1544 | d->buf_ptr = CPUADDR2ENET( mtod(m, unsigned long) ); |
---|
1545 | } |
---|
1546 | d->l4i_chk = 0; |
---|
1547 | return rval; |
---|
1548 | } |
---|
1549 | |
---|
1550 | static int |
---|
1551 | mveth_assign_desc_raw(MvEthTxDesc d, void *buf, int len, unsigned long extra) |
---|
1552 | { |
---|
1553 | int rval = (d->byte_cnt = len); |
---|
1554 | |
---|
1555 | #ifdef MVETH_TESTING |
---|
1556 | assert( !d->u_buf ); |
---|
1557 | assert( len ); |
---|
1558 | #endif |
---|
1559 | |
---|
1560 | /* set CRC on all descriptors; seems to be necessary */ |
---|
1561 | d->cmd_sts = extra | (TDESC_GEN_CRC | TDESC_ZERO_PAD); |
---|
1562 | |
---|
1563 | #ifdef ENABLE_TX_WORKAROUND_8_BYTE_PROBLEM |
---|
1564 | /* The buffer must be 64bit aligned if the payload is <8 (??) */ |
---|
1565 | if ( rval < 8 && ( ((uintptr_t)buf) & 7) ) { |
---|
1566 | d->buf_ptr = CPUADDR2ENET( d->workaround ); |
---|
1567 | memcpy((void*)d->workaround, buf, rval); |
---|
1568 | } else |
---|
1569 | #endif |
---|
1570 | { |
---|
1571 | d->buf_ptr = CPUADDR2ENET( (unsigned long)buf ); |
---|
1572 | } |
---|
1573 | d->l4i_chk = 0; |
---|
1574 | return rval; |
---|
1575 | } |
---|
1576 | |
---|
1577 | /* |
---|
1578 | * Ring Initialization |
---|
1579 | * |
---|
1580 | * ENDIAN ASSUMPTION: DMA engine matches CPU endianness (???) |
---|
1581 | * |
---|
1582 | * Linux driver discriminates __LITTLE and __BIG endian for re-arranging |
---|
1583 | * the u16 fields in the descriptor structs. However, no endian conversion |
---|
1584 | * is done on the individual fields (SDMA byte swapping is disabled on LE). |
---|
1585 | */ |
---|
1586 | |
---|
/* Initialize the RX descriptor ring: allocate one user buffer per
 * descriptor, chain the descriptors into a circular list and hand
 * ownership of every slot to the DMA engine.
 * Returns the number of descriptors initialized (mp->rbuf_count).
 */
STATIC int
mveth_init_rx_desc_ring(struct mveth_private *mp)
{
int i,sz;
MvEthRxDesc d;
uintptr_t baddr;

	memset((void*)mp->rx_ring, 0, sizeof(*mp->rx_ring)*mp->rbuf_count);

	/* DMA address of the ring as seen by the chip */
	mp->rx_desc_dma = CPUADDR2ENET(mp->rx_ring);

	for ( i=0, d = mp->rx_ring; i<mp->rbuf_count; i++, d++ ) {
		/* obtain a receive buffer from the user-supplied allocator */
		d->u_buf = mp->alloc_rxbuf(&sz, &baddr);
		assert( d->u_buf );

#ifndef ENABLE_HW_SNOOPING
		/* could reduce the area to max. ethernet packet size */
		INVAL_BUF(baddr, sz);
#endif

		d->buf_size = sz;
		d->byte_cnt = 0;
		/* give the slot to the DMA engine with RX interrupts enabled */
		d->cmd_sts  = RDESC_DMA_OWNED | RDESC_INT_ENA;
		/* modulo arithmetic makes the last entry point back to the first */
		d->next     = mp->rx_ring + (i+1) % mp->rbuf_count;

		d->buf_ptr  = CPUADDR2ENET( baddr );
		d->next_desc_ptr = CPUADDR2ENET(d->next);
		/* push the descriptor out of the cache before DMA may read it */
		FLUSH_DESC(d);
	}
	FLUSH_BARRIER();

	/* software 'tail' pointer starts at the beginning of the ring */
	mp->d_rx_t = mp->rx_ring;

	/* point the chip to the start of the ring */
	MV_WRITE(MV643XX_ETH_RX_Q0_CURRENT_DESC_PTR(mp->port_num),mp->rx_desc_dma);

	return i;
}
---|
1626 | |
---|
1627 | STATIC int |
---|
1628 | mveth_init_tx_desc_ring(struct mveth_private *mp) |
---|
1629 | { |
---|
1630 | int i; |
---|
1631 | MvEthTxDesc d; |
---|
1632 | |
---|
1633 | memset((void*)mp->tx_ring, 0, sizeof(*mp->tx_ring)*mp->xbuf_count); |
---|
1634 | |
---|
1635 | /* DMA and CPU live in the same address space (rtems) */ |
---|
1636 | mp->tx_desc_dma = CPUADDR2ENET(mp->tx_ring); |
---|
1637 | mp->avail = TX_AVAILABLE_RING_SIZE(mp); |
---|
1638 | |
---|
1639 | for ( i=0, d=mp->tx_ring; i<mp->xbuf_count; i++,d++ ) { |
---|
1640 | d->l4i_chk = 0; |
---|
1641 | d->byte_cnt = 0; |
---|
1642 | d->cmd_sts = 0; |
---|
1643 | d->buf_ptr = 0; |
---|
1644 | |
---|
1645 | d->next = mp->tx_ring + (i+1) % mp->xbuf_count; |
---|
1646 | d->next_desc_ptr = CPUADDR2ENET(d->next); |
---|
1647 | FLUSH_DESC(d); |
---|
1648 | } |
---|
1649 | FLUSH_BARRIER(); |
---|
1650 | |
---|
1651 | mp->d_tx_h = mp->d_tx_t = mp->tx_ring; |
---|
1652 | |
---|
1653 | /* point the chip to the start of the ring */ |
---|
1654 | MV_WRITE(MV643XX_ETH_TX_Q0_CURRENT_DESC_PTR(mp->port_num),mp->tx_desc_dma); |
---|
1655 | |
---|
1656 | return i; |
---|
1657 | } |
---|
1658 | |
---|
1659 | /* PUBLIC LOW-LEVEL DRIVER ACCESS */ |
---|
1660 | |
---|
1661 | static struct mveth_private * |
---|
1662 | mve_setup_internal( |
---|
1663 | int unit, |
---|
1664 | rtems_id tid, |
---|
1665 | void (*isr)(void*isr_arg), |
---|
1666 | void *isr_arg, |
---|
1667 | void (*cleanup_txbuf)(void *user_buf, void *closure, int error_on_tx_occurred), |
---|
1668 | void *cleanup_txbuf_arg, |
---|
1669 | void *(*alloc_rxbuf)(int *p_size, uintptr_t *p_data_addr), |
---|
1670 | void (*consume_rxbuf)(void *user_buf, void *closure, int len), |
---|
1671 | void *consume_rxbuf_arg, |
---|
1672 | int rx_ring_size, |
---|
1673 | int tx_ring_size, |
---|
1674 | int irq_mask |
---|
1675 | ) |
---|
1676 | |
---|
1677 | { |
---|
1678 | struct mveth_private *mp; |
---|
1679 | struct ifnet *ifp; |
---|
1680 | int InstallISRSuccessful; |
---|
1681 | |
---|
1682 | if ( unit <= 0 || unit > MV643XXETH_NUM_DRIVER_SLOTS ) { |
---|
1683 | printk(DRVNAME": Bad unit number %i; must be 1..%i\n", unit, MV643XXETH_NUM_DRIVER_SLOTS); |
---|
1684 | return 0; |
---|
1685 | } |
---|
1686 | ifp = &theMvEths[unit-1].arpcom.ac_if; |
---|
1687 | if ( ifp->if_init ) { |
---|
1688 | if ( ifp->if_init ) { |
---|
1689 | printk(DRVNAME": instance %i already attached.\n", unit); |
---|
1690 | return 0; |
---|
1691 | } |
---|
1692 | } |
---|
1693 | |
---|
1694 | if ( rx_ring_size < 0 && tx_ring_size < 0 ) |
---|
1695 | return 0; |
---|
1696 | |
---|
1697 | if ( MV_64360 != BSP_getDiscoveryVersion(0) ) { |
---|
1698 | printk(DRVNAME": not mv64360 chip\n"); |
---|
1699 | return 0; |
---|
1700 | } |
---|
1701 | |
---|
1702 | /* lazy init of mutex (non thread-safe! - we assume 1st initialization is single-threaded) */ |
---|
1703 | if ( ! mveth_mtx ) { |
---|
1704 | rtems_status_code sc; |
---|
1705 | sc = rtems_semaphore_create( |
---|
1706 | rtems_build_name('m','v','e','X'), |
---|
1707 | 1, |
---|
1708 | RTEMS_BINARY_SEMAPHORE | RTEMS_PRIORITY | RTEMS_INHERIT_PRIORITY | RTEMS_DEFAULT_ATTRIBUTES, |
---|
1709 | 0, |
---|
1710 | &mveth_mtx); |
---|
1711 | if ( RTEMS_SUCCESSFUL != sc ) { |
---|
1712 | rtems_error(sc,DRVNAME": creating mutex\n"); |
---|
1713 | rtems_panic("unable to proceed\n"); |
---|
1714 | } |
---|
1715 | } |
---|
1716 | |
---|
1717 | mp = &theMvEths[unit-1].pvt; |
---|
1718 | |
---|
1719 | memset(mp, 0, sizeof(*mp)); |
---|
1720 | |
---|
1721 | mp->port_num = unit-1; |
---|
1722 | mp->phy = (MV_READ(MV643XX_ETH_PHY_ADDR_R) >> (5*mp->port_num)) & 0x1f; |
---|
1723 | |
---|
1724 | mp->tid = tid; |
---|
1725 | mp->isr = isr; |
---|
1726 | mp->isr_arg = isr_arg; |
---|
1727 | |
---|
1728 | mp->cleanup_txbuf = cleanup_txbuf; |
---|
1729 | mp->cleanup_txbuf_arg = cleanup_txbuf_arg; |
---|
1730 | mp->alloc_rxbuf = alloc_rxbuf; |
---|
1731 | mp->consume_rxbuf = consume_rxbuf; |
---|
1732 | mp->consume_rxbuf_arg = consume_rxbuf_arg; |
---|
1733 | |
---|
1734 | mp->rbuf_count = rx_ring_size ? rx_ring_size : MV643XX_RX_RING_SIZE; |
---|
1735 | mp->xbuf_count = tx_ring_size ? tx_ring_size : MV643XX_TX_RING_SIZE; |
---|
1736 | |
---|
1737 | if ( mp->xbuf_count > 0 ) |
---|
1738 | mp->xbuf_count += TX_NUM_TAG_SLOTS; |
---|
1739 | |
---|
1740 | if ( mp->rbuf_count < 0 ) |
---|
1741 | mp->rbuf_count = 0; |
---|
1742 | if ( mp->xbuf_count < 0 ) |
---|
1743 | mp->xbuf_count = 0; |
---|
1744 | |
---|
1745 | /* allocate ring area; add 1 entry -- room for alignment */ |
---|
1746 | assert( !mp->ring_area ); |
---|
1747 | mp->ring_area = malloc( |
---|
1748 | sizeof(*mp->ring_area) * |
---|
1749 | (mp->rbuf_count + mp->xbuf_count + 1), |
---|
1750 | M_DEVBUF, |
---|
1751 | M_WAIT ); |
---|
1752 | assert( mp->ring_area ); |
---|
1753 | |
---|
1754 | BSP_mve_stop_hw(mp); |
---|
1755 | |
---|
1756 | if ( irq_mask ) { |
---|
1757 | irq_data[mp->port_num].hdl = tid ? mveth_isr : mveth_isr_1; |
---|
1758 | InstallISRSuccessful = BSP_install_rtems_irq_handler( &irq_data[mp->port_num] ); |
---|
1759 | assert( InstallISRSuccessful ); |
---|
1760 | } |
---|
1761 | |
---|
1762 | /* mark as used */ |
---|
1763 | ifp->if_init = (void*)(-1); |
---|
1764 | |
---|
1765 | if ( rx_ring_size < 0 ) |
---|
1766 | irq_mask &= ~ MV643XX_ETH_IRQ_RX_DONE; |
---|
1767 | if ( tx_ring_size < 0 ) |
---|
1768 | irq_mask &= ~ MV643XX_ETH_EXT_IRQ_TX_DONE; |
---|
1769 | |
---|
1770 | mp->irq_mask = (irq_mask & MV643XX_ETH_IRQ_RX_DONE); |
---|
1771 | if ( (irq_mask &= (MV643XX_ETH_EXT_IRQ_TX_DONE | MV643XX_ETH_EXT_IRQ_LINK_CHG)) ) { |
---|
1772 | mp->irq_mask |= MV643XX_ETH_IRQ_EXT_ENA; |
---|
1773 | mp->xirq_mask = irq_mask; |
---|
1774 | } else { |
---|
1775 | mp->xirq_mask = 0; |
---|
1776 | } |
---|
1777 | |
---|
1778 | return mp; |
---|
1779 | } |
---|
1780 | |
---|
1781 | struct mveth_private * |
---|
1782 | BSP_mve_setup( |
---|
1783 | int unit, |
---|
1784 | rtems_id tid, |
---|
1785 | void (*cleanup_txbuf)(void *user_buf, void *closure, int error_on_tx_occurred), |
---|
1786 | void *cleanup_txbuf_arg, |
---|
1787 | void *(*alloc_rxbuf)(int *p_size, uintptr_t *p_data_addr), |
---|
1788 | void (*consume_rxbuf)(void *user_buf, void *closure, int len), |
---|
1789 | void *consume_rxbuf_arg, |
---|
1790 | int rx_ring_size, |
---|
1791 | int tx_ring_size, |
---|
1792 | int irq_mask |
---|
1793 | ) |
---|
1794 | { |
---|
1795 | if ( irq_mask && 0 == tid ) { |
---|
1796 | printk(DRVNAME": must supply a TID if irq_msk not zero\n"); |
---|
1797 | return 0; |
---|
1798 | } |
---|
1799 | |
---|
1800 | return mve_setup_internal( |
---|
1801 | unit, |
---|
1802 | tid, |
---|
1803 | 0, 0, |
---|
1804 | cleanup_txbuf, cleanup_txbuf_arg, |
---|
1805 | alloc_rxbuf, |
---|
1806 | consume_rxbuf, consume_rxbuf_arg, |
---|
1807 | rx_ring_size, tx_ring_size, |
---|
1808 | irq_mask); |
---|
1809 | } |
---|
1810 | |
---|
1811 | struct mveth_private * |
---|
1812 | BSP_mve_setup_1( |
---|
1813 | int unit, |
---|
1814 | void (*isr)(void *isr_arg), |
---|
1815 | void *isr_arg, |
---|
1816 | void (*cleanup_txbuf)(void *user_buf, void *closure, int error_on_tx_occurred), |
---|
1817 | void *cleanup_txbuf_arg, |
---|
1818 | void *(*alloc_rxbuf)(int *p_size, uintptr_t *p_data_addr), |
---|
1819 | void (*consume_rxbuf)(void *user_buf, void *closure, int len), |
---|
1820 | void *consume_rxbuf_arg, |
---|
1821 | int rx_ring_size, |
---|
1822 | int tx_ring_size, |
---|
1823 | int irq_mask |
---|
1824 | ) |
---|
1825 | { |
---|
1826 | if ( irq_mask && 0 == isr ) { |
---|
1827 | printk(DRVNAME": must supply an ISR if irq_msk not zero\n"); |
---|
1828 | return 0; |
---|
1829 | } |
---|
1830 | |
---|
1831 | return mve_setup_internal( |
---|
1832 | unit, |
---|
1833 | 0, |
---|
1834 | isr, isr_arg, |
---|
1835 | cleanup_txbuf, cleanup_txbuf_arg, |
---|
1836 | alloc_rxbuf, |
---|
1837 | consume_rxbuf, consume_rxbuf_arg, |
---|
1838 | rx_ring_size, tx_ring_size, |
---|
1839 | irq_mask); |
---|
1840 | } |
---|
1841 | |
---|
/* Return the RTEMS task id registered with this driver instance
 * (0 if the instance was created with BSP_mve_setup_1()).
 */
rtems_id
BSP_mve_get_tid(struct mveth_private *mp)
{
	return mp->tid;
}
---|
1847 | |
---|
1848 | int |
---|
1849 | BSP_mve_detach(struct mveth_private *mp) |
---|
1850 | { |
---|
1851 | int unit = mp->port_num; |
---|
1852 | BSP_mve_stop_hw(mp); |
---|
1853 | if ( mp->irq_mask || mp->xirq_mask ) { |
---|
1854 | if ( !BSP_remove_rtems_irq_handler( &irq_data[mp->port_num] ) ) |
---|
1855 | return -1; |
---|
1856 | } |
---|
1857 | free( (void*)mp->ring_area, M_DEVBUF ); |
---|
1858 | memset(mp, 0, sizeof(*mp)); |
---|
1859 | __asm__ __volatile__("":::"memory"); |
---|
1860 | /* mark as unused */ |
---|
1861 | theMvEths[unit].arpcom.ac_if.if_init = 0; |
---|
1862 | return 0; |
---|
1863 | } |
---|
1864 | |
---|
1865 | /* MAIN RX-TX ROUTINES |
---|
1866 | * |
---|
1867 | * BSP_mve_swipe_tx(): descriptor scavenger; releases mbufs |
---|
1868 | * BSP_mve_send_buf(): xfer mbufs from IF to chip |
---|
1869 | * BSP_mve_swipe_rx(): enqueue received mbufs to interface |
---|
1870 | * allocate new ones and yield them to the |
---|
1871 | * chip. |
---|
1872 | */ |
---|
1873 | |
---|
/* clean up the TX ring freeing up buffers */
/* Walk the TX ring from the software tail, releasing user buffers of
 * descriptors the DMA engine has finished with (or that were never
 * handed over). Stops at the descriptor currently being served by the
 * chip. Returns the number of slots reclaimed; mp->avail is bumped
 * accordingly.
 */
int
BSP_mve_swipe_tx(struct mveth_private *mp)
{
int			rval = 0;
register MvEthTxDesc	d;

	/* buf_ptr == 0 marks the first never-used slot (set by us below
	 * and relied upon by BSP_mve_send_buf()).
	 */
	for ( d = mp->d_tx_t; d->buf_ptr; d = NEXT_TXD(d) ) {

		/* descriptor may have been written back by the chip */
		INVAL_DESC(d);

		/* stop if the chip still owns this slot and is actively
		 * serving it
		 */
		if ( (TDESC_DMA_OWNED & d->cmd_sts)
		     && (uint32_t)d == MV_READ(MV643XX_ETH_CURRENT_SERVED_TX_DESC(mp->port_num)) )
			break;

		/* d->u_buf is only set on the last descriptor in a chain;
		 * we only count errors in the last descriptor;
		 */
		if ( d->u_buf ) {
			mp->cleanup_txbuf(d->u_buf, mp->cleanup_txbuf_arg, (d->cmd_sts & TDESC_ERROR) ? 1 : 0);
			d->u_buf = 0;
		}

		/* mark slot free (see loop condition above) */
		d->buf_ptr = 0;

		rval++;
	}
	mp->d_tx_t = d;
	mp->avail += rval;

	return rval;
}
---|
1906 | |
---|
1907 | /* allocate a new cluster and copy an existing chain there; |
---|
1908 | * old chain is released... |
---|
1909 | */ |
---|
1910 | static struct mbuf * |
---|
1911 | repackage_chain(struct mbuf *m_head) |
---|
1912 | { |
---|
1913 | struct mbuf *m; |
---|
1914 | MGETHDR(m, M_DONTWAIT, MT_DATA); |
---|
1915 | |
---|
1916 | if ( !m ) { |
---|
1917 | goto bail; |
---|
1918 | } |
---|
1919 | |
---|
1920 | MCLGET(m, M_DONTWAIT); |
---|
1921 | |
---|
1922 | if ( !(M_EXT & m->m_flags) ) { |
---|
1923 | m_freem(m); |
---|
1924 | m = 0; |
---|
1925 | goto bail; |
---|
1926 | } |
---|
1927 | |
---|
1928 | m_copydata(m_head, 0, MCLBYTES, mtod(m, caddr_t)); |
---|
1929 | m->m_pkthdr.len = m->m_len = m_head->m_pkthdr.len; |
---|
1930 | |
---|
1931 | bail: |
---|
1932 | m_freem(m_head); |
---|
1933 | return m; |
---|
1934 | } |
---|
1935 | |
---|
1936 | /* Enqueue a mbuf chain or a raw data buffer for transmission; |
---|
1937 | * RETURN: #bytes sent or -1 if there are not enough descriptors |
---|
1938 | * |
---|
1939 | * If 'len' is <=0 then 'm_head' is assumed to point to a mbuf chain. |
---|
1940 | * OTOH, a raw data packet may be send (non-BSD driver) by pointing |
---|
1941 | * m_head to the start of the data and passing 'len' > 0. |
---|
1942 | * |
---|
1943 | * Comments: software cache-flushing incurs a penalty if the |
---|
1944 | * packet cannot be queued since it is flushed anyways. |
---|
1945 | * The algorithm is slightly more efficient in the normal |
---|
1946 | * case, though. |
---|
1947 | */ |
---|
/* Enqueue an mbuf chain (len <= 0) or a raw buffer (len > 0) for
 * transmission. Returns the number of bytes queued, 0 if the packet
 * was empty/dropped, or -1 if not enough descriptors were available.
 * See the block comment above for the full contract.
 */
int
BSP_mve_send_buf(struct mveth_private *mp, void *m_head, void *data_p, int len)
{
int			rval;
register MvEthTxDesc	l,d,h;
register struct mbuf	*m1;
int			nmbs;
int			ismbuf = (len <= 0);

/* Only way to get here is when we discover that the mbuf chain
 * is too long for the tx ring
 */
startover:

	rval = 0;

#ifdef MVETH_TESTING
	assert(m_head);
#endif

	/* if no descriptor is available; try to wipe the queue */
	if ( (mp->avail < 1) && MVETH_CLEAN_ON_SEND(mp)<=0 ) {
		/* Maybe TX is stalled and needs to be restarted */
		mveth_start_tx(mp);
		return -1;
	}

	h = mp->d_tx_h;

#ifdef MVETH_TESTING
	assert( !h->buf_ptr );
	assert( !h->mb );
#endif

	if ( ! (m1 = m_head) )
		return 0;

	if ( ismbuf ) {
		/* find first mbuf with actual data */
		while ( 0 == m1->m_len ) {
			if ( ! (m1 = m1->m_next) ) {
				/* end reached and still no data to send ?? */
				m_freem(m_head);
				return 0;
			}
		}
	}

	/* Don't use the first descriptor yet because BSP_mve_swipe_tx()
	 * needs mp->d_tx_h->buf_ptr == NULL as a marker. Hence, we
	 * start with the second mbuf and fill the first descriptor
	 * last.
	 */

	l = h;
	d = NEXT_TXD(h);

	mp->avail--;

	nmbs = 1;
	if ( ismbuf ) {
		register struct mbuf *m;
		for ( m=m1->m_next; m; m=m->m_next ) {
			if ( 0 == m->m_len )
				continue;	/* skip empty mbufs */

			nmbs++;

			if ( mp->avail < 1 && MVETH_CLEAN_ON_SEND(mp)<=0 ) {
				/* Maybe TX was stalled - try to restart */
				mveth_start_tx(mp);

				/* not enough descriptors; cleanup...
				 * the first slot was never used, so we start
				 * at mp->d_tx_h->next;
				 */
				for ( l = NEXT_TXD(h); l!=d; l=NEXT_TXD(l) ) {
#ifdef MVETH_TESTING
					assert( l->mb == 0 );
#endif
					l->buf_ptr = 0;
					l->cmd_sts = 0;
					mp->avail++;
				}
				mp->avail++;
				if ( nmbs > TX_AVAILABLE_RING_SIZE(mp) ) {
					/* this chain will never fit into the ring */
					if ( nmbs > mp->stats.maxchain )
						mp->stats.maxchain = nmbs;
					mp->stats.repack++;
					if ( ! (m_head = repackage_chain(m_head)) ) {
						/* no cluster available */
						mp->stats.odrops++;
						return 0;
					}
					goto startover;
				}
				return -1;
			}

			mp->avail--;

#ifdef MVETH_TESTING
			assert( d != h );
			assert( !d->buf_ptr );
#endif

			/* fill this slot; hand it to DMA right away (the chip
			 * won't start until the first descriptor is released below)
			 */
			rval += mveth_assign_desc(d, m, TDESC_DMA_OWNED);

			/* write buffer contents back to memory for the DMA engine */
			FLUSH_BUF(mtod(m, uint32_t), m->m_len);

			l = d;
			d = NEXT_TXD(d);

			FLUSH_DESC(l);
		}

		/* fill first slot - don't release to DMA yet */
		rval += mveth_assign_desc(h, m1, TDESC_FRST);


		FLUSH_BUF(mtod(m1, uint32_t), m1->m_len);

	} else {
		/* fill first slot with raw buffer - don't release to DMA yet */
		rval += mveth_assign_desc_raw(h, data_p, len, TDESC_FRST);

		FLUSH_BUF( (uint32_t)data_p, len);
	}

	/* tag last slot; this covers the case where 1st==last */
	l->cmd_sts |= TDESC_LAST | TDESC_INT_ENA;
	/* mbuf goes into last desc */
	l->u_buf = m_head;


	FLUSH_DESC(l);

	/* Tag end; make sure chip doesn't try to read ahead of here! */
	l->next->cmd_sts = 0;
	FLUSH_DESC(l->next);

#ifdef MVETH_DEBUG_TX_DUMP
	if ( (mveth_tx_dump & (1<<mp->port_num)) ) {
		int ll,kk;
		if ( ismbuf ) {
			struct mbuf *m;
			for ( kk=0, m=m_head; m; m=m->m_next) {
				for ( ll=0; ll<m->m_len; ll++ ) {
					printf("%02X ",*(mtod(m,char*) + ll));
					if ( ((++kk)&0xf) == 0 )
						printf("\n");
				}
			}
		} else {
			for ( ll=0; ll<len; ) {
				printf("%02X ",*((char*)data_p + ll));
				if ( ((++ll)&0xf) == 0 )
					printf("\n");
			}
		}
		printf("\n");
	}
#endif

	membarrier();

	/* turn over the whole chain by flipping ownership of the first desc */
	h->cmd_sts |= TDESC_DMA_OWNED;

	FLUSH_DESC(h);

	membarrier();

	/* notify the device */
	MV_WRITE(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_R(mp->port_num), MV643XX_ETH_TX_START(0));

	/* Update softc */
	mp->stats.packet++;
	if ( nmbs > mp->stats.maxchain )
		mp->stats.maxchain = nmbs;

	/* remember new head */
	mp->d_tx_h = d;

	return rval; /* #bytes sent */
}
---|
2136 | |
---|
/* Enqueue a raw packet consisting of an optional header buffer plus an
 * optional data buffer (at least one must be non-NULL). Uses one or
 * two descriptors. Returns the number of bytes queued or -1 if not
 * enough descriptors were available.
 */
int
BSP_mve_send_buf_raw(
	struct mveth_private *mp,
	void                 *head_p,
	int                   h_len,
	void                 *data_p,
	int                   d_len)
{
int			rval;
register MvEthTxDesc	l,d,h;
int			needed;
void			*frst_buf;
int			frst_len;

	rval = 0;

#ifdef MVETH_TESTING
	assert(header || data);
#endif

	/* one descriptor per non-NULL buffer */
	needed = head_p && data_p ? 2 : 1;

	/* if no descriptor is available; try to wipe the queue */
	if ( ( mp->avail < needed )
	     && ( MVETH_CLEAN_ON_SEND(mp) <= 0 || mp->avail < needed ) ) {
		/* Maybe TX was stalled and needs a restart */
		mveth_start_tx(mp);
		return -1;
	}

	h = mp->d_tx_h;

#ifdef MVETH_TESTING
	assert( !h->buf_ptr );
	assert( !h->mb );
#endif

	/* find the 'first' user buffer */
	if ( (frst_buf = head_p) ) {
		frst_len = h_len;
	} else {
		frst_buf = data_p;
		frst_len = d_len;
	}

	/* Don't use the first descriptor yet because BSP_mve_swipe_tx()
	 * needs mp->d_tx_h->buf_ptr == NULL as a marker. Hence, we
	 * start with the second (optional) slot and fill the first
	 * descriptor last.
	 */

	l = h;
	d = NEXT_TXD(h);

	mp->avail--;

	if ( needed > 1 ) {
		mp->avail--;
#ifdef MVETH_TESTING
		assert( d != h );
		assert( !d->buf_ptr );
#endif
		/* second slot may be released to DMA immediately; the chip
		 * won't start until the first descriptor is flipped below
		 */
		rval += mveth_assign_desc_raw(d, data_p, d_len, TDESC_DMA_OWNED);
		FLUSH_BUF( (uint32_t)data_p, d_len );
		d->u_buf = data_p;

		l = d;
		d = NEXT_TXD(d);

		FLUSH_DESC(l);
	}

	/* fill first slot with raw buffer - don't release to DMA yet */
	rval += mveth_assign_desc_raw(h, frst_buf, frst_len, TDESC_FRST);

	FLUSH_BUF( (uint32_t)frst_buf, frst_len);

	/* tag last slot; this covers the case where 1st==last */
	l->cmd_sts |= TDESC_LAST | TDESC_INT_ENA;

	/* first buffer of 'chain' goes into last desc */
	l->u_buf = frst_buf;

	FLUSH_DESC(l);

	/* Tag end; make sure chip doesn't try to read ahead of here! */
	l->next->cmd_sts = 0;
	FLUSH_DESC(l->next);

	membarrier();

	/* turn over the whole chain by flipping ownership of the first desc */
	h->cmd_sts |= TDESC_DMA_OWNED;

	FLUSH_DESC(h);

	membarrier();

	/* notify the device */
	MV_WRITE(MV643XX_ETH_TRANSMIT_QUEUE_COMMAND_R(mp->port_num), MV643XX_ETH_TX_START(0));

	/* Update softc */
	mp->stats.packet++;
	if ( needed > mp->stats.maxchain )
		mp->stats.maxchain = needed;

	/* remember new head */
	mp->d_tx_h = d;

	return rval; /* #bytes sent */
}
---|
2248 | |
---|
2249 | /* send received buffers upwards and replace them |
---|
2250 | * with freshly allocated ones; |
---|
2251 | * ASSUMPTION: buffer length NEVER changes and is set |
---|
2252 | * when the ring is initialized. |
---|
2253 | * TS 20060727: not sure if this assumption is still necessary - I believe it isn't. |
---|
2254 | */ |
---|
2255 | |
---|
/* Harvest received packets: hand each CPU-owned descriptor's buffer to
 * the user 'consume_rxbuf' callback, attach a freshly allocated buffer
 * (or recycle the old one on error / allocation failure) and give the
 * slot back to the DMA engine. Returns the number of descriptors
 * processed.
 */
int
BSP_mve_swipe_rx(struct mveth_private *mp)
{
int		rval = 0, err;
register MvEthRxDesc	d;
void		*newbuf;
int		sz;
uintptr_t	baddr;

	/* invalidate the cached descriptor before testing the ownership
	 * flag the chip may have cleared
	 */
	for ( d = mp->d_rx_t; ! (INVAL_DESC(d), (RDESC_DMA_OWNED & d->cmd_sts)); d=NEXT_RXD(d) ) {

#ifdef MVETH_TESTING
		assert(d->u_buf);
#endif

		err = (RDESC_ERROR & d->cmd_sts);

		if ( err || !(newbuf = mp->alloc_rxbuf(&sz, &baddr)) ) {
			/* drop packet and recycle buffer */
			newbuf = d->u_buf;
			/* NULL buffer + negative len tells the user this was a drop */
			mp->consume_rxbuf(0, mp->consume_rxbuf_arg, err ? -1 : 0);
		} else {
#ifdef MVETH_TESTING
			assert( d->byte_cnt > 0 );
#endif
			mp->consume_rxbuf(d->u_buf, mp->consume_rxbuf_arg, d->byte_cnt);

#ifndef ENABLE_HW_SNOOPING
			/* could reduce the area to max. ethernet packet size */
			INVAL_BUF(baddr, sz);
#endif
			d->u_buf    = newbuf;
			d->buf_ptr  = CPUADDR2ENET(baddr);
			d->buf_size = sz;
			FLUSH_DESC(d);
		}

		/* make sure buffer/descriptor updates are visible before
		 * ownership is transferred back to the chip
		 */
		membarrier();

		d->cmd_sts = RDESC_DMA_OWNED | RDESC_INT_ENA;

		FLUSH_DESC(d);

		rval++;
	}
	/* (re)start RX in case it stopped on a resource error */
	MV_WRITE(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_R(mp->port_num), MV643XX_ETH_RX_START(0));
	mp->d_rx_t = d;
	return rval;
}
---|
2305 | |
---|
/* Stop hardware and clean out the rings */
/* Disable all IRQs, halt TX/RX DMA and the serial port, reclaim every
 * TX buffer via cleanup_txbuf and hand every RX buffer back through
 * consume_rxbuf (with len 0). Safe to call before the rings have been
 * initialized.
 */
void
BSP_mve_stop_hw(struct mveth_private *mp)
{
MvEthTxDesc	d;
MvEthRxDesc	r;
int		i;

	mveth_disable_irqs(mp, -1);

	mveth_stop_tx(mp->port_num);

	/* cleanup TX rings */
	if (mp->d_tx_t) { /* maybe ring isn't initialized yet */
		for ( i=0, d=mp->tx_ring; i<mp->xbuf_count; i++, d++ ) {
			/* should be safe to clear ownership */
			d->cmd_sts &= ~TDESC_DMA_OWNED;
			FLUSH_DESC(d);
		}
		FLUSH_BARRIER();

		/* with ownership cleared, the swipe releases every buffer */
		BSP_mve_swipe_tx(mp);

#ifdef MVETH_TESTING
		assert( mp->d_tx_h == mp->d_tx_t );
		for ( i=0, d=mp->tx_ring; i<mp->xbuf_count; i++, d++ ) {
			assert( !d->buf_ptr );
		}
#endif
	}

	/* stop all RX queues and wait until the engine has actually halted */
	MV_WRITE(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_R(mp->port_num), MV643XX_ETH_RX_STOP_ALL);
	while ( MV643XX_ETH_RX_ANY_RUNNING & MV_READ(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_R(mp->port_num)) )
		/* poll-wait */;

	/* stop serial port */
	MV_WRITE(MV643XX_ETH_SERIAL_CONTROL_R(mp->port_num),
		MV_READ(MV643XX_ETH_SERIAL_CONTROL_R(mp->port_num))
		& ~( MV643XX_ETH_SERIAL_PORT_ENBL | MV643XX_ETH_FORCE_LINK_FAIL_DISABLE | MV643XX_ETH_FORCE_LINK_PASS)
	);

	/* clear pending interrupts */
	MV_WRITE(MV643XX_ETH_INTERRUPT_CAUSE_R(mp->port_num), 0);
	MV_WRITE(MV643XX_ETH_INTERRUPT_EXTEND_CAUSE_R(mp->port_num), 0);

	/* cleanup RX rings */
	if ( mp->rx_ring ) {
		for ( i=0, r=mp->rx_ring; i<mp->rbuf_count; i++, r++ ) {
			/* should be OK to clear ownership flag */
			r->cmd_sts = 0;
			FLUSH_DESC(r);
			/* len 0 tells the user the buffer is merely being returned */
			mp->consume_rxbuf(r->u_buf, mp->consume_rxbuf_arg, 0);
			r->u_buf = 0;
		}
		FLUSH_BARRIER();
	}


}
---|
2365 | |
---|
/* Value programmed into the serial (port) control register; exposed as
 * a global so BSP/application code can override it before the driver
 * is started (defaults to MVETH_SERIAL_CTRL_CONFIG_VAL).
 */
uint32_t mveth_serial_ctrl_config_val = MVETH_SERIAL_CTRL_CONFIG_VAL;
---|
2367 | |
---|
2368 | /* Fire up the low-level driver |
---|
2369 | * |
---|
2370 | * - make sure hardware is halted |
---|
2371 | * - enable cache snooping |
---|
2372 | * - clear address filters |
---|
2373 | * - clear mib counters |
---|
2374 | * - reset phy |
---|
2375 | * - initialize (or reinitialize) descriptor rings |
---|
2376 | * - check that the firmware has set up a reasonable mac address. |
---|
2377 | * - generate unicast filter entry for our mac address |
---|
2378 | * - write register config values to the chip |
---|
2379 | * - start hardware (serial port and SDMA) |
---|
2380 | */ |
---|
2381 | |
---|
/* Bring the port out of reset into an operational state.
 *
 * mp:      low-level driver handle
 * promisc: nonzero enables unicast promiscuous mode
 * enaddr:  optional MAC address (6 bytes) to program; NULL keeps whatever
 *          the firmware already set up.
 *
 * Performs the full sequence documented in the comment above: halt, snoop
 * setup (once), filter/MIB clear, ring init, MAC/filter setup, register
 * config, and finally serial port + RX start and IRQ enable.
 */
void
BSP_mve_init_hw(struct mveth_private *mp, int promisc, unsigned char *enaddr)
{
int			i;
uint32_t	v;
static int	inited = 0;

#ifdef MVETH_DEBUG
	printk(DRVNAME"%i: Entering BSP_mve_init_hw()\n", mp->port_num+1);
#endif

	/* since enable/disable IRQ routine only operate on select bitsets
	 * we must make sure everything is masked initially.
	 */
	MV_WRITE(MV643XX_ETH_INTERRUPT_ENBL_R(mp->port_num), 0);
	MV_WRITE(MV643XX_ETH_INTERRUPT_EXTEND_ENBL_R(mp->port_num), 0);

	BSP_mve_stop_hw(mp);

	memset(&mp->stats, 0, sizeof(mp->stats));

	mp->promisc = promisc;

	/* MotLoad has cache snooping disabled on the ENET2MEM windows.
	 * Some comments in (linux) indicate that there are errata
	 * which cause problems which would be a real bummer.
	 * We try it anyways...
	 */
	if ( !inited ) {
		unsigned long disbl, bar;
		inited = 1;	/* FIXME: non-thread safe lazy init */
		disbl = MV_READ(MV643XX_ETH_BAR_ENBL_R);
		/* disable all 6 windows */
		MV_WRITE(MV643XX_ETH_BAR_ENBL_R, MV643XX_ETH_BAR_DISBL_ALL);
		/* set WB snooping on enabled bars */
		for ( i=0; i<MV643XX_ETH_NUM_BARS*8; i+=8 ) {
			if ( (bar = MV_READ(MV643XX_ETH_BAR_0 + i)) && MV_READ(MV643XX_ETH_SIZE_R_0 + i) ) {
#ifdef ENABLE_HW_SNOOPING
				MV_WRITE(MV643XX_ETH_BAR_0 + i, bar | MV64360_ENET2MEM_SNOOP_WB);
#else
				MV_WRITE(MV643XX_ETH_BAR_0 + i, bar & ~MV64360_ENET2MEM_SNOOP_MSK);
#endif
				/* read back to flush fifo [linux comment] */
				(void)MV_READ(MV643XX_ETH_BAR_0 + i);
			}
		}
		/* restore/re-enable */
		MV_WRITE(MV643XX_ETH_BAR_ENBL_R, disbl);
	}

	mveth_clear_mib_counters(mp);
	mveth_clear_addr_filters(mp);

	/* Just leave it alone...
	reset_phy();
	*/

	if ( mp->rbuf_count > 0 ) {
		/* RX ring starts at the aligned beginning of the shared ring area */
		mp->rx_ring = (MvEthRxDesc)MV643XX_ALIGN(mp->ring_area, RING_ALIGNMENT);
		mveth_init_rx_desc_ring(mp);
	}

	if ( mp->xbuf_count > 0 ) {
		/* TX ring follows the RX ring; NOTE(review): the cast binds to
		 * mp->rx_ring before the '+', so the offset is rbuf_count TX-desc
		 * units -- presumably RX and TX descriptors have the same size;
		 * confirm against the descriptor declarations.
		 */
		mp->tx_ring = (MvEthTxDesc)mp->rx_ring + mp->rbuf_count;
		mveth_init_tx_desc_ring(mp);
	}

	if ( enaddr ) {
		/* set ethernet address from arpcom struct */
#ifdef MVETH_DEBUG
		printk(DRVNAME"%i: Writing MAC addr ", mp->port_num+1);
		for (i=5; i>=0; i--) {
			printk("%02X%c", enaddr[i], i?':':'\n');
		}
#endif
		mveth_write_eaddr(mp, enaddr);
	}

	/* set mac address and unicast filter */

	{
	uint32_t machi, maclo;
		maclo = MV_READ(MV643XX_ETH_MAC_ADDR_LO(mp->port_num));
		machi = MV_READ(MV643XX_ETH_MAC_ADDR_HI(mp->port_num));
		/* ASSUME: firmware has set the mac address for us
		 *         - if assertion fails, we have to do more work...
		 */
		assert( maclo && machi && maclo != 0xffffffff && machi != 0xffffffff );
		mveth_ucfilter(mp, maclo&0xff, 1/* accept */);
	}

	/* port, serial and sdma configuration */
	v = MVETH_PORT_CONFIG_VAL;
	if ( promisc ) {
		/* multicast filters were already set up to
		 * accept everything (mveth_clear_addr_filters())
		 */
		v |= MV643XX_ETH_UNICAST_PROMISC_MODE;
	} else {
		v &= ~MV643XX_ETH_UNICAST_PROMISC_MODE;
	}
	MV_WRITE(MV643XX_ETH_PORT_CONFIG_R(mp->port_num),
			v);
	MV_WRITE(MV643XX_ETH_PORT_CONFIG_XTEND_R(mp->port_num),
			MVETH_PORT_XTEND_CONFIG_VAL);

	/* merge configurable serial-control bits, preserving the rest */
	v  = MV_READ(MV643XX_ETH_SERIAL_CONTROL_R(mp->port_num));
	v &= ~(MVETH_SERIAL_CTRL_CONFIG_MSK);
	v |= mveth_serial_ctrl_config_val;
	MV_WRITE(MV643XX_ETH_SERIAL_CONTROL_R(mp->port_num), v);

	/* if the link is already up, sync duplex/speed/flow-control settings */
	i = IFM_MAKEWORD(0, 0, 0, 0);
	if ( 0 == BSP_mve_media_ioctl(mp, SIOCGIFMEDIA, &i) ) {
		if ( (IFM_LINK_OK & i) ) {
			mveth_update_serial_port(mp, i);
		}
	}

	/* enable serial port */
	v = MV_READ(MV643XX_ETH_SERIAL_CONTROL_R(mp->port_num));
	MV_WRITE(MV643XX_ETH_SERIAL_CONTROL_R(mp->port_num),
			v | MV643XX_ETH_SERIAL_PORT_ENBL);

#ifndef __BIG_ENDIAN__
#error	"byte swapping needs to be disabled for little endian machines"
#endif
	MV_WRITE(MV643XX_ETH_SDMA_CONFIG_R(mp->port_num), MVETH_SDMA_CONFIG_VAL);

	/* allow short frames */
	MV_WRITE(MV643XX_ETH_RX_MIN_FRAME_SIZE_R(mp->port_num), MVETH_MIN_FRAMSZ_CONFIG_VAL);

	/* clear any pending interrupt causes before enabling */
	MV_WRITE(MV643XX_ETH_INTERRUPT_CAUSE_R(mp->port_num), 0);
	MV_WRITE(MV643XX_ETH_INTERRUPT_EXTEND_CAUSE_R(mp->port_num), 0);
	/* TODO: set irq coalescing */

	/* enable Rx */
	if ( mp->rbuf_count > 0 ) {
		MV_WRITE(MV643XX_ETH_RECEIVE_QUEUE_COMMAND_R(mp->port_num), MV643XX_ETH_RX_START(0));
	}

	mveth_enable_irqs(mp, -1);

#ifdef MVETH_DEBUG
	printk(DRVNAME"%i: Leaving BSP_mve_init_hw()\n", mp->port_num+1);
#endif
}
---|
2528 | |
---|
2529 | /* read ethernet address from hw to buffer */ |
---|
/* Read the port's MAC address from the MAC_ADDR_LO/HI registers into
 * 'oeaddr' (6 bytes, network order). If 'oeaddr' is NULL the address is
 * printed to stdout instead.
 */
void
BSP_mve_read_eaddr(struct mveth_private *mp, unsigned char *oeaddr)
{
int				i;
uint32_t		x;
unsigned char	buf[6], *eaddr;

	eaddr = oeaddr ? oeaddr : buf;

	/* fill the array back-to-front; LO register holds the last 2 bytes */
	eaddr += 5;
	x = MV_READ(MV643XX_ETH_MAC_ADDR_LO(mp->port_num));

	/* lo word */
	for (i=2; i; i--, eaddr--) {
		*eaddr = (unsigned char)(x & 0xff);
		x>>=8;
	}

	x = MV_READ(MV643XX_ETH_MAC_ADDR_HI(mp->port_num));
	/* hi word */
	for (i=4; i; i--, eaddr--) {
		*eaddr = (unsigned char)(x & 0xff);
		x>>=8;
	}

	if ( !oeaddr ) {
		printf("%02X",buf[0]);
		for (i=1; i<sizeof(buf); i++)
			printf(":%02X",buf[i]);
		printf("\n");
	}
}
---|
2562 | |
---|
/* Get/set media (PHY) parameters via the MII layer.
 *
 * cmd:  SIOCGIFMEDIA / SIOCSIFMEDIA, or the aliases 0 (get) / 1 (set).
 * parg: in/out IFM media word.
 *
 * RETURNS: 0 on success, -1 on unsupported cmd or MII failure.
 * Serializes register access with REGLOCK/REGUNLOCK.
 */
int
BSP_mve_media_ioctl(struct mveth_private *mp, int cmd, int *parg)
{
int rval;
	/* alias cmd == 0,1 */
	switch ( cmd ) {
		case 0: cmd = SIOCGIFMEDIA;
			break;
		case 1: cmd = SIOCSIFMEDIA;
			/* fallthrough */
		case SIOCGIFMEDIA:
		case SIOCSIFMEDIA:
			break;
		default: return -1;
	}
	REGLOCK();
	rval = rtems_mii_ioctl(&mveth_mdio, mp, cmd, parg);
	REGUNLOCK();
	return rval;
}
---|
2582 | |
---|
/* Enable all driver-managed interrupt sources for this port. */
void
BSP_mve_enable_irqs(struct mveth_private *mp)
{
	mveth_enable_irqs(mp, -1);
}
---|
2588 | |
---|
/* Disable all driver-managed interrupt sources for this port. */
void
BSP_mve_disable_irqs(struct mveth_private *mp)
{
	mveth_disable_irqs(mp, -1);
}
---|
2594 | |
---|
/* Acknowledge (clear) all pending IRQ causes; RETURNS the causes seen. */
uint32_t
BSP_mve_ack_irqs(struct mveth_private *mp)
{
	return mveth_ack_irqs(mp, -1);
}
---|
2600 | |
---|
2601 | |
---|
/* Enable only the interrupt sources selected by 'mask'. */
void
BSP_mve_enable_irq_mask(struct mveth_private *mp, uint32_t mask)
{
	mveth_enable_irqs(mp, mask);
}
---|
2607 | |
---|
/* Disable the interrupt sources selected by 'mask'; RETURNS the
 * previously enabled set (as reported by mveth_disable_irqs).
 */
uint32_t
BSP_mve_disable_irq_mask(struct mveth_private *mp, uint32_t mask)
{
	return mveth_disable_irqs(mp, mask);
}
---|
2613 | |
---|
/* Acknowledge pending IRQ causes selected by 'mask'; RETURNS causes seen. */
uint32_t
BSP_mve_ack_irq_mask(struct mveth_private *mp, uint32_t mask)
{
	return mveth_ack_irqs(mp, mask);
}
---|
2619 | |
---|
/* Handle a link-change event: re-read media, resync the serial port when
 * the link is up and kick TX (it may have stalled while the link was down).
 *
 * pmedia: optional out-parameter receiving the current media word.
 * RETURNS: 0 on success, -1 if media could not be read.
 */
int
BSP_mve_ack_link_chg(struct mveth_private *mp, int *pmedia)
{
int media = IFM_MAKEWORD(0,0,0,0);

	if ( 0 == BSP_mve_media_ioctl(mp, SIOCGIFMEDIA, &media)) {
		if ( IFM_LINK_OK & media ) {
			mveth_update_serial_port(mp, media);
			/* If TX stalled because there was no buffer then whack it */
			mveth_start_tx(mp);
		}
		if ( pmedia )
			*pmedia = media;
		return 0;
	}
	return -1;
}
---|
2637 | |
---|
2638 | /* BSDNET SUPPORT/GLUE ROUTINES */ |
---|
2639 | |
---|
2640 | static void |
---|
2641 | mveth_set_filters(struct ifnet *ifp); |
---|
2642 | |
---|
/* bsdnet glue: halt the hardware and cancel the watchdog timer */
STATIC void
mveth_stop(struct mveth_softc *sc)
{
	BSP_mve_stop_hw(&sc->pvt);
	sc->arpcom.ac_if.if_timer = 0;
}
---|
2649 | |
---|
2650 | /* allocate a mbuf for RX with a properly aligned data buffer |
---|
2651 | * RETURNS 0 if allocation fails |
---|
2652 | */ |
---|
/* Allocate an mbuf cluster for RX with the data pointer aligned to
 * RX_BUF_ALIGNMENT.
 *
 * psz:   out - usable buffer length (cluster size minus alignment pad)
 * paddr: out - aligned start address of the data area
 * RETURNS: the mbuf (as void*) or 0 if allocation fails.
 */
static void *
alloc_mbuf_rx(int *psz, uintptr_t *paddr)
{
struct mbuf		*m;
unsigned long	l,o;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if ( !m )
		return 0;
	MCLGET(m, M_DONTWAIT);
	if ( ! (m->m_flags & M_EXT) ) {
		/* no cluster attached; give the header back */
		m_freem(m);
		return 0;
	}

	/* pad needed to round the data pointer up to the alignment */
	o = mtod(m, unsigned long);
	l = MV643XX_ALIGN(o, RX_BUF_ALIGNMENT) - o;

	/* align start of buffer */
	m->m_data += l;

	/* reduced length */
	l = MCLBYTES - l;

	m->m_len   = m->m_pkthdr.len = l;
	*psz       = m->m_len;
	*paddr     = mtod(m, uintptr_t);

	return (void*) m;
}
---|
2683 | |
---|
2684 | static void consume_rx_mbuf(void *buf, void *arg, int len) |
---|
2685 | { |
---|
2686 | struct ifnet *ifp = arg; |
---|
2687 | struct mbuf *m = buf; |
---|
2688 | |
---|
2689 | if ( len <= 0 ) { |
---|
2690 | ifp->if_iqdrops++; |
---|
2691 | if ( len < 0 ) { |
---|
2692 | ifp->if_ierrors++; |
---|
2693 | } |
---|
2694 | if ( m ) |
---|
2695 | m_freem(m); |
---|
2696 | } else { |
---|
2697 | struct ether_header *eh; |
---|
2698 | |
---|
2699 | eh = (struct ether_header *)(mtod(m, unsigned long) + ETH_RX_OFFSET); |
---|
2700 | m->m_len = m->m_pkthdr.len = len - sizeof(struct ether_header) - ETH_RX_OFFSET - ETH_CRC_LEN; |
---|
2701 | m->m_data += sizeof(struct ether_header) + ETH_RX_OFFSET; |
---|
2702 | m->m_pkthdr.rcvif = ifp; |
---|
2703 | |
---|
2704 | ifp->if_ipackets++; |
---|
2705 | ifp->if_ibytes += m->m_pkthdr.len; |
---|
2706 | |
---|
2707 | if (0) { |
---|
2708 | /* Low-level debugging */ |
---|
2709 | int i; |
---|
2710 | for (i=0; i<13; i++) { |
---|
2711 | printf("%02X:",((char*)eh)[i]); |
---|
2712 | } |
---|
2713 | printf("%02X\n",((char*)eh)[i]); |
---|
2714 | for (i=0; i<m->m_len; i++) { |
---|
2715 | if ( !(i&15) ) |
---|
2716 | printf("\n"); |
---|
2717 | printf("0x%02x ",mtod(m,char*)[i]); |
---|
2718 | } |
---|
2719 | printf("\n"); |
---|
2720 | } |
---|
2721 | |
---|
2722 | if (0) { |
---|
2723 | /* Low-level debugging/testing without bsd stack */ |
---|
2724 | m_freem(m); |
---|
2725 | } else { |
---|
2726 | /* send buffer upwards */ |
---|
2727 | ether_input(ifp, eh, m); |
---|
2728 | } |
---|
2729 | } |
---|
2730 | } |
---|
2731 | |
---|
/* TX callback handed to the low-level driver: a sent (or failed) buffer
 * is returned here. Updates the interface counters and frees the chain.
 * err: nonzero if transmission failed.
 */
static void release_tx_mbuf(void *buf, void *arg, int err)
{
struct ifnet *ifp = arg;
struct mbuf  *mb  = buf;

	if ( err ) {
		ifp->if_oerrors++;
	} else {
		ifp->if_opackets++;
	}
	/* byte count includes failed frames; matches original accounting */
	ifp->if_obytes += mb->m_pkthdr.len;
	m_freem(mb);
}
---|
2745 | |
---|
/* Print driver statistics and accumulate + print the hardware MIB counters
 * to stream 'f' (stdout if NULL).
 *
 * Reading a MIB counter clears it in hardware, so each value is added to
 * the software accumulators in mp->stats.mib. The two 64-bit octet
 * counters occupy two consecutive 32-bit slots (LO/HI) and are read with
 * read_long_mib_counter(); the extra idx++ in their cases skips the HI
 * slot so the loop doesn't read it again.
 */
static void
dump_update_stats(struct mveth_private *mp, FILE *f)
{
int      p = mp->port_num;
int      idx;
uint32_t v;

	if ( !f )
		f = stdout;

	fprintf(f, DRVNAME"%i Statistics:\n", mp->port_num + 1);
	fprintf(f, " # IRQS: %i\n", mp->stats.irqs);
	fprintf(f, " Max. mbuf chain length: %i\n", mp->stats.maxchain);
	fprintf(f, " # repacketed: %i\n", mp->stats.repack);
	fprintf(f, " # packets: %i\n", mp->stats.packet);
	fprintf(f, "MIB Counters:\n");
	for ( idx = MV643XX_ETH_MIB_GOOD_OCTS_RCVD_LO>>2;
			idx < MV643XX_ETH_NUM_MIB_COUNTERS;
			idx++ ) {
		switch ( idx ) {
			case MV643XX_ETH_MIB_GOOD_OCTS_RCVD_LO>>2:
				mp->stats.mib.good_octs_rcvd += read_long_mib_counter(p, idx);
				fprintf(f, mibfmt[idx], mp->stats.mib.good_octs_rcvd);
				idx++; /* skip the HI half of the 64-bit counter */
				break;

			case MV643XX_ETH_MIB_GOOD_OCTS_SENT_LO>>2:
				mp->stats.mib.good_octs_sent += read_long_mib_counter(p, idx);
				fprintf(f, mibfmt[idx], mp->stats.mib.good_octs_sent);
				idx++; /* skip the HI half of the 64-bit counter */
				break;

			default:
				v = ((uint32_t*)&mp->stats.mib)[idx] += read_mib_counter(p, idx);
				fprintf(f, mibfmt[idx], v);
				break;
		}
	}
	fprintf(f, "\n");
}
---|
2786 | |
---|
/* Public wrapper: dump driver + MIB statistics to 'f' (stdout if NULL). */
void
BSP_mve_dump_stats(struct mveth_private *mp, FILE *f)
{
	dump_update_stats(mp, f);
}
---|
2792 | |
---|
2793 | /* BSDNET DRIVER CALLBACKS */ |
---|
2794 | |
---|
/* bsdnet if_init entry: (re)initialize the hardware, sync the OACTIVE
 * flag with the current link state, program the address filters (unless
 * promiscuous) and mark the interface RUNNING.
 */
static void
mveth_init(void *arg)
{
struct mveth_softc *sc  = arg;
struct ifnet       *ifp = &sc->arpcom.ac_if;
int                media;

	BSP_mve_init_hw(&sc->pvt, ifp->if_flags & IFF_PROMISC, sc->arpcom.ac_enaddr);

	media = IFM_MAKEWORD(0, 0, 0, 0);
	if ( 0 == BSP_mve_media_ioctl(&sc->pvt, SIOCGIFMEDIA, &media) ) {
		if ( (IFM_LINK_OK & media) ) {
			/* link up: allow the stack to transmit */
			ifp->if_flags &= ~IFF_OACTIVE;
		} else {
			/* link down: hold off transmission */
			ifp->if_flags |= IFF_OACTIVE;
		}
	}

	/* if promiscuous then there is no need to change */
	if ( ! (ifp->if_flags & IFF_PROMISC) )
		mveth_set_filters(ifp);

	ifp->if_flags |= IFF_RUNNING;
	sc->arpcom.ac_if.if_timer = 0;
}
---|
2820 | |
---|
2821 | /* bsdnet driver entry to start transmission */ |
---|
/* bsdnet driver entry to start transmission: drain the send queue into
 * the hardware TX ring. When the ring is full the packet is pushed back
 * onto the queue and IFF_OACTIVE is set; the daemon restarts us once
 * descriptors are reclaimed.
 */
static void
mveth_start(struct ifnet *ifp)
{
struct mveth_softc *sc = ifp->if_softc;
struct mbuf        *m  = 0;

	while ( ifp->if_snd.ifq_head ) {
		IF_DEQUEUE( &ifp->if_snd, m );
		if ( BSP_mve_send_buf(&sc->pvt, m, 0, 0) < 0 ) {
			/* no room in the TX ring; requeue and stall */
			IF_PREPEND( &ifp->if_snd, m);
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
		/* need to do this really only once
		 * but it's cheaper this way.
		 */
		ifp->if_timer = 2*IFNET_SLOWHZ;
	}
}
---|
2841 | |
---|
2842 | /* bsdnet driver entry; */ |
---|
/* bsdnet watchdog entry: called when if_timer expires, i.e. TX has not
 * completed within ~2s; reset the interface and retry transmission.
 */
static void
mveth_watchdog(struct ifnet *ifp)
{
struct mveth_softc *sc = ifp->if_softc;

	ifp->if_oerrors++;
	printk(DRVNAME"%i: watchdog timeout; resetting\n", ifp->if_unit);

	mveth_init(sc);
	mveth_start(ifp);
}
---|
2854 | |
---|
/* Program unicast-promisc mode and the multicast filter from the
 * interface flags and the bsdnet multicast address list.
 * For IFF_PROMISC/IFF_ALLMULTI everything is accepted; otherwise the
 * filter is cleared and rebuilt from the enumerated multi addresses.
 */
static void
mveth_set_filters(struct ifnet *ifp)
{
struct mveth_softc *sc = ifp->if_softc;
uint32_t            v;

	v = MV_READ(MV643XX_ETH_PORT_CONFIG_R(sc->pvt.port_num));
	if ( ifp->if_flags & IFF_PROMISC )
		v |= MV643XX_ETH_UNICAST_PROMISC_MODE;
	else
		v &= ~MV643XX_ETH_UNICAST_PROMISC_MODE;
	MV_WRITE(MV643XX_ETH_PORT_CONFIG_R(sc->pvt.port_num), v);

	if ( ifp->if_flags & (IFF_PROMISC | IFF_ALLMULTI) ) {
		BSP_mve_mcast_filter_accept_all(&sc->pvt);
	} else {
		struct ether_multi     *enm;
		struct ether_multistep step;

		BSP_mve_mcast_filter_clear( &sc->pvt );

		ETHER_FIRST_MULTI(step, (struct arpcom *)ifp, enm);

		while ( enm ) {
			/* address ranges can't be programmed into the hardware
			 * filter; the stack should have set IFF_ALLMULTI instead
			 */
			if ( memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN) )
				assert( !"Should never get here; IFF_ALLMULTI should be set!" );

			BSP_mve_mcast_filter_accept_add(&sc->pvt, enm->enm_addrlo);

			ETHER_NEXT_MULTI(step, enm);
		}
	}
}
---|
2888 | |
---|
2889 | /* bsdnet driver ioctl entry */ |
---|
2890 | static int |
---|
2891 | mveth_ioctl(struct ifnet *ifp, ioctl_command_t cmd, caddr_t data) |
---|
2892 | { |
---|
2893 | struct mveth_softc *sc = ifp->if_softc; |
---|
2894 | struct ifreq *ifr = (struct ifreq *)data; |
---|
2895 | int error = 0; |
---|
2896 | int f; |
---|
2897 | |
---|
2898 | switch ( cmd ) { |
---|
2899 | case SIOCSIFFLAGS: |
---|
2900 | f = ifp->if_flags; |
---|
2901 | if ( f & IFF_UP ) { |
---|
2902 | if ( ! ( f & IFF_RUNNING ) ) { |
---|
2903 | mveth_init(sc); |
---|
2904 | } else { |
---|
2905 | if ( (f & IFF_PROMISC) != (sc->bsd.oif_flags & IFF_PROMISC) ) { |
---|
2906 | /* Note: in all other scenarios the 'promisc' flag |
---|
2907 | * in the low-level driver [which affects the way |
---|
2908 | * the multicast filter is setup: accept none vs. |
---|
2909 | * accept all in promisc mode] is eventually |
---|
2910 | * set when the IF is brought up... |
---|
2911 | */ |
---|
2912 | sc->pvt.promisc = (f & IFF_PROMISC); |
---|
2913 | |
---|
2914 | mveth_set_filters(ifp); |
---|
2915 | } |
---|
2916 | /* FIXME: other flag changes are ignored/unimplemented */ |
---|
2917 | } |
---|
2918 | } else { |
---|
2919 | if ( f & IFF_RUNNING ) { |
---|
2920 | mveth_stop(sc); |
---|
2921 | ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); |
---|
2922 | } |
---|
2923 | } |
---|
2924 | sc->bsd.oif_flags = ifp->if_flags; |
---|
2925 | break; |
---|
2926 | |
---|
2927 | case SIOCGIFMEDIA: |
---|
2928 | case SIOCSIFMEDIA: |
---|
2929 | error = BSP_mve_media_ioctl(&sc->pvt, cmd, &ifr->ifr_media); |
---|
2930 | break; |
---|
2931 | |
---|
2932 | case SIOCADDMULTI: |
---|
2933 | case SIOCDELMULTI: |
---|
2934 | error = (cmd == SIOCADDMULTI) |
---|
2935 | ? ether_addmulti(ifr, &sc->arpcom) |
---|
2936 | : ether_delmulti(ifr, &sc->arpcom); |
---|
2937 | |
---|
2938 | if (error == ENETRESET) { |
---|
2939 | if (ifp->if_flags & IFF_RUNNING) { |
---|
2940 | mveth_set_filters(ifp); |
---|
2941 | } |
---|
2942 | error = 0; |
---|
2943 | } |
---|
2944 | break; |
---|
2945 | |
---|
2946 | |
---|
2947 | break; |
---|
2948 | |
---|
2949 | case SIO_RTEMS_SHOW_STATS: |
---|
2950 | dump_update_stats(&sc->pvt, stdout); |
---|
2951 | break; |
---|
2952 | |
---|
2953 | default: |
---|
2954 | error = ether_ioctl(ifp, cmd, data); |
---|
2955 | break; |
---|
2956 | } |
---|
2957 | |
---|
2958 | return error; |
---|
2959 | } |
---|
2960 | |
---|
2961 | /* DRIVER TASK */ |
---|
2962 | |
---|
2963 | /* Daemon task does all the 'interrupt' work */ |
---|
/* Daemon task does all the 'interrupt' work: the ISR only sends an event
 * (bit i for port i, up to 3 ports -> mask 7) and this task, running with
 * the bsdnet semaphore held, acknowledges the IRQ causes and services
 * link changes, TX reclaim and RX.
 */
static void mveth_daemon(void *arg)
{
struct mveth_softc *sc;
struct ifnet       *ifp;
rtems_event_set    evs;
	for (;;) {
		/* block for an event from any of the (up to 3) ports */
		rtems_bsdnet_event_receive( 7, RTEMS_WAIT | RTEMS_EVENT_ANY, RTEMS_NO_TIMEOUT, &evs );
		evs &= 7;
		for ( sc = theMvEths; evs; evs>>=1, sc++ ) {
			if ( (evs & 1) ) {
				register uint32_t x;

				ifp = &sc->arpcom.ac_if;

				if ( !(ifp->if_flags & IFF_UP) ) {
					mveth_stop(sc);
					ifp->if_flags &= ~(IFF_UP|IFF_RUNNING);
					continue;
				}

				if ( !(ifp->if_flags & IFF_RUNNING) ) {
					/* event could have been pending at the time hw was stopped;
					 * just ignore...
					 */
					continue;
				}

				/* ack + fetch pending causes */
				x = mveth_ack_irqs(&sc->pvt, -1);

				if ( MV643XX_ETH_EXT_IRQ_LINK_CHG & x ) {
					/* phy status changed */
					int media;

					if ( 0 == BSP_mve_ack_link_chg(&sc->pvt, &media) ) {
						if ( IFM_LINK_OK & media ) {
							ifp->if_flags &= ~IFF_OACTIVE;
							mveth_start(ifp);
						} else {
							/* stop sending */
							ifp->if_flags |= IFF_OACTIVE;
						}
					}
				}
				/* free tx chain */
				if ( (MV643XX_ETH_EXT_IRQ_TX_DONE & x) && BSP_mve_swipe_tx(&sc->pvt) ) {
					ifp->if_flags &= ~IFF_OACTIVE;
					/* all descriptors reclaimed -> cancel the watchdog */
					if ( TX_AVAILABLE_RING_SIZE(&sc->pvt) == sc->pvt.avail )
						ifp->if_timer = 0;
					mveth_start(ifp);
				}
				if ( (MV643XX_ETH_IRQ_RX_DONE & x) )
					BSP_mve_swipe_rx(&sc->pvt);

				/* re-enable this port's IRQs (masked by the ISR) */
				mveth_enable_irqs(&sc->pvt, -1);
			}
		}
	}
}
---|
3022 | |
---|
3023 | #ifdef MVETH_DETACH_HACK |
---|
3024 | static int mveth_detach(struct mveth_softc *sc); |
---|
3025 | #endif |
---|
3026 | |
---|
3027 | |
---|
3028 | /* PUBLIC RTEMS BSDNET ATTACH FUNCTION */ |
---|
/* PUBLIC RTEMS BSDNET ATTACH FUNCTION
 *
 * ifcfg:     bsdnet interface configuration (unit name, buffer counts,
 *            optional hardware address, mtu, ...)
 * attaching: nonzero to attach, zero to detach (only supported with
 *            MVETH_DETACH_HACK compiled in).
 *
 * RETURNS: 0 on success, nonzero on error (note: mixed 1/-1 error codes
 *          as in the original).
 */
int
rtems_mve_attach(struct rtems_bsdnet_ifconfig *ifcfg, int attaching)
{
char             *unitName;
int              unit,i,cfgUnits;
struct mveth_softc *sc;
struct ifnet     *ifp;

	unit = rtems_bsdnet_parse_driver_name(ifcfg, &unitName);
	if ( unit <= 0 || unit > MV643XXETH_NUM_DRIVER_SLOTS ) {
		printk(DRVNAME": Bad unit number %i; must be 1..%i\n", unit, MV643XXETH_NUM_DRIVER_SLOTS);
		return 1;
	}

	sc               = &theMvEths[unit-1];
	ifp              = &sc->arpcom.ac_if;
	sc->pvt.port_num = unit-1;
	/* PHY address for this port from the shared PHY_ADDR register (5 bits/port) */
	sc->pvt.phy      = (MV_READ(MV643XX_ETH_PHY_ADDR_R) >> (5*sc->pvt.port_num)) & 0x1f;

	if ( attaching ) {
		if ( ifp->if_init ) {
			printk(DRVNAME": instance %i already attached.\n", unit);
			return -1;
		}

		/* count already-attached units to size the mbuf-cluster sanity check */
		for ( i=cfgUnits = 0; i<MV643XXETH_NUM_DRIVER_SLOTS; i++ ) {
			if ( theMvEths[i].arpcom.ac_if.if_init )
				cfgUnits++;
		}
		cfgUnits++; /* this new one */

		/* lazy init of TID should still be thread-safe because we are protected
		 * by the global networking semaphore..
		 */
		if ( !mveth_tid ) {
			/* newproc uses the 1st 4 chars of name string to build an rtems name */
			mveth_tid = rtems_bsdnet_newproc("MVEd", 4096, mveth_daemon, 0);
		}

		if ( !BSP_mve_setup( unit,
				mveth_tid,
				release_tx_mbuf, ifp,
				alloc_mbuf_rx,
				consume_rx_mbuf, ifp,
				ifcfg->rbuf_count,
				ifcfg->xbuf_count,
				BSP_MVE_IRQ_TX | BSP_MVE_IRQ_RX | BSP_MVE_IRQ_LINK) ) {
			return -1;
		}

		if ( nmbclusters < sc->pvt.rbuf_count * cfgUnits + 60 /* arbitrary */ ) {
			printk(DRVNAME"%i: (mv643xx ethernet) Your application has not enough mbuf clusters\n", unit);
			printk( " configured for this driver.\n");
			return -1;
		}

		if ( ifcfg->hardware_address ) {
			memcpy(sc->arpcom.ac_enaddr, ifcfg->hardware_address, ETHER_ADDR_LEN);
		} else {
			/* read back from hardware assuming that MotLoad already had set it up */
			BSP_mve_read_eaddr(&sc->pvt, sc->arpcom.ac_enaddr);
		}

		ifp->if_softc      = sc;
		ifp->if_unit       = unit;
		ifp->if_name       = unitName;

		ifp->if_mtu        = ifcfg->mtu ? ifcfg->mtu : ETHERMTU;

		ifp->if_init       = mveth_init;
		ifp->if_ioctl      = mveth_ioctl;
		ifp->if_start      = mveth_start;
		ifp->if_output     = ether_output;
		/*
		 * While nonzero, the 'if->if_timer' is decremented
		 * (by the networking code) at a rate of IFNET_SLOWHZ (1hz) and 'if_watchdog'
		 * is called when it expires.
		 * If either of those fields is 0 the feature is disabled.
		 */
		ifp->if_watchdog   = mveth_watchdog;
		ifp->if_timer      = 0;

		sc->bsd.oif_flags  = /* ... */
		ifp->if_flags      = IFF_BROADCAST | IFF_MULTICAST | IFF_SIMPLEX;

		/*
		 * if unset, this set to 10Mbps by ether_ifattach; seems to be unused by bsdnet stack;
		 * could be updated along with phy speed, though...
		ifp->if_baudrate   = 10000000;
		 */

		/* NOTE: ether_output drops packets if ifq_len >= ifq_maxlen
		 *       but this is the packet count, not the fragment count!
		ifp->if_snd.ifq_maxlen = sc->pvt.xbuf_count;
		 */
		ifp->if_snd.ifq_maxlen = ifqmaxlen;

#ifdef MVETH_DETACH_HACK
		if ( !ifp->if_addrlist ) /* do only the first time [reattach hack] */
#endif
		{
			if_attach(ifp);
			ether_ifattach(ifp);
		}

	} else {
#ifdef MVETH_DETACH_HACK
		if ( !ifp->if_init ) {
			printk(DRVNAME": instance %i not attached.\n", unit);
			return -1;
		}
		return mveth_detach(sc);
#else
		printk(DRVNAME": interface detaching not implemented\n");
		return -1;
#endif
	}

	return 0;
}
---|
3149 | |
---|
3150 | /* EARLY PHY ACCESS */ |
---|
/* EARLY PHY ACCESS: initialize slot 'idx' just enough for PHY access
 * (determine the PHY address). RETURNS 0 on success, -1 on bad index.
 */
static int
mveth_early_init(int idx)
{
	if ( idx < 0 || idx >= MV643XXETH_NUM_DRIVER_SLOTS )
		return -1;

	/* determine the phy */
	theMvEths[idx].pvt.phy = (MV_READ(MV643XX_ETH_PHY_ADDR_R) >> (5*idx)) & 0x1f;
	return 0;
}
---|
3161 | |
---|
/* Early-link-check hook: read PHY register 'reg' of slot 'idx'.
 * RETURNS the 16-bit register value, or a negative value on error.
 */
static int
mveth_early_read_phy(int idx, unsigned reg)
{
int rval;

	if ( idx < 0 || idx >= MV643XXETH_NUM_DRIVER_SLOTS )
		return -1;

	rval = mveth_mii_read(&theMvEths[idx].pvt, reg);
	return rval < 0 ? rval : rval & 0xffff;
}
---|
3173 | |
---|
/* Early-link-check hook: write 'val' to PHY register 'reg' of slot 'idx'.
 * RETURNS 0 on success, -1 on bad index.
 */
static int
mveth_early_write_phy(int idx, unsigned reg, unsigned val)
{
	if ( idx < 0 || idx >= MV643XXETH_NUM_DRIVER_SLOTS )
		return -1;

	mveth_mii_write(&theMvEths[idx].pvt, reg, val);
	return 0;
}
---|
3183 | |
---|
/* Ops table registered with the bsdnet early-link-check facility so the
 * network stack can poll the PHY before the driver is fully attached.
 */
rtems_bsdnet_early_link_check_ops
rtems_mve_early_link_check_ops = {
	init:      mveth_early_init,
	read_phy:  mveth_early_read_phy,
	write_phy: mveth_early_write_phy,
	name:      DRVNAME,
	num_slots: MAX_NUM_SLOTS
};
---|
3192 | |
---|
3193 | /* DEBUGGING */ |
---|
3194 | |
---|
3195 | #ifdef MVETH_DEBUG |
---|
3196 | /* Display/dump descriptor rings */ |
---|
3197 | |
---|
/* Debug helper: dump the RX and TX descriptor rings of 'sc' to stdout.
 * Without hardware snooping the descriptors must be cache-invalidated
 * (under the bsdnet semaphore) before reading, to see what the DMA wrote.
 * RETURNS 0.
 */
int
mveth_dring(struct mveth_softc *sc)
{
int i;
	if (1) {
	MvEthRxDesc pr;
	printf("RX:\n");

		for (i=0, pr=sc->pvt.rx_ring; i<sc->pvt.rbuf_count; i++, pr++) {
#ifndef ENABLE_HW_SNOOPING
			/* can't just invalidate the descriptor - if it contains
			 * data that hasn't been flushed yet, we create an inconsistency...
			 */
			rtems_bsdnet_semaphore_obtain();
			INVAL_DESC(pr);
#endif
			printf("cnt: 0x%04x, size: 0x%04x, stat: 0x%08x, next: 0x%08x, buf: 0x%08x\n",
				pr->byte_cnt, pr->buf_size, pr->cmd_sts, (uint32_t)pr->next_desc_ptr, pr->buf_ptr);

#ifndef ENABLE_HW_SNOOPING
			rtems_bsdnet_semaphore_release();
#endif
		}
	}
	if (1) {
	MvEthTxDesc pt;
	printf("TX:\n");
		for (i=0, pt=sc->pvt.tx_ring; i<sc->pvt.xbuf_count; i++, pt++) {
#ifndef ENABLE_HW_SNOOPING
			rtems_bsdnet_semaphore_obtain();
			INVAL_DESC(pt);
#endif
			printf("cnt: 0x%04x, stat: 0x%08x, next: 0x%08x, buf: 0x%08x, mb: 0x%08x\n",
				pt->byte_cnt, pt->cmd_sts, (uint32_t)pt->next_desc_ptr, pt->buf_ptr,
				(uint32_t)pt->mb);

#ifndef ENABLE_HW_SNOOPING
			rtems_bsdnet_semaphore_release();
#endif
		}
	}
	return 0;
}
---|
3241 | |
---|
3242 | #endif |
---|
3243 | |
---|
3244 | /* DETACH HACK DETAILS */ |
---|
3245 | |
---|
3246 | #ifdef MVETH_DETACH_HACK |
---|
/* Cexp module finalizer: refuse to unload while any interface is still
 * attached; otherwise delete the daemon task.
 * RETURNS 0 on success, -1 if an interface is still attached.
 */
int
_cexpModuleFinalize(void *mh)
{
int i;
	for ( i=0; i<MV643XXETH_NUM_DRIVER_SLOTS; i++ ) {
		if ( theMvEths[i].arpcom.ac_if.if_init ) {
			printf("Interface %i still attached; refuse to unload\n", i+1);
			return -1;
		}
	}
	/* delete task; since there are no attached interfaces, it should block
	 * for events and hence not hold the semaphore or other resources...
	 */
	rtems_task_delete(mveth_tid);
	return 0;
}
---|
3263 | |
---|
3264 | /* ugly hack to allow unloading/reloading the driver core. |
---|
3265 | * needed because rtems' bsdnet release doesn't implement |
---|
3266 | * if_detach(). Therefore, we bring the interface down but |
---|
3267 | * keep the device record alive... |
---|
3268 | */ |
---|
3269 | static void |
---|
3270 | ether_ifdetach_pvt(struct ifnet *ifp) |
---|
3271 | { |
---|
3272 | ifp->if_flags = 0; |
---|
3273 | ifp->if_ioctl = 0; |
---|
3274 | ifp->if_start = 0; |
---|
3275 | ifp->if_watchdog = 0; |
---|
3276 | ifp->if_init = 0; |
---|
3277 | } |
---|
3278 | |
---|
3279 | static int |
---|
3280 | mveth_detach(struct mveth_softc *sc) |
---|
3281 | { |
---|
3282 | struct ifnet *ifp = &sc->arpcom.ac_if; |
---|
3283 | if ( ifp->if_init ) { |
---|
3284 | if ( ifp->if_flags & (IFF_UP | IFF_RUNNING) ) { |
---|
3285 | printf(DRVNAME"%i: refuse to detach; interface still up\n",sc->pvt.port_num+1); |
---|
3286 | return -1; |
---|
3287 | } |
---|
3288 | mveth_stop(sc); |
---|
3289 | /* not implemented in BSDnet/RTEMS (yet) but declared in header */ |
---|
3290 | #define ether_ifdetach ether_ifdetach_pvt |
---|
3291 | ether_ifdetach(ifp); |
---|
3292 | } |
---|
3293 | free( (void*)sc->pvt.ring_area, M_DEVBUF ); |
---|
3294 | sc->pvt.ring_area = 0; |
---|
3295 | sc->pvt.tx_ring = 0; |
---|
3296 | sc->pvt.rx_ring = 0; |
---|
3297 | sc->pvt.d_tx_t = sc->pvt.d_tx_h = 0; |
---|
3298 | sc->pvt.d_rx_t = 0; |
---|
3299 | sc->pvt.avail = 0; |
---|
3300 | /* may fail if ISR was not installed yet */ |
---|
3301 | BSP_remove_rtems_irq_handler( &irq_data[sc->pvt.port_num] ); |
---|
3302 | return 0; |
---|
3303 | } |
---|
3304 | |
---|
3305 | #ifdef MVETH_DEBUG |
---|
3306 | struct rtems_bsdnet_ifconfig mveth_dbg_config = { |
---|
3307 | name: DRVNAME"1", |
---|
3308 | attach: rtems_mve_attach, |
---|
3309 | ip_address: "192.168.2.10", /* not used by rtems_bsdnet_attach */ |
---|
3310 | ip_netmask: "255.255.255.0", /* not used by rtems_bsdnet_attach */ |
---|
3311 | hardware_address: 0, /* (void *) */ |
---|
3312 | ignore_broadcast: 0, /* TODO driver should honour this */ |
---|
3313 | mtu: 0, |
---|
3314 | rbuf_count: 0, /* TODO driver should honour this */ |
---|
3315 | xbuf_count: 0, /* TODO driver should honour this */ |
---|
3316 | }; |
---|
3317 | #endif |
---|
3318 | #endif |
---|