source: rtems/bsps/bfin/bf537Stamp/net/ethernet.c @ 0cab067

Last change on this file since 0cab067 was e2bd1f6, checked in by Sebastian Huber <sebastian.huber@…>, on 03/21/18 at 15:38:43

bsp/bfin: Move libcpu content to bsps

This patch is a part of the BSP source reorganization.

Update #3285.

/*
 *  RTEMS network driver for Blackfin ethernet controller
 *
 *  COPYRIGHT (c) 2008 Kallisti Labs, Los Gatos, CA, USA
 *            written by Allan Hessenflow <allanh@kallisti.com>
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 *
 */

#define __INSIDE_RTEMS_BSD_TCPIP_STACK__

#include <rtems.h>
#include <rtems/rtems_bsdnet.h>
#include <rtems/rtems/cache.h>

#include <stdio.h>
#include <inttypes.h>
#include <string.h>

#include <errno.h>
#include <rtems/error.h>

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <libcpu/dmaRegs.h>
#include <libcpu/ethernetRegs.h>
#include <libcpu/ethernet.h>

#if (BFIN_ETHERNET_DEBUG & BFIN_ETHERNET_DEBUG_DUMP_MBUFS)
#include <rtems/dumpbuf.h>
#endif

/*
 * Number of devices supported by this driver
 */
#ifndef N_BFIN_ETHERNET
# define N_BFIN_ETHERNET 1
#endif


/* #define BFIN_IPCHECKSUMS */


/*
 * RTEMS event used by interrupt handler to signal daemons.
 */
#define INTERRUPT_EVENT  RTEMS_EVENT_1

/*
 * RTEMS event used to start transmit daemon.
 */
#define START_TRANSMIT_EVENT    RTEMS_EVENT_2


/* largest Ethernet frame the MAC will handle */
#define BFIN_ETHERNET_MAX_FRAME_LENGTH      1556

#if MCLBYTES < (BFIN_ETHERNET_MAX_FRAME_LENGTH + 2)
#error MCLBYTES too small
#endif

#define BFIN_REG16(base, offset) \
        (*((uint16_t volatile *) ((char *)(base) + (offset))))
#define BFIN_REG32(base, offset) \
        (*((uint32_t volatile *) ((char *)(base) + (offset))))


#define DMA_MODE_RX               (DMA_CONFIG_FLOW_DESC_LARGE | \
                                   (5 << DMA_CONFIG_NDSIZE_SHIFT) | \
                                   DMA_CONFIG_WDSIZE_32 | \
                                   DMA_CONFIG_WNR | \
                                   DMA_CONFIG_DMAEN)

#define DMA_MODE_TX               (DMA_CONFIG_FLOW_DESC_LARGE | \
                                   (5 << DMA_CONFIG_NDSIZE_SHIFT) | \
                                   DMA_CONFIG_WDSIZE_32 | \
                                   DMA_CONFIG_DMAEN)

#define DMA_MODE_STATUS           (DMA_CONFIG_FLOW_DESC_LARGE | \
                                   (5 << DMA_CONFIG_NDSIZE_SHIFT) | \
                                   DMA_CONFIG_DI_EN | \
                                   DMA_CONFIG_WDSIZE_32 | \
                                   DMA_CONFIG_WNR | \
                                   DMA_CONFIG_DMAEN)

#define DMA_MODE_STATUS_NO_INT    (DMA_CONFIG_FLOW_DESC_LARGE | \
                                   (5 << DMA_CONFIG_NDSIZE_SHIFT) | \
                                   DMA_CONFIG_WDSIZE_32 | \
                                   DMA_CONFIG_WNR | \
                                   DMA_CONFIG_DMAEN)

#define DMA_MODE_STATUS_LAST      (DMA_CONFIG_FLOW_STOP | \
                                   (0 << DMA_CONFIG_NDSIZE_SHIFT) | \
                                   DMA_CONFIG_DI_EN | \
                                   DMA_CONFIG_WDSIZE_32 | \
                                   DMA_CONFIG_WNR | \
                                   DMA_CONFIG_DMAEN)

/* five 16 bit words */
typedef struct dmaDescS {
  struct dmaDescS *next;
  void *addr;
  uint16_t dmaConfig;
} dmaDescT;

typedef struct {
  uint32_t status;
} txStatusT;

#ifdef BFIN_IPCHECKSUMS
typedef struct {
  uint16_t ipHeaderChecksum;
  uint16_t ipPayloadChecksum;
  uint32_t status;
} rxStatusT;
#else
typedef struct {
  uint32_t status;
} rxStatusT;
#endif

typedef struct {
  dmaDescT data;
  dmaDescT status;
  struct mbuf *m;
} rxPacketDescT;

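/* Each transmit descriptor owns an embedded frame buffer.  The buffer
   starts with a 16 bit length word immediately followed by the frame
   data, which is the layout the EMAC transmit DMA fetches (the driver
   fills packet.length and flushes length + sizeof(uint16_t) bytes). */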
typedef struct {
  dmaDescT data;
  dmaDescT status;
  bool     inUse;
  union {
    uint32_t dummy; /* try to force 32 bit alignment */
    struct {
      uint16_t length;
      char data[BFIN_ETHERNET_MAX_FRAME_LENGTH];
    } packet;
  } buffer;
} txPacketDescT;


/* hardware-specific storage */
struct bfin_ethernetSoftc {
  struct arpcom arpcom; /* this entry must be first */

  uint32_t sclk;

  void *ethBase;
  void *rxdmaBase;
  void *txdmaBase;

  int acceptBroadcast;

  rtems_id rxDaemonTid;
  rtems_id txDaemonTid;

  void *status;
  int rxDescCount;
  rxPacketDescT *rx;
  int txDescCount;
  txPacketDescT *tx;

  bool rmii;
  int phyAddr;

  /* statistics */
#ifdef BISON
  unsigned long                   Interrupts;
  unsigned long                   rxInterrupts;
  unsigned long                   rxMissed;
  unsigned long                   rxGiant;
  unsigned long                   rxNonOctet;
  unsigned long                   rxBadCRC;
  unsigned long                   rxCollision;

  unsigned long                   txInterrupts;
  unsigned long                   txSingleCollision;
  unsigned long                   txMultipleCollision;
  unsigned long                   txCollision;
  unsigned long                   txDeferred;
  unsigned long                   txUnderrun;
  unsigned long                   txLateCollision;
  unsigned long                   txExcessiveCollision;
  unsigned long                   txExcessiveDeferral;
  unsigned long                   txLostCarrier;
  unsigned long                   txRawWait;
#endif
};

static struct bfin_ethernetSoftc ethernetSoftc[N_BFIN_ETHERNET];


/* Shut down the interface.  */
static void ethernetStop(struct bfin_ethernetSoftc *sc) {
  struct ifnet *ifp;
  void *ethBase;

  ifp = &sc->arpcom.ac_if;
  ethBase = sc->ethBase;

  ifp->if_flags &= ~IFF_RUNNING;

  /* stop the transmitter and receiver.  */
  BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) &= ~(EMAC_OPMODE_TE |
                                               EMAC_OPMODE_RE);
}

/* Show interface statistics */
static void bfin_ethernetStats(struct bfin_ethernetSoftc *sc) {
#ifdef BISON
  printf(" Total Interrupts:%-8lu", sc->Interrupts);
  printf("    Rx Interrupts:%-8lu", sc->rxInterrupts);
  printf("            Giant:%-8lu", sc->rxGiant);
  printf("        Non-octet:%-8lu\n", sc->rxNonOctet);
  printf("          Bad CRC:%-8lu", sc->rxBadCRC);
  printf("        Collision:%-8lu", sc->rxCollision);
  printf("           Missed:%-8lu\n", sc->rxMissed);

  printf(    "    Tx Interrupts:%-8lu", sc->txInterrupts);
  printf(  "           Deferred:%-8lu", sc->txDeferred);
  printf("        Lost Carrier:%-8lu\n", sc->txLostCarrier);
  printf(   "Single Collisions:%-8lu", sc->txSingleCollision);
  printf( "Multiple Collisions:%-8lu", sc->txMultipleCollision);
  printf("Excessive Collisions:%-8lu\n", sc->txExcessiveCollision);
  printf(   " Total Collisions:%-8lu", sc->txCollision);
  printf( "     Late Collision:%-8lu", sc->txLateCollision);
  printf("            Underrun:%-8lu\n", sc->txUnderrun);
  printf(   "  Raw output wait:%-8lu\n", sc->txRawWait);
#endif /*BISON*/
}

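/* Interrupt handlers.  The DMA handlers acknowledge the channel status and
   wake the receive or transmit daemon with an RTEMS event; the MAC handler
   simply clears the EMAC system status register.  All packet processing
   happens in task context. */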
void bfin_ethernet_rxdma_isr(int vector) {
  struct bfin_ethernetSoftc *sc;
  void *rxdmaBase;
  uint16_t status;
  int i;

  for (i = 0; i < N_BFIN_ETHERNET; i++) {
    sc = &ethernetSoftc[i];
    rxdmaBase = sc->rxdmaBase;
    status = BFIN_REG16(rxdmaBase, DMA_IRQ_STATUS_OFFSET);
    if (status & DMA_IRQ_STATUS_DMA_DONE)
        rtems_bsdnet_event_send (sc->rxDaemonTid, INTERRUPT_EVENT);
    BFIN_REG16(rxdmaBase, DMA_IRQ_STATUS_OFFSET) = status;
  }
}

void bfin_ethernet_txdma_isr(int vector) {
  struct bfin_ethernetSoftc *sc;
  void *txdmaBase;
  uint16_t status;
  int i;

  for (i = 0; i < N_BFIN_ETHERNET; i++) {
    sc = &ethernetSoftc[i];
    txdmaBase = sc->txdmaBase;
    status = BFIN_REG16(txdmaBase, DMA_IRQ_STATUS_OFFSET);
    if (status & DMA_IRQ_STATUS_DMA_DONE)
        rtems_bsdnet_event_send (sc->txDaemonTid, INTERRUPT_EVENT);
    BFIN_REG16(txdmaBase, DMA_IRQ_STATUS_OFFSET) = status;
  }
}

void bfin_ethernet_mac_isr(int vector) {
  struct bfin_ethernetSoftc *sc;
  void *ethBase;
  int i;

  for (i = 0; i < N_BFIN_ETHERNET; i++) {
    sc = &ethernetSoftc[i];
    ethBase = sc->ethBase;
    BFIN_REG32(ethBase, EMAC_SYSTAT_OFFSET) = ~(uint32_t) 0;
  }
}

static bool txFree(struct bfin_ethernetSoftc *sc, int index) {
  bool       freed;
  txStatusT *status;

  freed = false;
  if (sc->tx[index].inUse) {
    status = (txStatusT *) sc->tx[index].status.addr;
    rtems_cache_invalidate_multiple_data_lines(status, sizeof(*status));
    if (status->status != 0) {
      /* update statistics */

      sc->tx[index].inUse = false;
      freed = true;
    }
  }

  return freed;
}

static void txDaemon(void *arg) {
  struct bfin_ethernetSoftc *sc;
  struct ifnet *ifp;
  struct mbuf *m, *first;
  rtems_event_set events;
  void *ethBase;
  void *txdmaBase;
  txStatusT *status;
  int head;
  int prevHead;
  int tail;
  int length;
  char *ptr;

  sc = (struct bfin_ethernetSoftc *) arg;
  ifp = &sc->arpcom.ac_if;

  ethBase = sc->ethBase;
  txdmaBase = sc->txdmaBase;
  head = 0;
  prevHead = sc->txDescCount - 1;
  tail = 0;

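  /* head is the next free descriptor to fill, tail is the oldest in-use
     descriptor still awaiting completion, and prevHead is the descriptor
     just before head, whose status dmaConfig is rewritten so the DMA
     chains into each newly queued packet. */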
  while (1) {
    /* wait for packet or isr */
    rtems_bsdnet_event_receive(START_TRANSMIT_EVENT | INTERRUPT_EVENT,
                               RTEMS_EVENT_ANY | RTEMS_WAIT,
                               RTEMS_NO_TIMEOUT, &events);

    /* if no descriptors are available, try to free one.  To reduce
       transmit latency only do one here. */
    if (sc->tx[head].inUse && txFree(sc, tail)) {
      if (++tail == sc->txDescCount)
        tail = 0;
    }
    /* send packets until the queue is empty or we run out of tx
       descriptors */
    while (!sc->tx[head].inUse && (ifp->if_flags & IFF_OACTIVE)) {
      /* get the next mbuf chain to transmit */
      IF_DEQUEUE(&ifp->if_snd, m);
      if (m != NULL) {
        /* copy packet into our buffer */
        ptr = sc->tx[head].buffer.packet.data;
        length = 0;
        first = m;
        while (m && length <= BFIN_ETHERNET_MAX_FRAME_LENGTH) {
          length += m->m_len;
          if (length <= BFIN_ETHERNET_MAX_FRAME_LENGTH)
            memcpy(ptr, m->m_data, m->m_len);
          ptr += m->m_len;
          m = m->m_next;
        }
        m_freem(first); /* all done with mbuf */
        if (length <= BFIN_ETHERNET_MAX_FRAME_LENGTH) {
          sc->tx[head].buffer.packet.length = length;

          /* setup tx dma */
          status = (txStatusT *) sc->tx[head].status.addr;
          status->status = 0;
          sc->tx[head].inUse = true;
          rtems_cache_flush_multiple_data_lines(status, sizeof(*status));

          /* configure dma to stop after sending this packet */
          sc->tx[head].status.dmaConfig = DMA_MODE_STATUS_LAST;
          rtems_cache_flush_multiple_data_lines(
              &sc->tx[head].status.dmaConfig,
              sizeof(sc->tx[head].status.dmaConfig));
          rtems_cache_flush_multiple_data_lines(
              &sc->tx[head].buffer.packet,
              length + sizeof(uint16_t));

          /* modify previous descriptor to let it continue
             automatically */
          sc->tx[prevHead].status.dmaConfig = DMA_MODE_STATUS;
          rtems_cache_flush_multiple_data_lines(
              &sc->tx[prevHead].status.dmaConfig,
              sizeof(sc->tx[prevHead].status.dmaConfig));

          /* Restart the DMA if it stopped before the packet we just
             added.  This is purely to reduce transmit latency, as it
             would be restarted anyway after this loop (and needs to
             be, because there is a very small chance that the DMA
             controller had started the last status transfer before
             the new dmaConfig word was written above and is still
             doing that status transfer when we check the status
             below).  That case is caught by the check outside the
             loop, which is guaranteed to run at least once after
             the last DMA complete interrupt. */
          if ((BFIN_REG16(txdmaBase, DMA_IRQ_STATUS_OFFSET) &
               DMA_IRQ_STATUS_DMA_RUN) == 0 &&
               BFIN_REG32(txdmaBase, DMA_NEXT_DESC_PTR_OFFSET) !=
               (uint32_t) sc->tx[head].data.next) {
            BFIN_REG16(txdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_TX;
            BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_TE;
          }

          if (++head == sc->txDescCount)
            head = 0;
          if (++prevHead == sc->txDescCount)
            prevHead = 0;

          /* if no descriptors are available, try to free one */
          if (sc->tx[head].inUse && txFree(sc, tail)) {
            if (++tail == sc->txDescCount)
              tail = 0;
          }
        } else {
          /* dropping packet: too large */

        }
      } else {
        /* no packets queued */
        ifp->if_flags &= ~IFF_OACTIVE;
      }
    }

    /* if dma stopped and there's more to do, restart it */
    if ((BFIN_REG16(txdmaBase, DMA_IRQ_STATUS_OFFSET) &
         DMA_IRQ_STATUS_DMA_RUN) == 0 &&
        BFIN_REG32(txdmaBase, DMA_NEXT_DESC_PTR_OFFSET) !=
        (uint32_t) &sc->tx[head].data) {
      BFIN_REG16(txdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_TX;
      BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_TE;
    }

    /* free up any additional tx descriptors */
    while (txFree(sc, tail)) {
      if (++tail == sc->txDescCount)
        tail = 0;
    }
  }
}


static void rxDaemon(void *arg) {
  struct bfin_ethernetSoftc *sc;
  struct ifnet *ifp;
  struct mbuf *m;
  struct mbuf *rxPacket;
  void *dataPtr;
  rtems_event_set events;
  struct ether_header *eh;
  rxStatusT *status;
  uint32_t rxStatus;
  int head;
  int prevHead;
  int length;
  void *ethBase;
  void *rxdmaBase;

  sc = (struct bfin_ethernetSoftc *) arg;
  rxdmaBase = sc->rxdmaBase;
  ethBase = sc->ethBase;
  ifp = &sc->arpcom.ac_if;
  prevHead = sc->rxDescCount - 1;
  head = 0;

  BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_RX;
  BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_RE;

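  /* Process the receive ring.  Each slot is a data descriptor pointing at
     an mbuf cluster, chained to a status descriptor.  The most recently
     recycled slot is left in "stop" mode so the DMA halts there rather
     than overrunning buffers this daemon has not processed yet; as each
     slot is handed back, the previous stop point is switched to continue
     mode. */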
  while (1) {
    status = sc->rx[head].status.addr;
    rtems_cache_invalidate_multiple_data_lines(status, sizeof(*status));
    while (status->status != 0) {
      if (status->status & EMAC_RX_STAT_RX_OK) {
        /* get new cluster to replace this one */
        MGETHDR(m, M_WAIT, MT_DATA);
        MCLGET(m, M_WAIT);
        m->m_pkthdr.rcvif = ifp;
      } else
        m = NULL;

      rxStatus = status->status;
      /* update statistics */


      if (m) {
        /* save received packet to send up a little later */
        rxPacket = sc->rx[head].m;
        dataPtr = sc->rx[head].data.addr;

        /* setup dma for new cluster */
        sc->rx[head].m = m;
        sc->rx[head].data.addr = (void *) (((intptr_t) m->m_data + 3) & ~3);
        /* invalidate cache for new data buffer, in case any lines
           are dirty from previous owner */
        rtems_cache_invalidate_multiple_data_lines(
            sc->rx[head].data.addr,
            BFIN_ETHERNET_MAX_FRAME_LENGTH + 2);
      } else
        rxPacket = NULL;

      sc->rx[head].status.dmaConfig = DMA_MODE_STATUS_LAST;
      rtems_cache_flush_multiple_data_lines(&sc->rx[head],
                                            sizeof(sc->rx[head]));

      /* mark descriptor as empty */
      status->status = 0;
      rtems_cache_flush_multiple_data_lines(&status->status,
                                            sizeof(status->status));

      /* allow dma to continue from previous descriptor into this
         one */
      sc->rx[prevHead].status.dmaConfig = DMA_MODE_STATUS;
      rtems_cache_flush_multiple_data_lines(
          &sc->rx[prevHead].status.dmaConfig,
          sizeof(sc->rx[prevHead].status.dmaConfig));

      if (rxPacket) {
        /* send it up */
        eh = (struct ether_header *) ((intptr_t) dataPtr + 2);
        rxPacket->m_data = (caddr_t) ((intptr_t) dataPtr + 2 + 14);
        length = (rxStatus & EMAC_RX_STAT_RX_FRLEN_MASK) >>
                  EMAC_RX_STAT_RX_FRLEN_SHIFT;
        rxPacket->m_len = length - 14;
        rxPacket->m_pkthdr.len = rxPacket->m_len;
        /* invalidate packet buffer cache again (even though it
           was invalidated prior to giving it to dma engine),
           because speculative reads might cause cache lines to
           be filled at any time */
        rtems_cache_invalidate_multiple_data_lines(eh, length);
        ether_input(ifp, eh, rxPacket);
      }

      if (++prevHead == sc->rxDescCount)
        prevHead = 0;
      if (++head == sc->rxDescCount)
        head = 0;
      status = sc->rx[head].status.addr;
      rtems_cache_invalidate_multiple_data_lines(status, sizeof(*status));
    }

    /* if dma stopped before the next descriptor, restart it */
    if ((BFIN_REG16(rxdmaBase, DMA_IRQ_STATUS_OFFSET) &
         DMA_IRQ_STATUS_DMA_RUN) == 0 &&
        BFIN_REG32(rxdmaBase, DMA_NEXT_DESC_PTR_OFFSET) !=
        (uint32_t) &sc->rx[head].data) {
      BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_RX;
    }

    rtems_bsdnet_event_receive(INTERRUPT_EVENT, RTEMS_WAIT | RTEMS_EVENT_ANY,
                               RTEMS_NO_TIMEOUT, &events);
  }

}

/*
 ******************************************************************
 *                                                                *
 *                     Initialization Routines                    *
 *                                                                *
 ******************************************************************
 */

static void resetHardware(struct bfin_ethernetSoftc *sc) {
  void *ethBase;
  void *rxdmaBase;
  void *txdmaBase;

  ethBase = sc->ethBase;
  rxdmaBase = sc->rxdmaBase;
  txdmaBase = sc->txdmaBase;
  BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_CONFIG_OFFSET) = 0;
}

static void initializeHardware(struct bfin_ethernetSoftc *sc) {
  struct ifnet *ifp;
  struct mbuf *m;
  unsigned char *hwaddr;
  int cacheAlignment;
  int rxStatusSize;
  int txStatusSize;
  char *ptr;
  int i;
  void *ethBase;
  void *rxdmaBase;
  void *txdmaBase;
  uint32_t divisor;

  ifp = &sc->arpcom.ac_if;
  ethBase = sc->ethBase;
  rxdmaBase = sc->rxdmaBase;
  txdmaBase = sc->txdmaBase;

  BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_FLC_OFFSET) = 0;
  divisor = (sc->sclk / 25000000) / 2 - 1;
  BFIN_REG32(ethBase, EMAC_SYSCTL_OFFSET) = (divisor <<
                                             EMAC_SYSCTL_MDCDIV_SHIFT) |
                                            EMAC_SYSCTL_RXDWA;
#ifdef BFIN_IPCHECKSUMS
  BFIN_REG32(ethBase, EMAC_SYSCTL_OFFSET) |= EMAC_SYSCTL_RXCKS;
#endif
  BFIN_REG32(ethBase, EMAC_SYSTAT_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_RX_IRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_RX_STKY_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_TX_IRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_TX_STKY_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_MMC_RIRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_MMC_RIRQS_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_MMC_TIRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_MMC_TIRQS_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_MMC_CTL_OFFSET) = EMAC_MMC_CTL_MMCE |
                                             EMAC_MMC_CTL_CCOR |
                                             EMAC_MMC_CTL_RSTC;
  BFIN_REG32(ethBase, EMAC_MMC_CTL_OFFSET) = EMAC_MMC_CTL_MMCE |
                                             EMAC_MMC_CTL_CCOR;

  BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_CONFIG_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_X_COUNT_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_X_COUNT_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_X_MODIFY_OFFSET) = 4;
  BFIN_REG16(txdmaBase, DMA_X_MODIFY_OFFSET) = 4;
  BFIN_REG16(rxdmaBase, DMA_Y_COUNT_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_Y_COUNT_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_Y_MODIFY_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_Y_MODIFY_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_IRQ_STATUS_OFFSET) = DMA_IRQ_STATUS_DMA_ERR |
                                                 DMA_IRQ_STATUS_DMA_DONE;

  /* The status structures must not share cache lines with anything else,
     including other status structures, so that the processor and the DMA
     controller can each write to them safely.  Round the structure sizes
     up to a multiple of the cache line size to guarantee this. */
  cacheAlignment = (int) rtems_cache_get_data_line_size();
  if (cacheAlignment == 0)
     cacheAlignment = 1;
  rxStatusSize = cacheAlignment * ((sizeof(rxStatusT) + cacheAlignment - 1) /
                                   cacheAlignment);
  txStatusSize = cacheAlignment * ((sizeof(txStatusT) + cacheAlignment - 1) /
                                   cacheAlignment);
  /* Allocate enough extra to allow the structures to start at a cache
     aligned boundary. */
  sc->status = malloc(sc->rxDescCount * rxStatusSize +
                      sc->txDescCount * txStatusSize +
                      cacheAlignment - 1, M_DEVBUF, M_NOWAIT);
  sc->rx = malloc(sc->rxDescCount * sizeof(*sc->rx), M_DEVBUF, M_NOWAIT);
  sc->tx = malloc(sc->txDescCount * sizeof(*sc->tx), M_DEVBUF, M_NOWAIT);
  if (sc->status == NULL || sc->rx == NULL || sc->tx == NULL)
    rtems_panic("No memory!\n");

  /* Start status structures at a cache aligned boundary. */
  ptr = (char *) (((intptr_t) sc->status + cacheAlignment - 1) &
                  ~(cacheAlignment - 1));
  memset(ptr, 0, sc->rxDescCount * rxStatusSize +
                 sc->txDescCount * txStatusSize);
  memset(sc->rx, 0, sc->rxDescCount * sizeof(*sc->rx));
  memset(sc->tx, 0, sc->txDescCount * sizeof(*sc->tx));
  rtems_cache_flush_multiple_data_lines(ptr, sc->rxDescCount * rxStatusSize +
                                             sc->txDescCount * txStatusSize);
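  /* Build the descriptor rings.  Each slot chains its data descriptor to
     its status descriptor, which in turn points at the next slot's data
     descriptor; the last slot points back to the first, closing the ring.
     Status descriptors left in "stop" mode keep the DMA from running past
     work that has not been queued or processed yet. */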
  for (i = 0; i < sc->rxDescCount; i++) {
    MGETHDR(m, M_WAIT, MT_DATA);
    MCLGET(m, M_WAIT);
    m->m_pkthdr.rcvif = ifp;
    sc->rx[i].m = m;
    /* start dma at 32 bit boundary */
    sc->rx[i].data.addr = (void *) (((intptr_t) m->m_data + 3) & ~3);
    rtems_cache_invalidate_multiple_data_lines(
        sc->rx[i].data.addr,
        BFIN_ETHERNET_MAX_FRAME_LENGTH + 2);
    sc->rx[i].data.dmaConfig = DMA_MODE_RX;
    sc->rx[i].data.next = &(sc->rx[i].status);
    sc->rx[i].status.addr = ptr;
    if (i < sc->rxDescCount - 1) {
      sc->rx[i].status.dmaConfig = DMA_MODE_STATUS;
      sc->rx[i].status.next = &(sc->rx[i + 1].data);
    } else {
      sc->rx[i].status.dmaConfig = DMA_MODE_STATUS_LAST;
      sc->rx[i].status.next = &(sc->rx[0].data);
    }
    ptr += rxStatusSize;
  }
  rtems_cache_flush_multiple_data_lines(sc->rx, sc->rxDescCount *
                                                sizeof(*sc->rx));
  for (i = 0; i < sc->txDescCount; i++) {
    sc->tx[i].data.addr = &sc->tx[i].buffer.packet;
    sc->tx[i].data.dmaConfig = DMA_MODE_TX;
    sc->tx[i].data.next = &(sc->tx[i].status);
    sc->tx[i].status.addr = ptr;
    sc->tx[i].status.dmaConfig = DMA_MODE_STATUS_LAST;
    if (i < sc->txDescCount - 1)
      sc->tx[i].status.next = &(sc->tx[i + 1].data);
    else
      sc->tx[i].status.next = &(sc->tx[0].data);
    sc->tx[i].inUse = false;
    ptr += txStatusSize;
  }
  rtems_cache_flush_multiple_data_lines(sc->tx, sc->txDescCount *
                                                sizeof(*sc->tx));

  BFIN_REG32(rxdmaBase, DMA_NEXT_DESC_PTR_OFFSET) = (uint32_t) &sc->rx[0].data;
  BFIN_REG32(txdmaBase, DMA_NEXT_DESC_PTR_OFFSET) = (uint32_t) &sc->tx[0].data;

  hwaddr = sc->arpcom.ac_enaddr;
  BFIN_REG16(ethBase, EMAC_ADDRHI_OFFSET) = ((uint16_t) hwaddr[5] << 8) |
                                            hwaddr[4];
  BFIN_REG32(ethBase, EMAC_ADDRLO_OFFSET) = ((uint32_t) hwaddr[3] << 24) |
                                            ((uint32_t) hwaddr[2] << 16) |
                                            ((uint32_t) hwaddr[1] << 8) |
                                            hwaddr[0];

  if (sc->acceptBroadcast)
    BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) &= ~EMAC_OPMODE_DBF;
  else
    BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_DBF;

}

/* send packet (caller provides header) */
static void ethernetStart(struct ifnet *ifp) {
  struct bfin_ethernetSoftc *sc;

  sc = ifp->if_softc;

  ifp->if_flags |= IFF_OACTIVE;
  rtems_bsdnet_event_send(sc->txDaemonTid, START_TRANSMIT_EVENT);
}

/* initialize and start the device */
static void ethernetInit(void *arg) {
  struct bfin_ethernetSoftc *sc;
  struct ifnet *ifp;
  void *ethBase;

  sc = arg;
  ifp = &sc->arpcom.ac_if;
  ethBase = sc->ethBase;

  if (sc->txDaemonTid == 0) {
    initializeHardware(sc);

    /* start driver tasks */
    sc->rxDaemonTid = rtems_bsdnet_newproc("BFrx", 4096, rxDaemon, sc);
    sc->txDaemonTid = rtems_bsdnet_newproc("BFtx", 4096, txDaemon, sc);

  }

  if (ifp->if_flags & IFF_PROMISC)
    BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_PR;
  else
    BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) &= ~EMAC_OPMODE_PR;

  /*
   * Tell the world that we're running.
   */
  ifp->if_flags |= IFF_RUNNING;

}

/* driver ioctl handler */
static int ethernetIoctl(struct ifnet *ifp, ioctl_command_t command,
                         caddr_t data) {
  int result;
  struct bfin_ethernetSoftc *sc = ifp->if_softc;

  result = 0;
  switch (command) {
  case SIOCGIFADDR:
  case SIOCSIFADDR:
    ether_ioctl(ifp, command, data);
    break;
  case SIOCSIFFLAGS:
    switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
    case IFF_RUNNING:
      ethernetStop(sc);
      break;
    case IFF_UP:
      ethernetInit(sc);
      break;
    case IFF_UP | IFF_RUNNING:
      ethernetStop(sc);
      ethernetInit(sc);
      break;
    default:
      break;
    }
    break;
  case SIO_RTEMS_SHOW_STATS:
    bfin_ethernetStats(sc);
    break;
  case SIOCADDMULTI:
  case SIOCDELMULTI:
  default:
    result = EINVAL;
    break;
  }

  return result;
}

/* attach a BFIN ETHERNET driver to the system */
int bfin_ethernet_driver_attach(struct rtems_bsdnet_ifconfig *config,
                                int attaching,
                                bfin_ethernet_configuration_t *chip) {
  struct bfin_ethernetSoftc *sc;
  struct ifnet *ifp;
  int mtu;
  int unitNumber;
  char *unitName;

  if ((unitNumber = rtems_bsdnet_parse_driver_name(config, &unitName)) < 0)
    return 0;

  if ((unitNumber <= 0) || (unitNumber > N_BFIN_ETHERNET)) {
    printf("Bad bfin ethernet unit number %d.\n", unitNumber);
    return 0;
  }
  sc = &ethernetSoftc[unitNumber - 1];
  ifp = &sc->arpcom.ac_if;
  if (ifp->if_softc != NULL) {
    printf("Driver already in use.\n");
    return 0;
  }

  memset(sc, 0, sizeof(*sc));

  /* process options */
  if (config->hardware_address)
    memcpy(sc->arpcom.ac_enaddr, config->hardware_address, ETHER_ADDR_LEN);
  else
    memset(sc->arpcom.ac_enaddr, 0x08, ETHER_ADDR_LEN);
  if (config->mtu)
    mtu = config->mtu;
  else
    mtu = ETHERMTU;
  if (config->rbuf_count)
    sc->rxDescCount = config->rbuf_count;
  else
    sc->rxDescCount = chip->rxDescCount;
  if (config->xbuf_count)
    sc->txDescCount = config->xbuf_count;
  else
    sc->txDescCount = chip->txDescCount;
  /* require a minimum of two descriptors of each type */
  if (sc->rxDescCount <= 1)
    sc->rxDescCount = 2;
  if (sc->txDescCount <= 1)
    sc->txDescCount = 2;

  sc->acceptBroadcast = !config->ignore_broadcast;

  sc->sclk = chip->sclk;
  sc->ethBase = chip->ethBaseAddress;
  sc->rxdmaBase = chip->rxdmaBaseAddress;
  sc->txdmaBase = chip->txdmaBaseAddress;

  /* make sure no interrupts are left asserted */
  resetHardware(sc);

  sc->rmii = (chip->phyType == rmii);
  sc->phyAddr = chip->phyAddr;

  /* set up network interface values */
  ifp->if_softc = sc;
  ifp->if_unit = unitNumber;
  ifp->if_name = unitName;
  ifp->if_mtu = mtu;
  ifp->if_init = ethernetInit;
  ifp->if_ioctl = ethernetIoctl;
  ifp->if_start = ethernetStart;
  ifp->if_output = ether_output;
  ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX;
  if (ifp->if_snd.ifq_maxlen == 0)
    ifp->if_snd.ifq_maxlen = ifqmaxlen;

  if_attach(ifp);
  ether_ifattach(ifp);

  return 1;
}
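A note on integration: the sketch below is a minimal, hypothetical example of how a BSP might hook this driver into the legacy rtems_bsdnet configuration; it is not part of the file above. The wrapper name bf537_ethernet_attach, the interface name, the register base addresses, and the clock value are assumptions for illustration only; the authoritative field list of bfin_ethernet_configuration_t is in <libcpu/ethernet.h>, and the three global ISRs (bfin_ethernet_rxdma_isr, bfin_ethernet_txdma_isr, bfin_ethernet_mac_isr) still have to be installed on the appropriate interrupt vectors by BSP startup code.

/* Hypothetical BSP glue, for illustration only; addresses and clock are
   placeholders, not the real BF537 values. */
static bfin_ethernet_configuration_t bf537EthConfig = {
  .sclk             = 100000000,            /* assumed SCLK */
  .ethBaseAddress   = (void *) 0xffc03000,  /* placeholder EMAC base */
  .rxdmaBaseAddress = (void *) 0xffc01c00,  /* placeholder RX DMA channel */
  .txdmaBaseAddress = (void *) 0xffc01c40,  /* placeholder TX DMA channel */
  .rxDescCount      = 16,
  .txDescCount      = 16,
  .phyType          = rmii,
  .phyAddr          = 1
};

/* Adapter with the two-argument signature rtems_bsdnet_ifconfig expects. */
static int bf537_ethernet_attach(struct rtems_bsdnet_ifconfig *config,
                                 int attaching) {
  return bfin_ethernet_driver_attach(config, attaching, &bf537EthConfig);
}

static struct rtems_bsdnet_ifconfig netdriver_config = {
  .name   = "eth1",                 /* unit numbers start at 1 */
  .attach = bf537_ethernet_attach
};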