source: rtems/c/src/lib/libcpu/bfin/network/ethernet.c @ c499856

/*
 *  RTEMS network driver for Blackfin ethernet controller
 *
 *  COPYRIGHT (c) 2008 Kallisti Labs, Los Gatos, CA, USA
 *            written by Allan Hessenflow <allanh@kallisti.com>
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 *
 */

#include <rtems.h>
#include <rtems/rtems_bsdnet.h>
#include <rtems/rtems/cache.h>

#include <stdio.h>
#include <inttypes.h>
#include <string.h>

#include <errno.h>
#include <rtems/error.h>

#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include <libcpu/dmaRegs.h>
#include <libcpu/ethernetRegs.h>
#include "ethernet.h"

#if (BFIN_ETHERNET_DEBUG & BFIN_ETHERNET_DEBUG_DUMP_MBUFS)
#include <rtems/dumpbuf.h>
#endif

/*
 * Number of devices supported by this driver
 */
#ifndef N_BFIN_ETHERNET
# define N_BFIN_ETHERNET 1
#endif


/* #define BFIN_IPCHECKSUMS */


/*
 * RTEMS event used by interrupt handler to signal daemons.
 */
#define INTERRUPT_EVENT  RTEMS_EVENT_1

/*
 * RTEMS event used to start transmit daemon.
 */
#define START_TRANSMIT_EVENT    RTEMS_EVENT_2


/* largest Ethernet frame the MAC will handle */
#define BFIN_ETHERNET_MAX_FRAME_LENGTH      1556

#if MCLBYTES < (BFIN_ETHERNET_MAX_FRAME_LENGTH + 2)
#error MCLBYTES too small
#endif

#define BFIN_REG16(base, offset) \
        (*((uint16_t volatile *) ((char *)(base) + (offset))))
#define BFIN_REG32(base, offset) \
        (*((uint32_t volatile *) ((char *)(base) + (offset))))


#define DMA_MODE_RX               (DMA_CONFIG_FLOW_DESC_LARGE | \
                                   (5 << DMA_CONFIG_NDSIZE_SHIFT) | \
                                   DMA_CONFIG_WDSIZE_32 | \
                                   DMA_CONFIG_WNR | \
                                   DMA_CONFIG_DMAEN)

#define DMA_MODE_TX               (DMA_CONFIG_FLOW_DESC_LARGE | \
                                   (5 << DMA_CONFIG_NDSIZE_SHIFT) | \
                                   DMA_CONFIG_WDSIZE_32 | \
                                   DMA_CONFIG_DMAEN)

#define DMA_MODE_STATUS           (DMA_CONFIG_FLOW_DESC_LARGE | \
                                   (5 << DMA_CONFIG_NDSIZE_SHIFT) | \
                                   DMA_CONFIG_DI_EN | \
                                   DMA_CONFIG_WDSIZE_32 | \
                                   DMA_CONFIG_WNR | \
                                   DMA_CONFIG_DMAEN)

#define DMA_MODE_STATUS_NO_INT    (DMA_CONFIG_FLOW_DESC_LARGE | \
                                   (5 << DMA_CONFIG_NDSIZE_SHIFT) | \
                                   DMA_CONFIG_WDSIZE_32 | \
                                   DMA_CONFIG_WNR | \
                                   DMA_CONFIG_DMAEN)

#define DMA_MODE_STATUS_LAST      (DMA_CONFIG_FLOW_STOP | \
                                   (0 << DMA_CONFIG_NDSIZE_SHIFT) | \
                                   DMA_CONFIG_DI_EN | \
                                   DMA_CONFIG_WDSIZE_32 | \
                                   DMA_CONFIG_WNR | \
                                   DMA_CONFIG_DMAEN)
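
/* Each DMA_MODE_* word above is loaded into a descriptor's dmaConfig field.
   The "large descriptor" flow mode with NDSIZE 5 tells the DMA controller to
   fetch the five 16 bit words of the next dmaDescT and continue on its own,
   while DMA_MODE_STATUS_LAST uses "stop" flow so the controller halts after
   writing that status word; the daemons later rewrite the previous
   descriptor's dmaConfig from the _LAST mode to the chained mode to hand the
   next packet to the hardware.  DI_EN requests an interrupt when the status
   transfer completes. */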

/* five 16 bit words */
typedef struct dmaDescS {
  struct dmaDescS *next;
  void *addr;
  uint16_t dmaConfig;
} dmaDescT;

typedef struct {
  uint32_t status;
} txStatusT;

#ifdef BFIN_IPCHECKSUMS
typedef struct {
  uint16_t ipHeaderChecksum;
  uint16_t ipPayloadChecksum;
  uint32_t status;
} rxStatusT;
#else
typedef struct {
  uint32_t status;
} rxStatusT;
#endif

typedef struct {
  dmaDescT data;
  dmaDescT status;
  struct mbuf *m;
} rxPacketDescT;

typedef struct {
  dmaDescT data;
  dmaDescT status;
  bool     inUse;
  union {
    uint32_t dummy; /* try to force 32 bit alignment */
    struct {
      uint16_t length;
      char data[BFIN_ETHERNET_MAX_FRAME_LENGTH];
    } packet;
  } buffer;
} txPacketDescT;
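
/* Every packet, in both directions, is described to the DMA controller by a
   pair of chained descriptors: "data" points at the frame buffer and
   "status" points at a word the MAC writes back once it is done with the
   frame.  For transmit, the driver stages each outgoing frame in
   buffer.packet with its 16 bit length word immediately ahead of the frame
   data, which is the layout the transmit DMA hands to the MAC. */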


/* hardware-specific storage */
struct bfin_ethernetSoftc {
  struct arpcom arpcom; /* this entry must be first */

  uint32_t sclk;

  void *ethBase;
  void *rxdmaBase;
  void *txdmaBase;

  int acceptBroadcast;

  rtems_id rxDaemonTid;
  rtems_id txDaemonTid;

  void *status;
  int rxDescCount;
  rxPacketDescT *rx;
  int txDescCount;
  txPacketDescT *tx;

  bool rmii;
  int phyAddr;

  /* statistics */
#ifdef BISON
  unsigned long                   Interrupts;
  unsigned long                   rxInterrupts;
  unsigned long                   rxMissed;
  unsigned long                   rxGiant;
  unsigned long                   rxNonOctet;
  unsigned long                   rxBadCRC;
  unsigned long                   rxCollision;

  unsigned long                   txInterrupts;
  unsigned long                   txSingleCollision;
  unsigned long                   txMultipleCollision;
  unsigned long                   txCollision;
  unsigned long                   txDeferred;
  unsigned long                   txUnderrun;
  unsigned long                   txLateCollision;
  unsigned long                   txExcessiveCollision;
  unsigned long                   txExcessiveDeferral;
  unsigned long                   txLostCarrier;
  unsigned long                   txRawWait;
#endif
};

static struct bfin_ethernetSoftc ethernetSoftc[N_BFIN_ETHERNET];


/* Shut down the interface.  */
static void ethernetStop(struct bfin_ethernetSoftc *sc) {
  struct ifnet *ifp;
  void *ethBase;

  ifp = &sc->arpcom.ac_if;
  ethBase = sc->ethBase;

  ifp->if_flags &= ~IFF_RUNNING;

  /* stop the transmitter and receiver.  */
  BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) &= ~(EMAC_OPMODE_TE |
                                               EMAC_OPMODE_RE);
}

/* Show interface statistics */
static void bfin_ethernetStats(struct bfin_ethernetSoftc *sc) {
#ifdef BISON
  printf(" Total Interrupts:%-8lu", sc->Interrupts);
  printf("    Rx Interrupts:%-8lu", sc->rxInterrupts);
  printf("            Giant:%-8lu", sc->rxGiant);
  printf("        Non-octet:%-8lu\n", sc->rxNonOctet);
  printf("          Bad CRC:%-8lu", sc->rxBadCRC);
  printf("        Collision:%-8lu", sc->rxCollision);
  printf("           Missed:%-8lu\n", sc->rxMissed);

  printf("    Tx Interrupts:%-8lu", sc->txInterrupts);
  printf("           Deferred:%-8lu", sc->txDeferred);
  printf("        Lost Carrier:%-8lu\n", sc->txLostCarrier);
  printf("Single Collisions:%-8lu", sc->txSingleCollision);
  printf("Multiple Collisions:%-8lu", sc->txMultipleCollision);
  printf("Excessive Collisions:%-8lu\n", sc->txExcessiveCollision);
  printf(" Total Collisions:%-8lu", sc->txCollision);
  printf("     Late Collision:%-8lu", sc->txLateCollision);
  printf("            Underrun:%-8lu\n", sc->txUnderrun);
  printf("  Raw output wait:%-8lu\n", sc->txRawWait);
#endif /*BISON*/
}

void bfin_ethernet_rxdma_isr(int vector) {
  struct bfin_ethernetSoftc *sc;
  void *rxdmaBase;
  uint16_t status;
  int i;

  for (i = 0; i < N_BFIN_ETHERNET; i++) {
    sc = &ethernetSoftc[i];
    rxdmaBase = sc->rxdmaBase;
    status = BFIN_REG16(rxdmaBase, DMA_IRQ_STATUS_OFFSET);
    if (status & DMA_IRQ_STATUS_DMA_DONE)
        rtems_bsdnet_event_send(sc->rxDaemonTid, INTERRUPT_EVENT);
    BFIN_REG16(rxdmaBase, DMA_IRQ_STATUS_OFFSET) = status;
  }
}

void bfin_ethernet_txdma_isr(int vector) {
  struct bfin_ethernetSoftc *sc;
  void *txdmaBase;
  uint16_t status;
  int i;

  for (i = 0; i < N_BFIN_ETHERNET; i++) {
    sc = &ethernetSoftc[i];
    txdmaBase = sc->txdmaBase;
    status = BFIN_REG16(txdmaBase, DMA_IRQ_STATUS_OFFSET);
    if (status & DMA_IRQ_STATUS_DMA_DONE)
        rtems_bsdnet_event_send(sc->txDaemonTid, INTERRUPT_EVENT);
    BFIN_REG16(txdmaBase, DMA_IRQ_STATUS_OFFSET) = status;
  }
}

void bfin_ethernet_mac_isr(int vector) {
  struct bfin_ethernetSoftc *sc;
  void *ethBase;
  int i;

  for (i = 0; i < N_BFIN_ETHERNET; i++) {
    sc = &ethernetSoftc[i];
    ethBase = sc->ethBase;
    BFIN_REG32(ethBase, EMAC_SYSTAT_OFFSET) = ~(uint32_t) 0;
  }
}

static bool txFree(struct bfin_ethernetSoftc *sc, int index) {
  bool       freed;
  txStatusT *status;

  freed = false;
  if (sc->tx[index].inUse) {
    status = (txStatusT *) sc->tx[index].status.addr;
    rtems_cache_invalidate_multiple_data_lines(status, sizeof(*status));
    if (status->status != 0) {
      /* update statistics */

      sc->tx[index].inUse = false;
      freed = true;
    }
  }

  return freed;
}

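/* Transmit daemon.  Packets are copied out of the mbuf chains into the
   per-descriptor staging buffers.  "head" is the descriptor being filled,
   "prevHead" the one queued just before it, and "tail" the oldest descriptor
   not yet reclaimed by txFree().  Each new packet is queued with its status
   descriptor in DMA_MODE_STATUS_LAST (stop) mode, and the previous packet's
   status descriptor is then switched to DMA_MODE_STATUS so the controller
   flows from the old packet into the new one. */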
static void txDaemon(void *arg) {
  struct bfin_ethernetSoftc *sc;
  struct ifnet *ifp;
  struct mbuf *m, *first;
  rtems_event_set events;
  void *ethBase;
  void *txdmaBase;
  txStatusT *status;
  int head;
  int prevHead;
  int tail;
  int length;
  char *ptr;

  sc = (struct bfin_ethernetSoftc *) arg;
  ifp = &sc->arpcom.ac_if;

  ethBase = sc->ethBase;
  txdmaBase = sc->txdmaBase;
  head = 0;
  prevHead = sc->txDescCount - 1;
  tail = 0;

  while (1) {
    /* wait for packet or isr */
    rtems_bsdnet_event_receive(START_TRANSMIT_EVENT | INTERRUPT_EVENT,
                               RTEMS_EVENT_ANY | RTEMS_WAIT,
                               RTEMS_NO_TIMEOUT, &events);

    /* if no descriptors are available, try to free one.  To reduce
       transmit latency only do one here. */
    if (sc->tx[head].inUse && txFree(sc, tail)) {
      if (++tail == sc->txDescCount)
        tail = 0;
    }
    /* send packets until the queue is empty or we run out of tx
       descriptors */
    while (!sc->tx[head].inUse && (ifp->if_flags & IFF_OACTIVE)) {
      /* get the next mbuf chain to transmit */
      IF_DEQUEUE(&ifp->if_snd, m);
      if (m != NULL) {
        /* copy packet into our buffer */
        ptr = sc->tx[head].buffer.packet.data;
        length = 0;
        first = m;
        while (m && length <= BFIN_ETHERNET_MAX_FRAME_LENGTH) {
          length += m->m_len;
          if (length <= BFIN_ETHERNET_MAX_FRAME_LENGTH)
            memcpy(ptr, m->m_data, m->m_len);
          ptr += m->m_len;
          m = m->m_next;
        }
        m_freem(first); /* all done with mbuf */
        if (length <= BFIN_ETHERNET_MAX_FRAME_LENGTH) {
          sc->tx[head].buffer.packet.length = length;

          /* setup tx dma */
          status = (txStatusT *) sc->tx[head].status.addr;
          status->status = 0;
          sc->tx[head].inUse = true;
          rtems_cache_flush_multiple_data_lines(status, sizeof(*status));

          /* configure dma to stop after sending this packet */
          sc->tx[head].status.dmaConfig = DMA_MODE_STATUS_LAST;
          rtems_cache_flush_multiple_data_lines(
              &sc->tx[head].status.dmaConfig,
              sizeof(sc->tx[head].status.dmaConfig));
          rtems_cache_flush_multiple_data_lines(
              &sc->tx[head].buffer.packet,
              length + sizeof(uint16_t));

          /* modify previous descriptor to let it continue
             automatically */
          sc->tx[prevHead].status.dmaConfig = DMA_MODE_STATUS;
          rtems_cache_flush_multiple_data_lines(
              &sc->tx[prevHead].status.dmaConfig,
              sizeof(sc->tx[prevHead].status.dmaConfig));

          /* restart dma if it stopped before the packet we just
             added.  this is purely to reduce transmit latency,
             as it would be restarted anyway after this loop (and
             needs to be, as there's a very small chance that the
             dma controller had started the last status transfer
             before the new dmaConfig word was written above and
             is still doing that status transfer when we check the
             status below).  this will be caught by the check
             outside the loop as that is guaranteed to run at least
             once after the last dma complete interrupt. */
          if ((BFIN_REG16(txdmaBase, DMA_IRQ_STATUS_OFFSET) &
               DMA_IRQ_STATUS_DMA_RUN) == 0 &&
               BFIN_REG32(txdmaBase, DMA_NEXT_DESC_PTR_OFFSET) !=
               (uint32_t) sc->tx[head].data.next) {
            BFIN_REG16(txdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_TX;
            BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_TE;
          }

          if (++head == sc->txDescCount)
            head = 0;
          if (++prevHead == sc->txDescCount)
            prevHead = 0;

          /* if no descriptors are available, try to free one */
          if (sc->tx[head].inUse && txFree(sc, tail)) {
            if (++tail == sc->txDescCount)
              tail = 0;
          }
        } else {
          /* dropping packet: too large */

        }
      } else {
        /* no packets queued */
        ifp->if_flags &= ~IFF_OACTIVE;
      }
    }

    /* if dma stopped and there's more to do, restart it */
    if ((BFIN_REG16(txdmaBase, DMA_IRQ_STATUS_OFFSET) &
         DMA_IRQ_STATUS_DMA_RUN) == 0 &&
        BFIN_REG32(txdmaBase, DMA_NEXT_DESC_PTR_OFFSET) !=
        (uint32_t) &sc->tx[head].data) {
      BFIN_REG16(txdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_TX;
      BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_TE;
    }

    /* free up any additional tx descriptors */
    while (txFree(sc, tail)) {
      if (++tail == sc->txDescCount)
        tail = 0;
    }
  }
}


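/* Receive daemon.  Each receive descriptor pair is preloaded with a fresh
   mbuf cluster; when the MAC reports a good frame, the filled cluster is
   handed up the stack and immediately replaced so the ring stays full.
   Note that the MAC is configured with EMAC_SYSCTL_RXDWA, which makes it
   write two bytes of padding ahead of each frame; the offsets of 2 applied
   below account for that padding, and the further 14 bytes skip the
   Ethernet header, which ether_input() is given separately. */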
static void rxDaemon(void *arg) {
  struct bfin_ethernetSoftc *sc;
  struct ifnet *ifp;
  struct mbuf *m;
  struct mbuf *rxPacket;
  void *dataPtr;
  rtems_event_set events;
  struct ether_header *eh;
  rxStatusT *status;
  uint32_t rxStatus;
  int head;
  int prevHead;
  int length;
  void *ethBase;
  void *rxdmaBase;

  sc = (struct bfin_ethernetSoftc *) arg;
  rxdmaBase = sc->rxdmaBase;
  ethBase = sc->ethBase;
  ifp = &sc->arpcom.ac_if;
  prevHead = sc->rxDescCount - 1;
  head = 0;

  BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_RX;
  BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_RE;

  while (1) {
    status = sc->rx[head].status.addr;
    rtems_cache_invalidate_multiple_data_lines(status, sizeof(*status));
    while (status->status != 0) {
      if (status->status & EMAC_RX_STAT_RX_OK) {
        /* get new cluster to replace this one */
        MGETHDR(m, M_WAIT, MT_DATA);
        MCLGET(m, M_WAIT);
        m->m_pkthdr.rcvif = ifp;
      } else
        m = NULL;

      rxStatus = status->status;
      /* update statistics */


      if (m) {
        /* save received packet to send up a little later */
        rxPacket = sc->rx[head].m;
        dataPtr = sc->rx[head].data.addr;

        /* setup dma for new cluster */
        sc->rx[head].m = m;
        sc->rx[head].data.addr = (void *) (((intptr_t) m->m_data + 3) & ~3);
        /* invalidate cache for new data buffer, in case any lines
           are dirty from previous owner */
        rtems_cache_invalidate_multiple_data_lines(
            sc->rx[head].data.addr,
            BFIN_ETHERNET_MAX_FRAME_LENGTH + 2);
      } else
        rxPacket = NULL;

      sc->rx[head].status.dmaConfig = DMA_MODE_STATUS_LAST;
      rtems_cache_flush_multiple_data_lines(&sc->rx[head],
                                            sizeof(sc->rx[head]));

      /* mark descriptor as empty */
      status->status = 0;
      rtems_cache_flush_multiple_data_lines(&status->status,
                                            sizeof(status->status));

      /* allow dma to continue from previous descriptor into this
         one */
      sc->rx[prevHead].status.dmaConfig = DMA_MODE_STATUS;
      rtems_cache_flush_multiple_data_lines(
          &sc->rx[prevHead].status.dmaConfig,
          sizeof(sc->rx[prevHead].status.dmaConfig));

      if (rxPacket) {
        /* send it up */
        eh = (struct ether_header *) ((intptr_t) dataPtr + 2);
        rxPacket->m_data = (caddr_t) ((intptr_t) dataPtr + 2 + 14);
        length = (rxStatus & EMAC_RX_STAT_RX_FRLEN_MASK) >>
                  EMAC_RX_STAT_RX_FRLEN_SHIFT;
        rxPacket->m_len = length - 14;
        rxPacket->m_pkthdr.len = rxPacket->m_len;
        /* invalidate packet buffer cache again (even though it
           was invalidated prior to giving it to dma engine),
           because speculative reads might cause cache lines to
           be filled at any time */
        rtems_cache_invalidate_multiple_data_lines(eh, length);
        ether_input(ifp, eh, rxPacket);
      }

      if (++prevHead == sc->rxDescCount)
        prevHead = 0;
      if (++head == sc->rxDescCount)
        head = 0;
      status = sc->rx[head].status.addr;
      rtems_cache_invalidate_multiple_data_lines(status, sizeof(*status));
    }

    /* if dma stopped before the next descriptor, restart it */
    if ((BFIN_REG16(rxdmaBase, DMA_IRQ_STATUS_OFFSET) &
         DMA_IRQ_STATUS_DMA_RUN) == 0 &&
        BFIN_REG32(rxdmaBase, DMA_NEXT_DESC_PTR_OFFSET) !=
        (uint32_t) &sc->rx[head].data) {
      BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = DMA_MODE_RX;
    }

    rtems_bsdnet_event_receive(INTERRUPT_EVENT, RTEMS_WAIT | RTEMS_EVENT_ANY,
                               RTEMS_NO_TIMEOUT, &events);
  }

}

/*
 ******************************************************************
 *                                                                *
 *                     Initialization Routines                    *
 *                                                                *
 ******************************************************************
 */

static void resetHardware(struct bfin_ethernetSoftc *sc) {
  void *ethBase;
  void *rxdmaBase;
  void *txdmaBase;

  ethBase = sc->ethBase;
  rxdmaBase = sc->rxdmaBase;
  txdmaBase = sc->txdmaBase;
  BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_CONFIG_OFFSET) = 0;
}

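/* Program the MAC and both DMA channels and build the receive and transmit
   descriptor rings.  The status writeback areas are carved out of a single
   allocation, padded so that each one occupies its own cache line(s). */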
static void initializeHardware(struct bfin_ethernetSoftc *sc) {
  struct ifnet *ifp;
  struct mbuf *m;
  unsigned char *hwaddr;
  int cacheAlignment;
  int rxStatusSize;
  int txStatusSize;
  char *ptr;
  int i;
  void *ethBase;
  void *rxdmaBase;
  void *txdmaBase;
  uint32_t divisor;

  ifp = &sc->arpcom.ac_if;
  ethBase = sc->ethBase;
  rxdmaBase = sc->rxdmaBase;
  txdmaBase = sc->txdmaBase;

  BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_FLC_OFFSET) = 0;
  divisor = (sc->sclk / 25000000) / 2 - 1;
  BFIN_REG32(ethBase, EMAC_SYSCTL_OFFSET) = (divisor <<
                                             EMAC_SYSCTL_MDCDIV_SHIFT) |
                                            EMAC_SYSCTL_RXDWA;
#ifdef BFIN_IPCHECKSUMS
  BFIN_REG32(ethBase, EMAC_SYSCTL_OFFSET) |= EMAC_SYSCTL_RXCKS;
#endif
  BFIN_REG32(ethBase, EMAC_SYSTAT_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_RX_IRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_RX_STKY_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_TX_IRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_TX_STKY_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_MMC_RIRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_MMC_RIRQS_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_MMC_TIRQE_OFFSET) = 0;
  BFIN_REG32(ethBase, EMAC_MMC_TIRQS_OFFSET) = ~(uint32_t) 0;
  BFIN_REG32(ethBase, EMAC_MMC_CTL_OFFSET) = EMAC_MMC_CTL_MMCE |
                                             EMAC_MMC_CTL_CCOR |
                                             EMAC_MMC_CTL_RSTC;
  BFIN_REG32(ethBase, EMAC_MMC_CTL_OFFSET) = EMAC_MMC_CTL_MMCE |
                                             EMAC_MMC_CTL_CCOR;

  BFIN_REG16(rxdmaBase, DMA_CONFIG_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_CONFIG_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_X_COUNT_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_X_COUNT_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_X_MODIFY_OFFSET) = 4;
  BFIN_REG16(txdmaBase, DMA_X_MODIFY_OFFSET) = 4;
  BFIN_REG16(rxdmaBase, DMA_Y_COUNT_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_Y_COUNT_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_Y_MODIFY_OFFSET) = 0;
  BFIN_REG16(txdmaBase, DMA_Y_MODIFY_OFFSET) = 0;
  BFIN_REG16(rxdmaBase, DMA_IRQ_STATUS_OFFSET) = DMA_IRQ_STATUS_DMA_ERR |
                                                 DMA_IRQ_STATUS_DMA_DONE;

  /* The status structures must not share cache lines with anything else,
     including other status structures, so that both the processor and the
     DMA controller can safely write to them.  To ensure this, round each
     structure size up to a multiple of the cache line size. */
  cacheAlignment = (int) rtems_cache_get_data_line_size();
  if (cacheAlignment == 0)
     cacheAlignment = 1;
  rxStatusSize = cacheAlignment * ((sizeof(rxStatusT) + cacheAlignment - 1) /
                                   cacheAlignment);
  txStatusSize = cacheAlignment * ((sizeof(txStatusT) + cacheAlignment - 1) /
                                   cacheAlignment);
  /* Allocate enough extra to allow the structures to start at a cache
     aligned boundary. */
  sc->status = malloc(sc->rxDescCount * rxStatusSize +
                      sc->txDescCount * txStatusSize +
                      cacheAlignment - 1, M_DEVBUF, M_NOWAIT);
  sc->rx = malloc(sc->rxDescCount * sizeof(*sc->rx), M_DEVBUF, M_NOWAIT);
  sc->tx = malloc(sc->txDescCount * sizeof(*sc->tx), M_DEVBUF, M_NOWAIT);
  if (sc->status == NULL || sc->rx == NULL || sc->tx == NULL)
    rtems_panic("No memory!\n");

  /* Start the status structures at a cache aligned boundary. */
  ptr = (char *) (((intptr_t) sc->status + cacheAlignment - 1) &
                  ~(cacheAlignment - 1));
  memset(ptr, 0, sc->rxDescCount * rxStatusSize +
                 sc->txDescCount * txStatusSize);
  memset(sc->rx, 0, sc->rxDescCount * sizeof(*sc->rx));
  memset(sc->tx, 0, sc->txDescCount * sizeof(*sc->tx));
  rtems_cache_flush_multiple_data_lines(ptr, sc->rxDescCount * rxStatusSize +
                                             sc->txDescCount * txStatusSize);
  for (i = 0; i < sc->rxDescCount; i++) {
    MGETHDR(m, M_WAIT, MT_DATA);
    MCLGET(m, M_WAIT);
    m->m_pkthdr.rcvif = ifp;
    sc->rx[i].m = m;
    /* start dma at 32 bit boundary */
    sc->rx[i].data.addr = (void *) (((intptr_t) m->m_data + 3) & ~3);
    rtems_cache_invalidate_multiple_data_lines(
        sc->rx[i].data.addr,
        BFIN_ETHERNET_MAX_FRAME_LENGTH + 2);
    sc->rx[i].data.dmaConfig = DMA_MODE_RX;
    sc->rx[i].data.next = &(sc->rx[i].status);
    sc->rx[i].status.addr = ptr;
    if (i < sc->rxDescCount - 1) {
      sc->rx[i].status.dmaConfig = DMA_MODE_STATUS;
      sc->rx[i].status.next = &(sc->rx[i + 1].data);
    } else {
      sc->rx[i].status.dmaConfig = DMA_MODE_STATUS_LAST;
      sc->rx[i].status.next = &(sc->rx[0].data);
    }
    ptr += rxStatusSize;
  }
  rtems_cache_flush_multiple_data_lines(sc->rx, sc->rxDescCount *
                                                sizeof(*sc->rx));
  for (i = 0; i < sc->txDescCount; i++) {
    sc->tx[i].data.addr = &sc->tx[i].buffer.packet;
    sc->tx[i].data.dmaConfig = DMA_MODE_TX;
    sc->tx[i].data.next = &(sc->tx[i].status);
    sc->tx[i].status.addr = ptr;
    sc->tx[i].status.dmaConfig = DMA_MODE_STATUS_LAST;
    if (i < sc->txDescCount - 1)
      sc->tx[i].status.next = &(sc->tx[i + 1].data);
    else
      sc->tx[i].status.next = &(sc->tx[0].data);
    sc->tx[i].inUse = false;
    ptr += txStatusSize;
  }
  rtems_cache_flush_multiple_data_lines(sc->tx, sc->txDescCount *
                                                sizeof(*sc->tx));

  BFIN_REG32(rxdmaBase, DMA_NEXT_DESC_PTR_OFFSET) = (uint32_t) &sc->rx[0].data;
  BFIN_REG32(txdmaBase, DMA_NEXT_DESC_PTR_OFFSET) = (uint32_t) &sc->tx[0].data;

  hwaddr = sc->arpcom.ac_enaddr;
  BFIN_REG16(ethBase, EMAC_ADDRHI_OFFSET) = ((uint16_t) hwaddr[5] << 8) |
                                            hwaddr[4];
  BFIN_REG32(ethBase, EMAC_ADDRLO_OFFSET) = ((uint32_t) hwaddr[3] << 24) |
                                            ((uint32_t) hwaddr[2] << 16) |
                                            ((uint32_t) hwaddr[1] << 8) |
                                            hwaddr[0];

  if (sc->acceptBroadcast)
    BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) &= ~EMAC_OPMODE_DBF;
  else
    BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_DBF;

}

/* queue a packet for transmission and wake the transmit daemon
   (caller provides the Ethernet header) */
static void ethernetStart(struct ifnet *ifp) {
  struct bfin_ethernetSoftc *sc;

  sc = ifp->if_softc;

  ifp->if_flags |= IFF_OACTIVE;
  rtems_bsdnet_event_send(sc->txDaemonTid, START_TRANSMIT_EVENT);
}

/* initialize and start the device */
static void ethernetInit(void *arg) {
  struct bfin_ethernetSoftc *sc;
  struct ifnet *ifp;
  void *ethBase;

  sc = arg;
  ifp = &sc->arpcom.ac_if;
  ethBase = sc->ethBase;

  if (sc->txDaemonTid == 0) {
    initializeHardware(sc);

    /* start driver tasks */
    sc->rxDaemonTid = rtems_bsdnet_newproc("BFrx", 4096, rxDaemon, sc);
    sc->txDaemonTid = rtems_bsdnet_newproc("BFtx", 4096, txDaemon, sc);

  }

  if (ifp->if_flags & IFF_PROMISC)
    BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) |= EMAC_OPMODE_PR;
  else
    BFIN_REG32(ethBase, EMAC_OPMODE_OFFSET) &= ~EMAC_OPMODE_PR;

  /*
   * Tell the world that we're running.
   */
  ifp->if_flags |= IFF_RUNNING;

}

/* driver ioctl handler */
static int ethernetIoctl(struct ifnet *ifp, ioctl_command_t command,
                         caddr_t data) {
  int result;
  struct bfin_ethernetSoftc *sc = ifp->if_softc;

  result = 0;
  switch (command) {
  case SIOCGIFADDR:
  case SIOCSIFADDR:
    ether_ioctl(ifp, command, data);
    break;
  case SIOCSIFFLAGS:
    switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
    case IFF_RUNNING:
      ethernetStop(sc);
      break;
    case IFF_UP:
      ethernetInit(sc);
      break;
    case IFF_UP | IFF_RUNNING:
      ethernetStop(sc);
      ethernetInit(sc);
      break;
    default:
      break;
    }
    break;
  case SIO_RTEMS_SHOW_STATS:
    bfin_ethernetStats(sc);
    break;
  case SIOCADDMULTI:
  case SIOCDELMULTI:
  default:
    result = EINVAL;
    break;
  }

  return result;
}

/* attach a BFIN ETHERNET driver to the system */
int bfin_ethernet_driver_attach(struct rtems_bsdnet_ifconfig *config,
                                int attaching,
                                bfin_ethernet_configuration_t *chip) {
  struct bfin_ethernetSoftc *sc;
  struct ifnet *ifp;
  int mtu;
  int unitNumber;
  char *unitName;

  if ((unitNumber = rtems_bsdnet_parse_driver_name(config, &unitName)) < 0)
    return 0;

  if ((unitNumber <= 0) || (unitNumber > N_BFIN_ETHERNET)) {
    printf("Bad bfin ethernet unit number %d.\n", unitNumber);
    return 0;
  }
  sc = &ethernetSoftc[unitNumber - 1];
  ifp = &sc->arpcom.ac_if;
  if (ifp->if_softc != NULL) {
    printf("Driver already in use.\n");
    return 0;
  }

  memset(sc, 0, sizeof(*sc));

  /* process options */
  if (config->hardware_address)
    memcpy(sc->arpcom.ac_enaddr, config->hardware_address, ETHER_ADDR_LEN);
  else
    memset(sc->arpcom.ac_enaddr, 0x08, ETHER_ADDR_LEN);
  if (config->mtu)
    mtu = config->mtu;
  else
    mtu = ETHERMTU;
  if (config->rbuf_count)
    sc->rxDescCount = config->rbuf_count;
  else
    sc->rxDescCount = chip->rxDescCount;
  if (config->xbuf_count)
    sc->txDescCount = config->xbuf_count;
  else
    sc->txDescCount = chip->txDescCount;
  /* require at least two descriptors of each type */
  if (sc->rxDescCount <= 1)
    sc->rxDescCount = 2;
  if (sc->txDescCount <= 1)
    sc->txDescCount = 2;

  sc->acceptBroadcast = !config->ignore_broadcast;

  sc->sclk = chip->sclk;
  sc->ethBase = chip->ethBaseAddress;
  sc->rxdmaBase = chip->rxdmaBaseAddress;
  sc->txdmaBase = chip->txdmaBaseAddress;

  /* make sure no interrupts are left asserted */
  resetHardware(sc);

  sc->rmii = (chip->phyType == rmii);
  sc->phyAddr = chip->phyAddr;

  /* set up network interface values */
  ifp->if_softc = sc;
  ifp->if_unit = unitNumber;
  ifp->if_name = unitName;
  ifp->if_mtu = mtu;
  ifp->if_init = ethernetInit;
  ifp->if_ioctl = ethernetIoctl;
  ifp->if_start = ethernetStart;
  ifp->if_output = ether_output;
  ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX;
  if (ifp->if_snd.ifq_maxlen == 0)
    ifp->if_snd.ifq_maxlen = ifqmaxlen;

  if_attach(ifp);
  ether_ifattach(ifp);

  return 1;
}
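
/*
 * Illustrative sketch (not part of the driver): a BSP typically supplies a
 * thin attach wrapper that fills in a bfin_ethernet_configuration_t for its
 * particular part and board and then calls bfin_ethernet_driver_attach().
 * The register base macros, clock value, descriptor counts, and function
 * name below are hypothetical placeholders chosen for the example, not
 * definitions provided by this driver.
 */
#if 0
int bsp_bfin_ethernet_attach(struct rtems_bsdnet_ifconfig *config,
                             int attaching) {
  static bfin_ethernet_configuration_t chipConfig;

  chipConfig.sclk = 125000000;                   /* board specific SCLK, Hz */
  chipConfig.ethBaseAddress = (void *) EMAC_BASE;          /* placeholder */
  chipConfig.rxdmaBaseAddress = (void *) EMAC_RX_DMA_BASE; /* placeholder */
  chipConfig.txdmaBaseAddress = (void *) EMAC_TX_DMA_BASE; /* placeholder */
  chipConfig.rxDescCount = 8;                    /* defaults if the ifconfig */
  chipConfig.txDescCount = 8;                    /* gives no buffer counts   */
  chipConfig.phyType = rmii;                     /* board specific */
  chipConfig.phyAddr = 1;                        /* board specific */
  return bfin_ethernet_driver_attach(config, attaching, &chipConfig);
}
#endif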