source: rtems-libbsd/freebsd/sys/arm/lpc/if_lpe.c @ 9f2205a

55-freebsd-126-freebsd-12
Last change on this file since 9f2205a was 9f2205a, checked in by Kevin Kirspel <kevin-kirspel@…>, on 01/30/17 at 16:58:16

Port LPC32XX Ethernet and USB OHCI to RTEMS

  • Property mode set to 100755
File size: 36.1 KB
Line 
1#include <machine/rtems-bsd-kernel-space.h>
2
3/*-
4 * Copyright (c) 2011 Jakub Wojciech Klama <jceel@FreeBSD.org>
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 *    notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 *    notice, this list of conditions and the following disclaimer in the
14 *    documentation and/or other materials provided with the distribution.
15 *
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26 * SUCH DAMAGE.
27 *
28 */
29#include <sys/cdefs.h>
30__FBSDID("$FreeBSD$");
31
32#include <rtems/bsd/sys/param.h>
33#include <sys/endian.h>
34#include <sys/systm.h>
35#include <sys/sockio.h>
36#include <sys/mbuf.h>
37#include <sys/malloc.h>
38#include <sys/kernel.h>
39#include <sys/module.h>
40#include <rtems/bsd/sys/lock.h>
41#include <sys/mutex.h>
42#include <sys/rman.h>
43#include <sys/bus.h>
44#include <sys/socket.h>
45#include <machine/bus.h>
46#ifndef __rtems__
47#include <machine/intr.h>
48#endif /* __rtems__ */
49
50#include <net/if.h>
51#include <net/if_arp.h>
52#include <net/ethernet.h>
53#include <net/if_dl.h>
54#include <net/if_media.h>
55#include <net/if_types.h>
56#include <net/if_var.h>
57
58#include <net/bpf.h>
59
60#ifndef __rtems__
61#include <dev/ofw/ofw_bus.h>
62#include <dev/ofw/ofw_bus_subr.h>
63#endif /* __rtems__ */
64
65#include <dev/mii/mii.h>
66#include <dev/mii/miivar.h>
67
68#include <arm/lpc/lpcreg.h>
69#include <arm/lpc/lpcvar.h>
70#include <arm/lpc/if_lpereg.h>
71
72#include <rtems/bsd/local/miibus_if.h>
73#ifdef __rtems__
74#include <machine/rtems-bsd-cache.h>
75#endif /* __rtems__ */
76
/*
 * Debug trace helper: prints the calling function name followed by the
 * formatted message.  Compiles to nothing unless DEBUG is defined.
 */
#ifdef DEBUG
#define debugf(fmt, args...) do { printf("%s(): ", __func__);   \
    printf(fmt,##args); } while (0)
#else
#define debugf(fmt, args...)
#endif
83
/* Argument block for lpe_dmamap_cb: receives the bus address of a DMA load. */
struct lpe_dmamap_arg {
        bus_addr_t              lpe_dma_busaddr;        /* filled in by callback */
};
87
/* Per-slot software state for one Rx hardware descriptor. */
struct lpe_rxdesc {
        struct mbuf *           lpe_rxdesc_mbuf;        /* mbuf backing this slot */
#ifndef __rtems__
        bus_dmamap_t            lpe_rxdesc_dmamap;      /* DMA map (not used on RTEMS) */
#endif /* __rtems__ */
};
94
95struct lpe_txdesc {
96        int                     lpe_txdesc_first;
97        struct mbuf *           lpe_txdesc_mbuf;
98#ifndef __rtems__
99        bus_dmamap_t            lpe_txdesc_dmamap;
100#endif /* __rtems__ */
101};
102
/*
 * DMA tags/maps plus the software shadow of both descriptor rings and the
 * Tx ring bookkeeping (producer index, last-reaped index, in-use count).
 */
struct lpe_chain_data {
        bus_dma_tag_t           lpe_parent_tag;
        bus_dma_tag_t           lpe_tx_ring_tag;
        bus_dmamap_t            lpe_tx_ring_map;
        bus_dma_tag_t           lpe_tx_status_tag;
        bus_dmamap_t            lpe_tx_status_map;
        bus_dma_tag_t           lpe_tx_buf_tag;
        bus_dma_tag_t           lpe_rx_ring_tag;
        bus_dmamap_t            lpe_rx_ring_map;
        bus_dma_tag_t           lpe_rx_status_tag;
        bus_dmamap_t            lpe_rx_status_map;
        bus_dma_tag_t           lpe_rx_buf_tag;
        struct lpe_rxdesc       lpe_rx_desc[LPE_RXDESC_NUM];    /* per-slot Rx state */
        struct lpe_txdesc       lpe_tx_desc[LPE_TXDESC_NUM];    /* per-slot Tx state */
        int                     lpe_tx_prod;    /* next Tx slot to fill */
        int                     lpe_tx_last;    /* oldest unreaped Tx slot */
        int                     lpe_tx_used;    /* Tx descriptors currently in use */
};
121
/*
 * Kernel-virtual and bus-physical addresses of the Rx/Tx descriptor rings
 * and their matching status arrays; the physical addresses are programmed
 * into the EMAC descriptor-base registers in lpe_init_locked().
 */
struct lpe_ring_data {
        struct lpe_hwdesc *     lpe_rx_ring;            /* Rx descriptors (KVA) */
        struct lpe_hwstatus *   lpe_rx_status;          /* Rx status array (KVA) */
        bus_addr_t              lpe_rx_ring_phys;       /* Rx descriptors (bus addr) */
        bus_addr_t              lpe_rx_status_phys;     /* Rx status (bus addr) */
        struct lpe_hwdesc *     lpe_tx_ring;            /* Tx descriptors (KVA) */
        struct lpe_hwstatus *   lpe_tx_status;          /* Tx status array (KVA) */
        bus_addr_t              lpe_tx_ring_phys;       /* Tx descriptors (bus addr) */
        bus_addr_t              lpe_tx_status_phys;     /* Tx status (bus addr) */
};
132
/* Per-device software context. */
struct lpe_softc {
        struct ifnet *          lpe_ifp;        /* network interface */
        struct mtx              lpe_mtx;        /* driver lock (MTX_DEF, non-recursive) */
#ifndef __rtems__
        phandle_t               lpe_ofw;        /* FDT node handle */
#endif /* __rtems__ */
        device_t                lpe_dev;
        device_t                lpe_miibus;     /* attached miibus child */
        uint8_t                 lpe_enaddr[6];  /* station MAC address */
        struct resource *       lpe_mem_res;    /* register window */
        struct resource *       lpe_irq_res;    /* interrupt line */
        void *                  lpe_intrhand;   /* interrupt handler cookie */
        bus_space_tag_t         lpe_bst;
        bus_space_handle_t      lpe_bsh;
#define LPE_FLAG_LINK           (1 << 0)        /* link is up (set by statchg) */
        uint32_t                lpe_flags;
        int                     lpe_watchdog_timer;     /* ticks until Tx watchdog fires */
        struct callout          lpe_tick;       /* 1 Hz maintenance callout */
        struct lpe_chain_data   lpe_cdata;      /* DMA tags and descriptor bookkeeping */
        struct lpe_ring_data    lpe_rdata;      /* descriptor ring addresses */
};
154
/* Forward declarations. */
static int lpe_probe(device_t);
static int lpe_attach(device_t);
static int lpe_detach(device_t);
static int lpe_miibus_readreg(device_t, int, int);
static int lpe_miibus_writereg(device_t, int, int, int);
static void lpe_miibus_statchg(device_t);

static void lpe_reset(struct lpe_softc *);
static void lpe_init(void *);
static void lpe_init_locked(struct lpe_softc *);
static void lpe_start(struct ifnet *);
static void lpe_start_locked(struct ifnet *);
static void lpe_stop(struct lpe_softc *);
static void lpe_stop_locked(struct lpe_softc *);
static int lpe_ioctl(struct ifnet *, u_long, caddr_t);
static void lpe_set_rxmode(struct lpe_softc *);
static void lpe_set_rxfilter(struct lpe_softc *);
static void lpe_intr(void *);
static void lpe_rxintr(struct lpe_softc *);
static void lpe_txintr(struct lpe_softc *);
static void lpe_tick(void *);
static void lpe_watchdog(struct lpe_softc *);
static int lpe_encap(struct lpe_softc *, struct mbuf **);
static int lpe_dma_alloc(struct lpe_softc *);
static int lpe_dma_alloc_rx(struct lpe_softc *);
static int lpe_dma_alloc_tx(struct lpe_softc *);
static int lpe_init_rx(struct lpe_softc *);
static int lpe_init_rxbuf(struct lpe_softc *, int);
static void lpe_discard_rxbuf(struct lpe_softc *, int);
static void lpe_dmamap_cb(void *, bus_dma_segment_t *, int, int);
static int lpe_ifmedia_upd(struct ifnet *);
static void lpe_ifmedia_sts(struct ifnet *, struct ifmediareq *);

/* Driver lock helpers. */
#define lpe_lock(_sc)           mtx_lock(&(_sc)->lpe_mtx)
#define lpe_unlock(_sc)         mtx_unlock(&(_sc)->lpe_mtx)
#define lpe_lock_assert(_sc)    mtx_assert(&(_sc)->lpe_mtx, MA_OWNED)

/* EMAC register accessors. */
#define lpe_read_4(_sc, _reg)           \
    bus_space_read_4((_sc)->lpe_bst, (_sc)->lpe_bsh, (_reg))
#define lpe_write_4(_sc, _reg, _val)    \
    bus_space_write_4((_sc)->lpe_bst, (_sc)->lpe_bsh, (_reg), (_val))

/* All Rx error bits reported in the status-word info field. */
#define LPE_HWDESC_RXERRS       (LPE_HWDESC_CRCERROR | LPE_HWDESC_SYMBOLERROR | \
    LPE_HWDESC_LENGTHERROR | LPE_HWDESC_ALIGNERROR | LPE_HWDESC_OVERRUN | \
    LPE_HWDESC_RXNODESCR)

/* All Tx error bits reported in the status-word info field. */
#define LPE_HWDESC_TXERRS       (LPE_HWDESC_EXCDEFER | LPE_HWDESC_EXCCOLL | \
    LPE_HWDESC_LATECOLL | LPE_HWDESC_UNDERRUN | LPE_HWDESC_TXNODESCR)
203
/*
 * Device probe.  On FreeBSD the FDT node must be enabled and compatible
 * with "lpc,ethernet"; on RTEMS the device is instantiated unconditionally.
 */
static int
lpe_probe(device_t dev)
{

#ifndef __rtems__
        if (!ofw_bus_status_okay(dev))
                return (ENXIO);

        if (!ofw_bus_is_compatible(dev, "lpc,ethernet"))
                return (ENXIO);
#endif /* __rtems__ */

        device_set_desc(dev, "LPC32x0 10/100 Ethernet");
        return (BUS_PROBE_DEFAULT);
}
219
220static int
221lpe_attach(device_t dev)
222{
223        struct lpe_softc *sc = device_get_softc(dev);
224        struct ifnet *ifp;
225        int rid, i;
226        uint32_t val;
227
228        sc->lpe_dev = dev;
229#ifndef __rtems__
230        sc->lpe_ofw = ofw_bus_get_node(dev);
231
232        i = OF_getprop(sc->lpe_ofw, "local-mac-address", (void *)&sc->lpe_enaddr, 6);
233        if (i != 6) {
234                sc->lpe_enaddr[0] = 0x00;
235                sc->lpe_enaddr[1] = 0x11;
236                sc->lpe_enaddr[2] = 0x22;
237                sc->lpe_enaddr[3] = 0x33;
238                sc->lpe_enaddr[4] = 0x44;
239                sc->lpe_enaddr[5] = 0x55;
240        }
241#else /* __rtems__ */
242        rtems_bsd_get_mac_address(device_get_name(sc->lpe_dev), device_get_unit(sc->lpe_dev), &sc->lpe_enaddr);
243#endif /* __rtems__ */
244
245        mtx_init(&sc->lpe_mtx, device_get_nameunit(dev), MTX_NETWORK_LOCK,
246            MTX_DEF);
247
248        callout_init_mtx(&sc->lpe_tick, &sc->lpe_mtx, 0);
249
250        rid = 0;
251        sc->lpe_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
252            RF_ACTIVE);
253        if (!sc->lpe_mem_res) {
254                device_printf(dev, "cannot allocate memory window\n");
255                goto fail;
256        }
257
258        sc->lpe_bst = rman_get_bustag(sc->lpe_mem_res);
259        sc->lpe_bsh = rman_get_bushandle(sc->lpe_mem_res);
260
261        rid = 0;
262        sc->lpe_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
263            RF_ACTIVE);
264        if (!sc->lpe_irq_res) {
265                device_printf(dev, "cannot allocate interrupt\n");
266                goto fail;
267        }
268
269        sc->lpe_ifp = if_alloc(IFT_ETHER);
270        if (!sc->lpe_ifp) {
271                device_printf(dev, "cannot allocated ifnet\n");
272                goto fail;
273        }
274
275        ifp = sc->lpe_ifp;
276
277        if_initname(ifp, device_get_name(dev), device_get_unit(dev));
278        ifp->if_softc = sc;
279        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
280        ifp->if_start = lpe_start;
281        ifp->if_ioctl = lpe_ioctl;
282        ifp->if_init = lpe_init;
283        IFQ_SET_MAXLEN(&ifp->if_snd, IFQ_MAXLEN);
284        ifp->if_snd.ifq_drv_maxlen = IFQ_MAXLEN;
285        IFQ_SET_READY(&ifp->if_snd);
286
287        ether_ifattach(ifp, sc->lpe_enaddr);
288
289        if (bus_setup_intr(dev, sc->lpe_irq_res, INTR_TYPE_NET, NULL,
290            lpe_intr, sc, &sc->lpe_intrhand)) {
291                device_printf(dev, "cannot establish interrupt handler\n");
292                ether_ifdetach(ifp);
293                goto fail;
294        }
295
296        /* Enable Ethernet clock */
297#ifndef __rtems__
298        lpc_pwr_write(dev, LPC_CLKPWR_MACCLK_CTRL,
299            LPC_CLKPWR_MACCLK_CTRL_REG |
300            LPC_CLKPWR_MACCLK_CTRL_SLAVE |
301            LPC_CLKPWR_MACCLK_CTRL_MASTER |
302            LPC_CLKPWR_MACCLK_CTRL_HDWINF(3));
303#else /* __rtems__ */
304#ifdef LPC32XX_ETHERNET_RMII
305        lpc_pwr_write(dev, LPC_CLKPWR_MACCLK_CTRL,
306            LPC_CLKPWR_MACCLK_CTRL_REG |
307            LPC_CLKPWR_MACCLK_CTRL_SLAVE |
308            LPC_CLKPWR_MACCLK_CTRL_MASTER |
309            LPC_CLKPWR_MACCLK_CTRL_HDWINF(3));
310#else
311        lpc_pwr_write(dev, LPC_CLKPWR_MACCLK_CTRL,
312            LPC_CLKPWR_MACCLK_CTRL_REG |
313            LPC_CLKPWR_MACCLK_CTRL_SLAVE |
314            LPC_CLKPWR_MACCLK_CTRL_MASTER |
315            LPC_CLKPWR_MACCLK_CTRL_HDWINF(1));
316#endif
317#endif /* __rtems__ */
318
319        /* Reset chip */
320        lpe_reset(sc);
321
322        /* Initialize MII */
323#ifndef __rtems__
324        val = lpe_read_4(sc, LPE_COMMAND);
325        lpe_write_4(sc, LPE_COMMAND, val | LPE_COMMAND_RMII);
326
327        if (mii_attach(dev, &sc->lpe_miibus, ifp, lpe_ifmedia_upd,
328            lpe_ifmedia_sts, BMSR_DEFCAPMASK, 0x01,
329            MII_OFFSET_ANY, 0)) {
330                device_printf(dev, "cannot find PHY\n");
331                goto fail;
332        }
333#else /* __rtems__ */
334        if (mii_attach(dev, &sc->lpe_miibus, ifp, lpe_ifmedia_upd,
335            lpe_ifmedia_sts, BMSR_DEFCAPMASK, MII_PHY_ANY,
336            MII_OFFSET_ANY, 0)) {
337                device_printf(dev, "cannot find PHY\n");
338                goto fail;
339        }
340#endif /* __rtems__ */
341
342        lpe_dma_alloc(sc);
343
344        return (0);
345
346fail:
347        if (sc->lpe_ifp)
348                if_free(sc->lpe_ifp);
349        if (sc->lpe_intrhand)
350                bus_teardown_intr(dev, sc->lpe_irq_res, sc->lpe_intrhand);
351        if (sc->lpe_irq_res)
352                bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lpe_irq_res);
353        if (sc->lpe_mem_res)
354                bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lpe_mem_res);
355        return (ENXIO);
356}
357
358static int
359lpe_detach(device_t dev)
360{
361        struct lpe_softc *sc = device_get_softc(dev);
362
363        lpe_stop(sc);
364
365        if_free(sc->lpe_ifp);
366        bus_teardown_intr(dev, sc->lpe_irq_res, sc->lpe_intrhand);
367        bus_release_resource(dev, SYS_RES_IRQ, 0, sc->lpe_irq_res);
368        bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->lpe_mem_res);
369
370        return (0);
371}
372
373static int
374lpe_miibus_readreg(device_t dev, int phy, int reg)
375{
376        struct lpe_softc *sc = device_get_softc(dev);
377        uint32_t val;
378        int result;
379
380        lpe_write_4(sc, LPE_MCMD, LPE_MCMD_READ);
381        lpe_write_4(sc, LPE_MADR,
382            (reg & LPE_MADR_REGMASK) << LPE_MADR_REGSHIFT |
383            (phy & LPE_MADR_PHYMASK) << LPE_MADR_PHYSHIFT);
384
385        val = lpe_read_4(sc, LPE_MIND);
386
387        /* Wait until request is completed */
388        while (val & LPE_MIND_BUSY) {
389                val = lpe_read_4(sc, LPE_MIND);
390                DELAY(10);
391        }
392
393        if (val & LPE_MIND_INVALID)
394                return (0);
395
396        lpe_write_4(sc, LPE_MCMD, 0);
397        result = (lpe_read_4(sc, LPE_MRDD) & LPE_MRDD_DATAMASK);
398        debugf("phy=%d reg=%d result=0x%04x\n", phy, reg, result);
399
400        return (result);
401}
402
403static int
404lpe_miibus_writereg(device_t dev, int phy, int reg, int data)
405{
406        struct lpe_softc *sc = device_get_softc(dev);
407        uint32_t val;
408
409        debugf("phy=%d reg=%d data=0x%04x\n", phy, reg, data);
410
411        lpe_write_4(sc, LPE_MCMD, LPE_MCMD_WRITE);
412        lpe_write_4(sc, LPE_MADR,
413            (reg & LPE_MADR_REGMASK) << LPE_MADR_REGSHIFT |
414            (phy & LPE_MADR_PHYMASK) << LPE_MADR_PHYSHIFT);
415
416        lpe_write_4(sc, LPE_MWTD, (data & LPE_MWTD_DATAMASK));
417
418        val = lpe_read_4(sc, LPE_MIND);
419
420        /* Wait until request is completed */
421        while (val & LPE_MIND_BUSY) {
422                val = lpe_read_4(sc, LPE_MIND);
423                DELAY(10);
424        }
425
426        return (0);
427}
428
/*
 * MII status-change callback: mirror the PHY's link state into
 * sc->lpe_flags (LPE_FLAG_LINK).
 */
static void
lpe_miibus_statchg(device_t dev)
{
        struct lpe_softc *sc = device_get_softc(dev);
        struct mii_data *mii = device_get_softc(sc->lpe_miibus);

#ifndef __rtems__
        /*
         * NOTE(review): locking is compiled out under RTEMS — presumably this
         * callback can be reached with lpe_mtx already held there; confirm.
         */
        lpe_lock(sc);
#endif /* __rtems__ */

        /* Link is up only when the media status is both valid and active. */
        if ((mii->mii_media_status & IFM_ACTIVE) &&
            (mii->mii_media_status & IFM_AVALID))
                sc->lpe_flags |= LPE_FLAG_LINK;
        else
                sc->lpe_flags &= ~LPE_FLAG_LINK;

#ifndef __rtems__
        lpe_unlock(sc);
#endif /* __rtems__ */
}
449
/*
 * Hard-reset the EMAC.  The FreeBSD and RTEMS variants differ in ordering
 * and in how much post-reset register setup is done here; the sequence is
 * order-sensitive, so it is preserved verbatim.
 */
static void
lpe_reset(struct lpe_softc *sc)
{
        uint32_t mac1;

#ifndef __rtems__
        /* Enter soft reset mode */
        mac1 = lpe_read_4(sc, LPE_MAC1);
        lpe_write_4(sc, LPE_MAC1, mac1 | LPE_MAC1_SOFTRESET | LPE_MAC1_RESETTX |
            LPE_MAC1_RESETMCSTX | LPE_MAC1_RESETRX | LPE_MAC1_RESETMCSRX);

        /* Reset registers, Tx path and Rx path */
        lpe_write_4(sc, LPE_COMMAND, LPE_COMMAND_REGRESET |
            LPE_COMMAND_TXRESET | LPE_COMMAND_RXRESET);

        /* Set station address */
        lpe_write_4(sc, LPE_SA2, sc->lpe_enaddr[1] << 8 | sc->lpe_enaddr[0]);
        lpe_write_4(sc, LPE_SA1, sc->lpe_enaddr[3] << 8 | sc->lpe_enaddr[2]);
        lpe_write_4(sc, LPE_SA0, sc->lpe_enaddr[5] << 8 | sc->lpe_enaddr[4]);

        /* Leave soft reset mode */
        mac1 = lpe_read_4(sc, LPE_MAC1);
        lpe_write_4(sc, LPE_MAC1, mac1 & ~(LPE_MAC1_SOFTRESET | LPE_MAC1_RESETTX |
            LPE_MAC1_RESETMCSTX | LPE_MAC1_RESETRX | LPE_MAC1_RESETMCSRX));
#else /* __rtems__ */
        /* Reset registers, Tx path and Rx path */
        lpe_write_4(sc, LPE_COMMAND, LPE_COMMAND_REGRESET | LPE_COMMAND_TXRESET | LPE_COMMAND_RXRESET);

        /* Enter soft reset mode */
        mac1 = lpe_read_4(sc, LPE_MAC1);
        lpe_write_4(sc, LPE_MAC1, mac1 | LPE_MAC1_SOFTRESET | LPE_MAC1_RESETTX |
            LPE_MAC1_RESETMCSTX | LPE_MAC1_RESETRX | LPE_MAC1_RESETMCSRX);

        /* Leave soft reset mode */
        mac1 = lpe_read_4(sc, LPE_MAC1);
        lpe_write_4(sc, LPE_MAC1, mac1 & ~(LPE_MAC1_SOFTRESET | LPE_MAC1_RESETTX |
            LPE_MAC1_RESETMCSTX | LPE_MAC1_RESETRX | LPE_MAC1_RESETMCSRX));

        /*
         * Reinitialize registers.  NOTE(review): the magic values (IPG 0x15/
         * 0x12, CLRT 0x370f, MAXF 0x0600) look like the LPC32x0 user-manual
         * recommended defaults for full duplex — confirm against the manual.
         */
        lpe_write_4(sc, LPE_MCFG, LPE_MCFG_CLKSEL(0x7));
        lpe_write_4(sc, LPE_MAC2, LPE_MAC2_PADCRCENABLE | LPE_MAC2_CRCENABLE | LPE_MAC2_FULLDUPLEX);
        lpe_write_4(sc, LPE_IPGT, 0x15);
        lpe_write_4(sc, LPE_IPGR, 0x12);
        lpe_write_4(sc, LPE_CLRT, 0x370f);
        lpe_write_4(sc, LPE_MAXF, 0x0600);
        lpe_write_4(sc, LPE_SUPP, LPE_SUPP_SPEED);
        lpe_write_4(sc, LPE_TEST, 0x0);
#ifdef LPC32XX_ETHERNET_RMII
        lpe_write_4(sc, LPE_COMMAND, LPE_COMMAND_FULLDUPLEX | LPE_COMMAND_RMII);
#else
        lpe_write_4(sc, LPE_COMMAND, LPE_COMMAND_FULLDUPLEX);
#endif
        /* Mask all interrupts, acknowledge any pending ones, leave power-down. */
        lpe_write_4(sc, LPE_INTENABLE, 0x0);
        lpe_write_4(sc, LPE_INTCLEAR, 0x30ff);
        lpe_write_4(sc, LPE_POWERDOWN, 0x0);

        /* Set station address (bytes packed little-endian, two per register) */
        lpe_write_4(sc, LPE_SA2, sc->lpe_enaddr[1] << 8 | sc->lpe_enaddr[0]);
        lpe_write_4(sc, LPE_SA1, sc->lpe_enaddr[3] << 8 | sc->lpe_enaddr[2]);
        lpe_write_4(sc, LPE_SA0, sc->lpe_enaddr[5] << 8 | sc->lpe_enaddr[4]);
#endif /* __rtems__ */
}
512
/*
 * if_init callback: run the locked initialization under the driver lock.
 */
static void
lpe_init(void *arg)
{
        struct lpe_softc *sc;

        sc = arg;
        lpe_lock(sc);
        lpe_init_locked(sc);
        lpe_unlock(sc);
}
522
/*
 * Bring the interface up: enable the MAC Tx/Rx paths, program the Rx
 * filter, unmask interrupts, reset the ring indices and hand the ring
 * base addresses to the hardware.  Caller must hold the driver lock.
 */
static void
lpe_init_locked(struct lpe_softc *sc)
{
        struct ifnet *ifp = sc->lpe_ifp;
        uint32_t cmd, mac1;

        lpe_lock_assert(sc);

        /* Already running: nothing to do. */
        if (ifp->if_drv_flags & IFF_DRV_RUNNING)
                return;

        /* Enable Tx and Rx */
        cmd = lpe_read_4(sc, LPE_COMMAND);
        lpe_write_4(sc, LPE_COMMAND, cmd | LPE_COMMAND_RXENABLE |
            LPE_COMMAND_TXENABLE | LPE_COMMAND_PASSRUNTFRAME);

        /*
         * Enable receive.  NOTE(review): mac1 is read but deliberately not
         * OR-ed back in (see the commented-out "mac1 |"), so MAC1 is
         * overwritten rather than updated — confirm this is intended.
         */
        mac1 = lpe_read_4(sc, LPE_MAC1);
        lpe_write_4(sc, LPE_MAC1, /*mac1 |*/ LPE_MAC1_RXENABLE | LPE_MAC1_PASSALL);

        lpe_write_4(sc, LPE_MAC2, LPE_MAC2_CRCENABLE | LPE_MAC2_PADCRCENABLE |
            LPE_MAC2_FULLDUPLEX);

        lpe_write_4(sc, LPE_MCFG, LPE_MCFG_CLKSEL(7));

        /* Set up Rx filter */
        lpe_set_rxmode(sc);

        /* Enable interrupts */
        lpe_write_4(sc, LPE_INTENABLE, LPE_INT_RXOVERRUN | LPE_INT_RXERROR |
            LPE_INT_RXFINISH | LPE_INT_RXDONE | LPE_INT_TXUNDERRUN |
            LPE_INT_TXERROR | LPE_INT_TXFINISH | LPE_INT_TXDONE);

        /* Start with an empty Tx ring. */
        sc->lpe_cdata.lpe_tx_prod = 0;
        sc->lpe_cdata.lpe_tx_last = 0;
        sc->lpe_cdata.lpe_tx_used = 0;

        lpe_init_rx(sc);

        /* Initialize Rx packet and status descriptor heads */
        lpe_write_4(sc, LPE_RXDESC, sc->lpe_rdata.lpe_rx_ring_phys);
        lpe_write_4(sc, LPE_RXSTATUS, sc->lpe_rdata.lpe_rx_status_phys);
        lpe_write_4(sc, LPE_RXDESC_NUMBER, LPE_RXDESC_NUM - 1);
        lpe_write_4(sc, LPE_RXDESC_CONS, 0);

        /* Initialize Tx packet and status descriptor heads */
        lpe_write_4(sc, LPE_TXDESC, sc->lpe_rdata.lpe_tx_ring_phys);
        lpe_write_4(sc, LPE_TXSTATUS, sc->lpe_rdata.lpe_tx_status_phys);
        lpe_write_4(sc, LPE_TXDESC_NUMBER, LPE_TXDESC_NUM - 1);
        lpe_write_4(sc, LPE_TXDESC_PROD, 0);

        ifp->if_drv_flags |= IFF_DRV_RUNNING;
        ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;

        /* Kick off the 1 Hz maintenance callout (watchdog + media tick). */
        callout_reset(&sc->lpe_tick, hz, lpe_tick, sc);
}
579
580static void
581lpe_start(struct ifnet *ifp)
582{
583        struct lpe_softc *sc = (struct lpe_softc *)ifp->if_softc;
584
585        lpe_lock(sc);
586        lpe_start_locked(ifp);
587        lpe_unlock(sc);
588}
589
590static void
591lpe_start_locked(struct ifnet *ifp)
592{
593        struct lpe_softc *sc = (struct lpe_softc *)ifp->if_softc;
594        struct mbuf *m_head;
595        int encap = 0;
596
597        lpe_lock_assert(sc);
598
599        while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
600                if (lpe_read_4(sc, LPE_TXDESC_PROD) ==
601                    lpe_read_4(sc, LPE_TXDESC_CONS) - 5)
602                        break;
603
604                /* Dequeue first packet */
605                IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
606                if (!m_head)
607                        break;
608
609                lpe_encap(sc, &m_head);
610
611                encap++;
612        }
613
614        /* Submit new descriptor list */
615        if (encap) {
616                lpe_write_4(sc, LPE_TXDESC_PROD, sc->lpe_cdata.lpe_tx_prod);
617                sc->lpe_watchdog_timer = 5;
618        }
619       
620}
621
#ifdef __rtems__
/*
 * Walk an mbuf chain and build a DMA segment list for transmission,
 * flushing each data buffer from the data cache.  Zero-length mbufs are
 * skipped.  Returns 0 and stores the segment count in *nsegs on success,
 * or EFBIG when more than LPE_MAXFRAGS segments would be required.
 */
static int
lpe_get_segs_for_tx(struct mbuf *m, bus_dma_segment_t segs[LPE_MAXFRAGS],
    int *nsegs)
{
        int cnt = 0;

        while (m != NULL) {
                if (m->m_len > 0) {
                        segs[cnt].ds_addr = mtod(m, bus_addr_t);
                        segs[cnt].ds_len = m->m_len;
#ifdef CPU_DATA_CACHE_ALIGNMENT
                        rtems_cache_flush_multiple_data_lines(m->m_data, m->m_len);
#endif
                        cnt++;
                }
                m = m->m_next;
                /* Chain continues past the last available slot: give up. */
                if (cnt >= LPE_MAXFRAGS && m != NULL)
                        return (EFBIG);
        }

        *nsegs = cnt;
        return (0);
}
#endif /* __rtems__ */
/*
 * Encapsulate one mbuf chain into Tx hardware descriptors starting at the
 * current producer index.  On success the software producer index and
 * in-use count are advanced; the hardware producer register is updated
 * later by the caller (lpe_start_locked).  Caller must hold the driver
 * lock.  Returns 0, or an errno if segment mapping failed.
 */
static int
lpe_encap(struct lpe_softc *sc, struct mbuf **m_head)
{
        struct lpe_txdesc *txd;
        struct lpe_hwdesc *hwd;
        bus_dma_segment_t segs[LPE_MAXFRAGS];
        int i, err, nsegs, prod;

        lpe_lock_assert(sc);
        M_ASSERTPKTHDR((*m_head));

        prod = sc->lpe_cdata.lpe_tx_prod;
        txd = &sc->lpe_cdata.lpe_tx_desc[prod];

        debugf("starting with prod=%d\n", prod);

        /* Map the mbuf chain into DMA segments. */
#ifndef __rtems__
        err = bus_dmamap_load_mbuf_sg(sc->lpe_cdata.lpe_tx_buf_tag,
            txd->lpe_txdesc_dmamap, *m_head, segs, &nsegs, BUS_DMA_NOWAIT);
#else /* __rtems__ */
        err = lpe_get_segs_for_tx(*m_head, segs, &nsegs);
#endif /* __rtems__ */

        if (err)
                return (err);

        /* An empty chain cannot be transmitted: free it and report EIO. */
        if (nsegs == 0) {
                m_freem(*m_head);
                *m_head = NULL;
                return (EIO);
        }

#ifndef __rtems__
        bus_dmamap_sync(sc->lpe_cdata.lpe_tx_buf_tag, txd->lpe_txdesc_dmamap,
          BUS_DMASYNC_PREREAD);
#endif /* __rtems__ */
        bus_dmamap_sync(sc->lpe_cdata.lpe_tx_ring_tag, sc->lpe_cdata.lpe_tx_ring_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

        /* Record ownership of the chain at the first descriptor slot. */
        txd->lpe_txdesc_first = 1;
        txd->lpe_txdesc_mbuf = *m_head;

        for (i = 0; i < nsegs; i++) {
                hwd = &sc->lpe_rdata.lpe_tx_ring[prod];
                hwd->lhr_data = segs[i].ds_addr;
                /* Hardware encodes the segment size as (length - 1). */
                hwd->lhr_control = segs[i].ds_len - 1;

                /* Last segment: mark frame end, request IRQ, CRC and padding. */
                if (i == nsegs - 1) {
                        hwd->lhr_control |= LPE_HWDESC_LASTFLAG;
                        hwd->lhr_control |= LPE_HWDESC_INTERRUPT;
                        hwd->lhr_control |= LPE_HWDESC_CRC;
                        hwd->lhr_control |= LPE_HWDESC_PAD;
                }

#ifdef __rtems__
#ifdef CPU_DATA_CACHE_ALIGNMENT
                /* Make the descriptor visible to the DMA engine. */
                rtems_cache_flush_multiple_data_lines(hwd, sizeof(*hwd));
#endif
#endif /* __rtems__ */
                LPE_INC(prod, LPE_TXDESC_NUM);
        }

        bus_dmamap_sync(sc->lpe_cdata.lpe_tx_ring_tag, sc->lpe_cdata.lpe_tx_ring_map,
            BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

        sc->lpe_cdata.lpe_tx_used += nsegs;
        sc->lpe_cdata.lpe_tx_prod = prod;

        return (0);
}
717
/*
 * Stop the interface, taking and releasing the driver lock around the
 * locked variant.
 */
static void
lpe_stop(struct lpe_softc *lpe)
{

        lpe_lock(lpe);
        lpe_stop_locked(lpe);
        lpe_unlock(lpe);
}
725
/*
 * Stop the interface: cancel the tick callout, quiesce the MAC and clear
 * the running/active flags.  Caller must hold the driver lock.
 */
static void
lpe_stop_locked(struct lpe_softc *sc)
{
        lpe_lock_assert(sc);

        callout_stop(&sc->lpe_tick);

        /*
         * Disable interrupts.  NOTE(review): this writes LPE_INTCLEAR
         * (acknowledge) rather than LPE_INTENABLE (mask) — presumably the
         * subsequent COMMAND/MAC writes quiesce the source; confirm against
         * the LPC32x0 manual.
         */
        lpe_write_4(sc, LPE_INTCLEAR, 0xffffffff);

        /* Stop EMAC */
        lpe_write_4(sc, LPE_MAC1, 0);
        lpe_write_4(sc, LPE_MAC2, 0);
        lpe_write_4(sc, LPE_COMMAND, 0);

        sc->lpe_ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
        sc->lpe_ifp->if_drv_flags &= ~IFF_DRV_RUNNING;
}
744
745static int
746lpe_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
747{
748        struct lpe_softc *sc = ifp->if_softc;
749        struct mii_data *mii = device_get_softc(sc->lpe_miibus);
750        struct ifreq *ifr = (struct ifreq *)data;
751        int err = 0;
752
753        switch (cmd) {
754        case SIOCSIFFLAGS:
755                lpe_lock(sc);
756                if (ifp->if_flags & IFF_UP) {
757                        if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
758                                lpe_set_rxmode(sc);
759                                lpe_set_rxfilter(sc);
760                        } else
761                                lpe_init_locked(sc);
762                } else
763                        lpe_stop(sc);
764                lpe_unlock(sc);
765                break;
766        case SIOCADDMULTI:
767        case SIOCDELMULTI:
768                if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
769                        lpe_lock(sc);
770                        lpe_set_rxfilter(sc);
771                        lpe_unlock(sc);
772                }
773                break;
774        case SIOCGIFMEDIA:
775        case SIOCSIFMEDIA:
776                err = ifmedia_ioctl(ifp, ifr, &mii->mii_media, cmd);
777                break;
778        default:
779                err = ether_ioctl(ifp, cmd, data);
780                break;
781        }
782
783        return (err);
784}
785
786static void lpe_set_rxmode(struct lpe_softc *sc)
787{
788        struct ifnet *ifp = sc->lpe_ifp;
789        uint32_t rxfilt;
790
791        rxfilt = LPE_RXFILTER_UNIHASH | LPE_RXFILTER_MULTIHASH | LPE_RXFILTER_PERFECT;
792
793        if (ifp->if_flags & IFF_BROADCAST)
794                rxfilt |= LPE_RXFILTER_BROADCAST;
795
796        if (ifp->if_flags & IFF_PROMISC)
797                rxfilt |= LPE_RXFILTER_UNICAST | LPE_RXFILTER_MULTICAST;
798
799        if (ifp->if_flags & IFF_ALLMULTI)
800                rxfilt |= LPE_RXFILTER_MULTICAST;
801
802        lpe_write_4(sc, LPE_RXFILTER_CTRL, rxfilt);
803}
804
805static void lpe_set_rxfilter(struct lpe_softc *sc)
806{
807        struct ifnet *ifp = sc->lpe_ifp;
808        struct ifmultiaddr *ifma;
809        int index;
810        uint32_t hashl, hashh;
811
812        hashl = 0;
813        hashh = 0;
814
815        if_maddr_rlock(ifp);
816        TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
817                if (ifma->ifma_addr->sa_family != AF_LINK)
818                        continue;
819
820                index = ether_crc32_be(LLADDR((struct sockaddr_dl *)
821                    ifma->ifma_addr), ETHER_ADDR_LEN) >> 23 & 0x3f;
822
823                if (index > 31)
824                        hashh |= (1 << (index - 32));
825                else
826                        hashl |= (1 << index);
827        }
828        if_maddr_runlock(ifp);
829
830        /* Program new hash filter */
831        lpe_write_4(sc, LPE_HASHFILTER_L, hashl);
832        lpe_write_4(sc, LPE_HASHFILTER_H, hashh);
833}
834
/*
 * Interrupt handler: loop while the status register reports pending
 * events, dispatching Rx and Tx processing and acknowledging the handled
 * bits each pass.
 */
static void
lpe_intr(void *arg)
{
        struct lpe_softc *sc = (struct lpe_softc *)arg;
        uint32_t intstatus;

        debugf("status=0x%08x\n", lpe_read_4(sc, LPE_INTSTATUS));

        lpe_lock(sc);

        while ((intstatus = lpe_read_4(sc, LPE_INTSTATUS))) {
                if (intstatus & LPE_INT_RXDONE)
                        lpe_rxintr(sc);

#ifndef __rtems__
                if (intstatus & LPE_INT_TXDONE)
                        lpe_txintr(sc);

#else /* __rtems__ */
                /* A Tx underrun wedges the Tx path: restart the interface. */
                if (intstatus & LPE_INT_TXUNDERRUN) {
                        if_inc_counter(sc->lpe_ifp, IFCOUNTER_OERRORS, 1);
                        lpe_stop_locked(sc);
                        lpe_init_locked(sc);
                }
                else if (intstatus & (LPE_INT_TXERROR | LPE_INT_TXFINISH | LPE_INT_TXDONE))
                        lpe_txintr(sc);
#endif /* __rtems__ */
                /* Acknowledge everything handled this pass. */
                lpe_write_4(sc, LPE_INTCLEAR, 0xffff);
        }

        lpe_unlock(sc);
}
867
/*
 * Rx processing: walk the Rx ring from the consumer to the hardware
 * producer index, pass good frames up the stack and recycle errored
 * buffers.  Called with the driver lock held; the lock is dropped around
 * if_input() to avoid holding it through the stack.
 */
static void
lpe_rxintr(struct lpe_softc *sc)
{
        struct ifnet *ifp = sc->lpe_ifp;
        struct lpe_hwdesc *hwd;
        struct lpe_hwstatus *hws;
        struct lpe_rxdesc *rxd;
        struct mbuf *m;
        int prod, cons;

        for (;;) {
                prod = lpe_read_4(sc, LPE_RXDESC_PROD);
                cons = lpe_read_4(sc, LPE_RXDESC_CONS);

                /* Ring empty: hardware has produced nothing new. */
                if (prod == cons)
                        break;

                rxd = &sc->lpe_cdata.lpe_rx_desc[cons];
                hwd = &sc->lpe_rdata.lpe_rx_ring[cons];
                hws = &sc->lpe_rdata.lpe_rx_status[cons];
#ifdef __rtems__
#ifdef CPU_DATA_CACHE_ALIGNMENT
                /* DMA wrote these behind the cache: invalidate before reading. */
                rtems_cache_invalidate_multiple_data_lines(rxd, sizeof(*rxd));
                rtems_cache_invalidate_multiple_data_lines(hwd, sizeof(*hwd));
                rtems_cache_invalidate_multiple_data_lines(hws, sizeof(*hws));
#endif
#endif /* __rtems__ */

                /* Check received frame for errors */
                if (hws->lhs_info & LPE_HWDESC_RXERRS) {
                        if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
                        lpe_discard_rxbuf(sc, cons);
                        lpe_init_rxbuf(sc, cons);
                        goto skip;
                }

                m = rxd->lpe_rxdesc_mbuf;
#ifdef __rtems__
#ifdef CPU_DATA_CACHE_ALIGNMENT
                rtems_cache_invalidate_multiple_data_lines(m->m_data, m->m_len);
#endif
#endif /* __rtems__ */
                m->m_pkthdr.rcvif = ifp;
                /*
                 * Skip the 2-byte alignment padding before the Ethernet
                 * header.  NOTE(review): m_len/m_pkthdr.len are not updated
                 * from the status word here — presumably set when the buffer
                 * was initialized in lpe_init_rxbuf(); confirm.
                 */
                m->m_data += 2;

                if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

                /* Drop the driver lock across the stack input path. */
                lpe_unlock(sc);
                (*ifp->if_input)(ifp, m);
                lpe_lock(sc);

                lpe_init_rxbuf(sc, cons);
skip:
                LPE_INC(cons, LPE_RXDESC_NUM);
                lpe_write_4(sc, LPE_RXDESC_CONS, cons);
        }
}
925
/*
 * Transmit-completion interrupt service.  Walks the Tx ring from the
 * software tail (lpe_tx_last) up to the hardware consumer index, updating
 * counters from each status word and freeing the mbuf of every completed
 * chain.  Clears the watchdog once the ring is fully drained.
 */
static void
lpe_txintr(struct lpe_softc *sc)
{
        struct ifnet *ifp = sc->lpe_ifp;
        struct lpe_hwdesc *hwd;
        struct lpe_hwstatus *hws;
        struct lpe_txdesc *txd;
        int cons, last;

        for (;;) {
                /* Hardware consumer vs. last descriptor we have reclaimed. */
                cons = lpe_read_4(sc, LPE_TXDESC_CONS);
                last = sc->lpe_cdata.lpe_tx_last;

                if (cons == last)
                        break;

                txd = &sc->lpe_cdata.lpe_tx_desc[last];
                hwd = &sc->lpe_rdata.lpe_tx_ring[last];
                hws = &sc->lpe_rdata.lpe_tx_status[last];

#ifndef __rtems__
                bus_dmamap_sync(sc->lpe_cdata.lpe_tx_buf_tag,
                    txd->lpe_txdesc_dmamap, BUS_DMASYNC_POSTWRITE);
#else /* __rtems__ */
#ifdef CPU_DATA_CACHE_ALIGNMENT
                /* Invalidate so the status word read below is not stale. */
                rtems_cache_invalidate_multiple_data_lines(txd, sizeof(*txd));
                rtems_cache_invalidate_multiple_data_lines(hwd, sizeof(*hwd));
                rtems_cache_invalidate_multiple_data_lines(hws, sizeof(*hws));
#endif
#endif /* __rtems__ */

                /* Collision count is encoded in the status word. */
                if_inc_counter(ifp, IFCOUNTER_COLLISIONS, LPE_HWDESC_COLLISIONS(hws->lhs_info));

                if (hws->lhs_info & LPE_HWDESC_TXERRS)
                        if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
                else
                        if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);

                /* The mbuf chain is attached to the first descriptor only. */
                if (txd->lpe_txdesc_first) {
#ifndef __rtems__
                        bus_dmamap_unload(sc->lpe_cdata.lpe_tx_buf_tag,
                            txd->lpe_txdesc_dmamap);
#endif /* __rtems__ */

                        m_freem(txd->lpe_txdesc_mbuf);
                        txd->lpe_txdesc_mbuf = NULL;
                        txd->lpe_txdesc_first = 0;
                }

                sc->lpe_cdata.lpe_tx_used--;
                LPE_INC(sc->lpe_cdata.lpe_tx_last, LPE_TXDESC_NUM);
        }

        /* Nothing in flight: disarm the transmit watchdog. */
        if (!sc->lpe_cdata.lpe_tx_used)
                sc->lpe_watchdog_timer = 0;
}
982
983static void
984lpe_tick(void *arg)
985{
986        struct lpe_softc *sc = (struct lpe_softc *)arg;
987        struct mii_data *mii = device_get_softc(sc->lpe_miibus);
988
989        lpe_lock_assert(sc);
990       
991        mii_tick(mii);
992        lpe_watchdog(sc);
993
994        callout_reset(&sc->lpe_tick, hz, lpe_tick, sc);
995}
996
997static void
998lpe_watchdog(struct lpe_softc *sc)
999{
1000        struct ifnet *ifp = sc->lpe_ifp;
1001
1002        lpe_lock_assert(sc);
1003
1004        if (sc->lpe_watchdog_timer == 0 || sc->lpe_watchdog_timer--)
1005                return;
1006
1007        /* Chip has stopped responding */
1008        device_printf(sc->lpe_dev, "WARNING: chip hangup, restarting...\n");
1009        lpe_stop_locked(sc);
1010        lpe_init_locked(sc);
1011
1012        /* Try to resend packets */
1013        if (!IFQ_DRV_IS_EMPTY(&ifp->if_snd))
1014                lpe_start_locked(ifp);
1015}
1016
1017static int
1018lpe_dma_alloc(struct lpe_softc *sc)
1019{
1020        int err;
1021
1022        /* Create parent DMA tag */
1023        err = bus_dma_tag_create(
1024            bus_get_dma_tag(sc->lpe_dev),
1025            1, 0,                       /* alignment, boundary */
1026            BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1027            BUS_SPACE_MAXADDR,          /* highaddr */
1028            NULL, NULL,                 /* filter, filterarg */
1029            BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsize, nsegments */
1030            BUS_SPACE_MAXSIZE_32BIT, 0, /* maxsegsize, flags */
1031            NULL, NULL,                 /* lockfunc, lockarg */
1032            &sc->lpe_cdata.lpe_parent_tag);
1033
1034        if (err) {
1035                device_printf(sc->lpe_dev, "cannot create parent DMA tag\n");
1036                return (err);
1037        }
1038
1039        err = lpe_dma_alloc_rx(sc);
1040        if (err)
1041                return (err);
1042
1043        err = lpe_dma_alloc_tx(sc);
1044        if (err)
1045                return (err);
1046
1047        return (0);
1048}
1049
1050static int
1051lpe_dma_alloc_rx(struct lpe_softc *sc)
1052{
1053        struct lpe_rxdesc *rxd;
1054        struct lpe_dmamap_arg ctx;
1055        int err, i;
1056
1057        /* Create tag for Rx ring */
1058        err = bus_dma_tag_create(
1059            sc->lpe_cdata.lpe_parent_tag,
1060            LPE_DESC_ALIGN, 0,          /* alignment, boundary */
1061            BUS_SPACE_MAXADDR,          /* lowaddr */
1062            BUS_SPACE_MAXADDR,          /* highaddr */
1063            NULL, NULL,                 /* filter, filterarg */
1064            LPE_RXDESC_SIZE, 1,         /* maxsize, nsegments */
1065            LPE_RXDESC_SIZE, 0,         /* maxsegsize, flags */
1066            NULL, NULL,                 /* lockfunc, lockarg */
1067            &sc->lpe_cdata.lpe_rx_ring_tag);
1068
1069        if (err) {
1070                device_printf(sc->lpe_dev, "cannot create Rx ring DMA tag\n");
1071                goto fail;
1072        }
1073
1074        /* Create tag for Rx status ring */
1075        err = bus_dma_tag_create(
1076            sc->lpe_cdata.lpe_parent_tag,
1077            LPE_DESC_ALIGN, 0,          /* alignment, boundary */
1078            BUS_SPACE_MAXADDR,          /* lowaddr */
1079            BUS_SPACE_MAXADDR,          /* highaddr */
1080            NULL, NULL,                 /* filter, filterarg */
1081            LPE_RXSTATUS_SIZE, 1,       /* maxsize, nsegments */
1082            LPE_RXSTATUS_SIZE, 0,       /* maxsegsize, flags */
1083            NULL, NULL,                 /* lockfunc, lockarg */
1084            &sc->lpe_cdata.lpe_rx_status_tag);
1085
1086        if (err) {
1087                device_printf(sc->lpe_dev, "cannot create Rx status ring DMA tag\n");
1088                goto fail;
1089        }
1090
1091        /* Create tag for Rx buffers */
1092        err = bus_dma_tag_create(
1093            sc->lpe_cdata.lpe_parent_tag,
1094            LPE_DESC_ALIGN, 0,          /* alignment, boundary */
1095            BUS_SPACE_MAXADDR,          /* lowaddr */
1096            BUS_SPACE_MAXADDR,          /* highaddr */
1097            NULL, NULL,                 /* filter, filterarg */
1098            MCLBYTES * LPE_RXDESC_NUM,  /* maxsize */
1099            LPE_RXDESC_NUM,             /* segments */
1100            MCLBYTES, 0,                /* maxsegsize, flags */
1101            NULL, NULL,                 /* lockfunc, lockarg */
1102            &sc->lpe_cdata.lpe_rx_buf_tag);
1103
1104        if (err) {
1105                device_printf(sc->lpe_dev, "cannot create Rx buffers DMA tag\n");
1106                goto fail;
1107        }
1108
1109        /* Allocate Rx DMA ring */
1110        err = bus_dmamem_alloc(sc->lpe_cdata.lpe_rx_ring_tag,
1111            (void **)&sc->lpe_rdata.lpe_rx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1112            BUS_DMA_ZERO, &sc->lpe_cdata.lpe_rx_ring_map);
1113
1114        err = bus_dmamap_load(sc->lpe_cdata.lpe_rx_ring_tag,
1115            sc->lpe_cdata.lpe_rx_ring_map, sc->lpe_rdata.lpe_rx_ring,
1116            LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
1117
1118        sc->lpe_rdata.lpe_rx_ring_phys = ctx.lpe_dma_busaddr;
1119
1120        /* Allocate Rx status ring */
1121        err = bus_dmamem_alloc(sc->lpe_cdata.lpe_rx_status_tag,
1122            (void **)&sc->lpe_rdata.lpe_rx_status, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1123            BUS_DMA_ZERO, &sc->lpe_cdata.lpe_rx_status_map);
1124
1125        err = bus_dmamap_load(sc->lpe_cdata.lpe_rx_status_tag,
1126            sc->lpe_cdata.lpe_rx_status_map, sc->lpe_rdata.lpe_rx_status,
1127            LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
1128
1129        sc->lpe_rdata.lpe_rx_status_phys = ctx.lpe_dma_busaddr;
1130
1131
1132        /* Create Rx buffers DMA map */
1133        for (i = 0; i < LPE_RXDESC_NUM; i++) {
1134                rxd = &sc->lpe_cdata.lpe_rx_desc[i];
1135                rxd->lpe_rxdesc_mbuf = NULL;
1136#ifndef __rtems__
1137                rxd->lpe_rxdesc_dmamap = NULL;
1138
1139                err = bus_dmamap_create(sc->lpe_cdata.lpe_rx_buf_tag, 0,
1140                    &rxd->lpe_rxdesc_dmamap);
1141
1142                if (err) {
1143                        device_printf(sc->lpe_dev, "cannot create Rx DMA map\n");
1144                        return (err);
1145                }
1146#endif /* __rtems__ */
1147        }
1148
1149        return (0);
1150fail:
1151        return (err);
1152}
1153
1154static int
1155lpe_dma_alloc_tx(struct lpe_softc *sc)
1156{
1157        struct lpe_txdesc *txd;
1158        struct lpe_dmamap_arg ctx;
1159        int err, i;
1160
1161        /* Create tag for Tx ring */
1162        err = bus_dma_tag_create(
1163            sc->lpe_cdata.lpe_parent_tag,
1164            LPE_DESC_ALIGN, 0,          /* alignment, boundary */
1165            BUS_SPACE_MAXADDR,          /* lowaddr */
1166            BUS_SPACE_MAXADDR,          /* highaddr */
1167            NULL, NULL,                 /* filter, filterarg */
1168            LPE_TXDESC_SIZE, 1,         /* maxsize, nsegments */
1169            LPE_TXDESC_SIZE, 0,         /* maxsegsize, flags */
1170            NULL, NULL,                 /* lockfunc, lockarg */
1171            &sc->lpe_cdata.lpe_tx_ring_tag);
1172
1173        if (err) {
1174                device_printf(sc->lpe_dev, "cannot create Tx ring DMA tag\n");
1175                goto fail;
1176        }
1177
1178        /* Create tag for Tx status ring */
1179        err = bus_dma_tag_create(
1180            sc->lpe_cdata.lpe_parent_tag,
1181            LPE_DESC_ALIGN, 0,          /* alignment, boundary */
1182            BUS_SPACE_MAXADDR,          /* lowaddr */
1183            BUS_SPACE_MAXADDR,          /* highaddr */
1184            NULL, NULL,                 /* filter, filterarg */
1185            LPE_TXSTATUS_SIZE, 1,       /* maxsize, nsegments */
1186            LPE_TXSTATUS_SIZE, 0,       /* maxsegsize, flags */
1187            NULL, NULL,                 /* lockfunc, lockarg */
1188            &sc->lpe_cdata.lpe_tx_status_tag);
1189
1190        if (err) {
1191                device_printf(sc->lpe_dev, "cannot create Tx status ring DMA tag\n");
1192                goto fail;
1193        }
1194
1195        /* Create tag for Tx buffers */
1196        err = bus_dma_tag_create(
1197            sc->lpe_cdata.lpe_parent_tag,
1198            LPE_DESC_ALIGN, 0,          /* alignment, boundary */
1199            BUS_SPACE_MAXADDR,          /* lowaddr */
1200            BUS_SPACE_MAXADDR,          /* highaddr */
1201            NULL, NULL,                 /* filter, filterarg */
1202            MCLBYTES * LPE_TXDESC_NUM,  /* maxsize */
1203            LPE_TXDESC_NUM,             /* segments */
1204            MCLBYTES, 0,                /* maxsegsize, flags */
1205            NULL, NULL,                 /* lockfunc, lockarg */
1206            &sc->lpe_cdata.lpe_tx_buf_tag);
1207
1208        if (err) {
1209                device_printf(sc->lpe_dev, "cannot create Tx buffers DMA tag\n");
1210                goto fail;
1211        }
1212
1213        /* Allocate Tx DMA ring */
1214        err = bus_dmamem_alloc(sc->lpe_cdata.lpe_tx_ring_tag,
1215            (void **)&sc->lpe_rdata.lpe_tx_ring, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1216            BUS_DMA_ZERO, &sc->lpe_cdata.lpe_tx_ring_map);
1217
1218        err = bus_dmamap_load(sc->lpe_cdata.lpe_tx_ring_tag,
1219            sc->lpe_cdata.lpe_tx_ring_map, sc->lpe_rdata.lpe_tx_ring,
1220            LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
1221
1222        sc->lpe_rdata.lpe_tx_ring_phys = ctx.lpe_dma_busaddr;
1223
1224        /* Allocate Tx status ring */
1225        err = bus_dmamem_alloc(sc->lpe_cdata.lpe_tx_status_tag,
1226            (void **)&sc->lpe_rdata.lpe_tx_status, BUS_DMA_WAITOK | BUS_DMA_COHERENT |
1227            BUS_DMA_ZERO, &sc->lpe_cdata.lpe_tx_status_map);
1228
1229        err = bus_dmamap_load(sc->lpe_cdata.lpe_tx_status_tag,
1230            sc->lpe_cdata.lpe_tx_status_map, sc->lpe_rdata.lpe_tx_status,
1231            LPE_RXDESC_SIZE, lpe_dmamap_cb, &ctx, 0);
1232
1233        sc->lpe_rdata.lpe_tx_status_phys = ctx.lpe_dma_busaddr;
1234
1235
1236        /* Create Tx buffers DMA map */
1237        for (i = 0; i < LPE_TXDESC_NUM; i++) {
1238                txd = &sc->lpe_cdata.lpe_tx_desc[i];
1239                txd->lpe_txdesc_mbuf = NULL;
1240#ifndef __rtems__
1241                txd->lpe_txdesc_dmamap = NULL;
1242#endif /* __rtems__ */
1243                txd->lpe_txdesc_first = 0;
1244
1245#ifndef __rtems__
1246                err = bus_dmamap_create(sc->lpe_cdata.lpe_tx_buf_tag, 0,
1247                    &txd->lpe_txdesc_dmamap);
1248#endif /* __rtems__ */
1249
1250                if (err) {
1251                        device_printf(sc->lpe_dev, "cannot create Tx DMA map\n");
1252                        return (err);
1253                }
1254        }
1255
1256        return (0);
1257fail:
1258        return (err);
1259}
1260
1261static int
1262lpe_init_rx(struct lpe_softc *sc)
1263{
1264        int i, err;
1265
1266        for (i = 0; i < LPE_RXDESC_NUM; i++) {
1267                err = lpe_init_rxbuf(sc, i);
1268                if (err)
1269                        return (err);
1270        }
1271
1272        return (0);
1273}
1274
1275static int
1276lpe_init_rxbuf(struct lpe_softc *sc, int n)
1277{
1278        struct lpe_rxdesc *rxd;
1279        struct lpe_hwdesc *hwd;
1280        struct lpe_hwstatus *hws;
1281        struct mbuf *m;
1282        bus_dma_segment_t segs[1];
1283        int nsegs;
1284
1285        rxd = &sc->lpe_cdata.lpe_rx_desc[n];
1286        hwd = &sc->lpe_rdata.lpe_rx_ring[n];
1287        hws = &sc->lpe_rdata.lpe_rx_status[n];
1288        m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1289
1290        if (!m) {
1291                device_printf(sc->lpe_dev, "WARNING: mbufs exhausted!\n");
1292                return (ENOBUFS);
1293        }
1294
1295        m->m_len = m->m_pkthdr.len = MCLBYTES;
1296
1297#ifndef __rtems__
1298        bus_dmamap_unload(sc->lpe_cdata.lpe_rx_buf_tag, rxd->lpe_rxdesc_dmamap);
1299
1300        if (bus_dmamap_load_mbuf_sg(sc->lpe_cdata.lpe_rx_buf_tag,
1301            rxd->lpe_rxdesc_dmamap, m, segs, &nsegs, 0)) {
1302                m_freem(m);
1303                return (ENOBUFS);
1304        }
1305
1306        bus_dmamap_sync(sc->lpe_cdata.lpe_rx_buf_tag, rxd->lpe_rxdesc_dmamap,
1307            BUS_DMASYNC_PREREAD);
1308#else /* __rtems__ */
1309#ifdef CPU_DATA_CACHE_ALIGNMENT
1310        rtems_cache_invalidate_multiple_data_lines(m->m_data, m->m_len);
1311#endif
1312        segs[0].ds_addr = mtod(m, bus_addr_t);
1313#endif /* __rtems__ */
1314
1315        rxd->lpe_rxdesc_mbuf = m;
1316        hwd->lhr_data = segs[0].ds_addr + 2;
1317        hwd->lhr_control = (segs[0].ds_len - 1) | LPE_HWDESC_INTERRUPT;
1318#ifdef __rtems__
1319#ifdef CPU_DATA_CACHE_ALIGNMENT
1320        rtems_cache_flush_multiple_data_lines(hwd, sizeof(*hwd));
1321#endif
1322#endif /* __rtems__ */
1323
1324        return (0);
1325}
1326
1327static void
1328lpe_discard_rxbuf(struct lpe_softc *sc, int n)
1329{
1330        struct lpe_rxdesc *rxd;
1331        struct lpe_hwdesc *hwd;
1332
1333        rxd = &sc->lpe_cdata.lpe_rx_desc[n];
1334        hwd = &sc->lpe_rdata.lpe_rx_ring[n];
1335
1336#ifndef __rtems__
1337        bus_dmamap_unload(sc->lpe_cdata.lpe_rx_buf_tag, rxd->lpe_rxdesc_dmamap);
1338#endif /* __rtems__ */
1339
1340        hwd->lhr_data = 0;
1341        hwd->lhr_control = 0;
1342
1343        if (rxd->lpe_rxdesc_mbuf) {
1344                m_freem(rxd->lpe_rxdesc_mbuf);
1345                rxd->lpe_rxdesc_mbuf = NULL;
1346        }
1347}
1348
1349static void
1350lpe_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1351{
1352        struct lpe_dmamap_arg *ctx;
1353
1354        if (error)
1355                return;
1356
1357        ctx = (struct lpe_dmamap_arg *)arg;
1358        ctx->lpe_dma_busaddr = segs[0].ds_addr;
1359}
1360
/*
 * ifmedia "set media" hook.  Media reprogramming is not implemented for
 * this controller; report success so media ioctls do not fail.
 */
static int
lpe_ifmedia_upd(struct ifnet *ifp)
{
        return (0);
}
1366
1367static void
1368lpe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
1369{
1370        struct lpe_softc *sc = ifp->if_softc;
1371        struct mii_data *mii = device_get_softc(sc->lpe_miibus);
1372
1373        lpe_lock(sc);
1374        mii_pollstat(mii);
1375        ifmr->ifm_active = mii->mii_media_active;
1376        ifmr->ifm_status = mii->mii_media_status;
1377        lpe_unlock(sc);
1378}
1379
/* Newbus method table: device lifecycle, bus glue and MII accessors. */
static device_method_t lpe_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         lpe_probe),
        DEVMETHOD(device_attach,        lpe_attach),
        DEVMETHOD(device_detach,        lpe_detach),

        /* Bus interface */
        DEVMETHOD(bus_print_child,      bus_generic_print_child),

        /* MII interface */
        DEVMETHOD(miibus_readreg,       lpe_miibus_readreg),
        DEVMETHOD(miibus_writereg,      lpe_miibus_writereg),
        DEVMETHOD(miibus_statchg,       lpe_miibus_statchg),
        { 0, 0 }        /* table terminator */
};
1395
/* Driver description: name, method table, per-instance softc size. */
static driver_t lpe_driver = {
        "lpe",
        lpe_methods,
        sizeof(struct lpe_softc),
};
1401
static devclass_t lpe_devclass;

#ifndef __rtems__
/* FreeBSD discovers the controller through the flattened device tree. */
DRIVER_MODULE(lpe, simplebus, lpe_driver, lpe_devclass, 0, 0);
#else /* __rtems__ */
/* RTEMS has no simplebus; attach directly below the nexus root bus. */
DRIVER_MODULE(lpe, nexus, lpe_driver, lpe_devclass, 0, 0);
#endif /* __rtems__ */
/* The PHY hangs off this MAC, so instantiate a miibus beneath it. */
DRIVER_MODULE(miibus, lpe, miibus_driver, miibus_devclass, 0, 0);
MODULE_DEPEND(lpe, obio, 1, 1, 1);
MODULE_DEPEND(lpe, miibus, 1, 1, 1);
MODULE_DEPEND(lpe, ether, 1, 1, 1);
Note: See TracBrowser for help on using the repository browser.