source: rtems-libbsd/freebsd/sys/arm/at91/at91_mci.c @ 3fac9e9

55-freebsd-126-freebsd-12
Last change on this file since 3fac9e9 was 3fac9e9, checked in by Sebastian Huber <sebastian.huber@…>, on 04/26/18 at 13:23:08

at91_mci: Use real interrupt

Execute at91_mci_intr() in interrupt context. Synchronize MMC requests
via RTEMS interrupt lock.

  • Property mode set to 100644
File size: 50.9 KB
Line 
1#include <machine/rtems-bsd-kernel-space.h>
2
3/*-
4 * Copyright (c) 2006 Bernd Walter.  All rights reserved.
5 * Copyright (c) 2006 M. Warner Losh.  All rights reserved.
6 * Copyright (c) 2010 Greg Ansley.  All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 *    notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 *    notice, this list of conditions and the following disclaimer in the
15 *    documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
28 */
29
30#include <rtems/bsd/local/opt_platform.h>
31
32#include <sys/cdefs.h>
33__FBSDID("$FreeBSD$");
34
35#include <sys/param.h>
36#include <sys/systm.h>
37#include <sys/bus.h>
38#include <sys/endian.h>
39#include <sys/kernel.h>
40#include <sys/lock.h>
41#include <sys/malloc.h>
42#include <sys/module.h>
43#include <sys/mutex.h>
44#include <rtems/bsd/sys/resource.h>
45#include <sys/rman.h>
46#include <sys/sysctl.h>
47
48#include <machine/bus.h>
49#include <machine/resource.h>
50#include <machine/intr.h>
51
52#include <arm/at91/at91var.h>
53#include <arm/at91/at91_mcireg.h>
54#include <arm/at91/at91_pdcreg.h>
55
56#include <dev/mmc/bridge.h>
57#include <dev/mmc/mmcbrvar.h>
58
59#ifdef FDT
60#include <dev/ofw/ofw_bus.h>
61#include <dev/ofw/ofw_bus_subr.h>
62#endif
63
64#include <rtems/bsd/local/mmcbr_if.h>
65
66#include <rtems/bsd/local/opt_at91.h>
67
68#ifdef __rtems__
69#include <bsp.h>
70#endif /* __rtems__ */
71#if defined(__rtems__) && defined(LIBBSP_ARM_ATSAM_BSP_H)
72#ifdef __rtems__
73#include <rtems/irq-extension.h>
74#include <libchip/chip.h>
75
76#define AT91_MCI_HAS_4WIRE 1
77
78#define at91_master_clock BOARD_MCK
79
80static sXdmad *pXdmad = &XDMAD_Instance;
81#endif /* __rtems__ */
82/*
83 * About running the MCI bus above 25MHz
84 *
85 * Historically, the MCI bus has been run at 30MHz on systems with a 60MHz
86 * master clock, in part due to a bug in dev/mmc.c making always request
87 * 30MHz, and in part over clocking the bus because 15MHz was too slow.
88 * Fixing that bug causes the mmc driver to request a 25MHz clock (as it
89 * should) and the logic in at91_mci_update_ios() picks the highest speed that
90 * doesn't exceed that limit.  With a 60MHz MCK that would be 15MHz, and
91 * that's a real performance buzzkill when you've been getting away with 30MHz
92 * all along.
93 *
94 * By defining AT91_MCI_ALLOW_OVERCLOCK (or setting the allow_overclock=1
95 * device hint or sysctl) you can enable logic in at91_mci_update_ios() to
 * overclock the SD bus a little by running it at MCK / 2 when the requested
97 * speed is 25MHz and the next highest speed is 15MHz or less.  This appears
98 * to work on virtually all SD cards, since it is what this driver has been
99 * doing prior to the introduction of this option, where the overclocking vs
100 * underclocking decision was automatically "overclock".  Modern SD cards can
101 * run at 45mhz/1-bit in standard mode (high speed mode enable commands not
102 * sent) without problems.
103 *
104 * Speaking of high-speed mode, the rm9200 manual says the MCI device supports
105 * the SD v1.0 specification and can run up to 50MHz.  This is interesting in
106 * that the SD v1.0 spec caps the speed at 25MHz; high speed mode was added in
107 * the v1.10 spec.  Furthermore, high speed mode doesn't just crank up the
108 * clock, it alters the signal timing.  The rm9200 MCI device doesn't support
109 * these altered timings.  So while speeds over 25MHz may work, they only work
110 * in what the SD spec calls "default" speed mode, and it amounts to violating
111 * the spec by overclocking the bus.
112 *
113 * If you also enable 4-wire mode it's possible transfers faster than 25MHz
114 * will fail.  On the AT91RM9200, due to bugs in the bus contention logic, if
 * you have the USB host device and OHCI driver enabled, transfers will fail.
 * Even when underclocking to 15MHz, intermittent overrun and underrun errors
 * occur.
117 * Note that you don't even need to have usb devices attached to the system,
118 * the errors begin to occur as soon as the OHCI driver sets the register bit
119 * to enable periodic transfers.  It appears (based on brief investigation)
120 * that the usb host controller uses so much ASB bandwidth that sometimes the
121 * DMA for MCI transfers doesn't get a bus grant in time and data gets
122 * dropped.  Adding even a modicum of network activity changes the symptom
 * from intermittent to very frequent.  Members of the AT91SAM9 family have
124 * corrected this problem, or are at least better about their use of the bus.
125 */
126#ifndef AT91_MCI_ALLOW_OVERCLOCK
127#define AT91_MCI_ALLOW_OVERCLOCK 1
128#endif
129
130/*
131 * Allocate 2 bounce buffers we'll use to endian-swap the data due to the rm9200
132 * erratum.  We use a pair of buffers because when reading that lets us begin
133 * endian-swapping the data in the first buffer while the DMA is reading into
134 * the second buffer.  (We can't use the same trick for writing because we might
135 * not get all the data in the 2nd buffer swapped before the hardware needs it;
136 * dealing with that would add complexity to the driver.)
137 *
138 * The buffers are sized at 16K each due to the way the busdma cache sync
139 * operations work on arm.  A dcache_inv_range() operation on a range larger
140 * than 16K gets turned into a dcache_wbinv_all().  That needlessly flushes the
141 * entire data cache, impacting overall system performance.
142 */
143#define BBCOUNT     2
144#ifndef __rtems__
145#define BBSIZE      (16*1024)
146#define MAX_BLOCKS  ((BBSIZE*BBCOUNT)/512)
147#else /* __rtems__ */
148#define BBSIZE      (32*1024)
149#define MAX_BLOCKS  ((BBSIZE)/512)
150/* FIXME: It would be better to split the DMA up in that case like in the
151 * original driver. But that would need some rework. */
152#endif /* __rtems__ */
153
154#ifndef __rtems__
155static int mci_debug;
156#else /* __rtems__ */
157#define mci_debug 0
158#endif /* __rtems__ */
159
/*
 * Per-instance state for the MCI host bridge.  On FreeBSD the softc is
 * protected by sc_mtx; on RTEMS the request path runs partly in interrupt
 * context, so it is protected by the sc_lock interrupt lock instead, while
 * sc_mtx is only used for mmc bus acquire/release (AT91_MCI_BUS_LOCK).
 */
struct at91_mci_softc {
	void *intrhand;			/* Interrupt handle */
	device_t dev;			/* back-pointer to our device */
	int sc_cap;			/* capability bits, CAP_* below */
#define CAP_HAS_4WIRE		1	/* Has 4 wire bus */
#define CAP_NEEDS_BYTESWAP	2	/* broken hardware needing bounce */
#define CAP_MCI1_REV2XX		4	/* MCI 1 rev 2.x */
	/* Request/transfer state bits used by the command/transfer path. */
	int flags;
#define PENDING_CMD	0x01
#define PENDING_STOP	0x02
#define CMD_MULTIREAD	0x10
#define CMD_MULTIWRITE	0x20
	int has_4wire;			/* hint/sysctl: 4-wire bus is wired up */
	int allow_overclock;		/* hint/sysctl: allow MCK/2 overclock */
	struct resource *irq_res;	/* IRQ resource */
	struct resource *mem_res;	/* Memory resource */
	struct mtx sc_mtx;		/* softc lock (RTEMS: bus lock only) */
#ifdef __rtems__
	RTEMS_INTERRUPT_LOCK_MEMBER(sc_lock)	/* ISR-safe softc lock */
#endif /* __rtems__ */
	bus_dma_tag_t dmatag;		/* tag for the bounce buffers */
	struct mmc_host host;		/* host params shared with mmc layer */
	int bus_busy;			/* non-zero while a client owns the bus */
	struct mmc_request *req;	/* request currently being processed */
	struct mmc_command *curcmd;	/* command currently on the wire */
	bus_dmamap_t bbuf_map[BBCOUNT];
	char      *  bbuf_vaddr[BBCOUNT]; /* bounce bufs in KVA space */
	uint32_t     bbuf_len[BBCOUNT];   /* len currently queued for bounce buf */
	uint32_t     bbuf_curidx;         /* which bbuf is the active DMA buffer */
	uint32_t     xfer_offset;         /* offset so far into caller's buf */
#ifdef __rtems__
	uint32_t xdma_tx_channel;	/* XDMA channel, memory -> MCI (writes) */
	uint32_t xdma_rx_channel;	/* XDMA channel, MCI -> memory (reads) */
	uint8_t xdma_tx_perid;		/* peripheral request id for TX */
	uint8_t xdma_rx_perid;		/* peripheral request id for RX */
	sXdmadCfg xdma_tx_cfg;		/* precomputed TX channel config */
	sXdmadCfg xdma_rx_cfg;		/* precomputed RX channel config */
#endif /* __rtems__ */
};
199
200/* bus entry points */
201static int at91_mci_probe(device_t dev);
202static int at91_mci_attach(device_t dev);
203static int at91_mci_detach(device_t dev);
204static void at91_mci_intr(void *);
205
206/* helper routines */
207static int at91_mci_activate(device_t dev);
208static void at91_mci_deactivate(device_t dev);
209static int at91_mci_is_mci1rev2xx(void);
210#ifdef __rtems__
211static void at91_mci_read_done(struct at91_mci_softc *sc, uint32_t sr);
212static void at91_mci_write_done(struct at91_mci_softc *sc, uint32_t sr);
213#endif /* __rtems__ */
214
/*
 * Locking.  On FreeBSD a regular mutex protects the softc.  On RTEMS the
 * MMC request path runs partly in interrupt context, so the softc is
 * protected by an RTEMS interrupt lock; the mutex is kept only to
 * serialize mmc bus acquire/release (AT91_MCI_BUS_LOCK).
 */
#ifndef __rtems__
#define AT91_MCI_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define AT91_MCI_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define AT91_MCI_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
	    "mci", MTX_DEF)
#define AT91_MCI_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define AT91_MCI_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define AT91_MCI_ASSERT_UNLOCKED(_sc) mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
#else /* __rtems__ */
#define AT91_MCI_LOCK(_sc) \
	rtems_interrupt_lock_context at91_mci_lock_context; \
	rtems_interrupt_lock_acquire(&(_sc)->sc_lock, &at91_mci_lock_context)
#define AT91_MCI_UNLOCK(_sc) \
	rtems_interrupt_lock_release(&(_sc)->sc_lock, &at91_mci_lock_context)
#define AT91_MCI_LOCK_INIT(_sc) \
	rtems_interrupt_lock_initialize(&(_sc)->sc_lock, \
	    device_get_nameunit((_sc)->dev))
/*
 * Destroy the interrupt lock initialized by AT91_MCI_LOCK_INIT.  This must
 * reference sc_lock: the previous code passed &(_sc)->sc_mtx, handing a
 * struct mtx to the RTEMS interrupt-lock API.
 */
#define AT91_MCI_LOCK_DESTROY(_sc) \
	rtems_interrupt_lock_destroy(&(_sc)->sc_lock)
#define AT91_MCI_BUS_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define AT91_MCI_BUS_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define AT91_MCI_BUS_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit((_sc)->dev), \
	    "mci", MTX_DEF)
#endif /* __rtems__ */
241
242static inline uint32_t
243RD4(struct at91_mci_softc *sc, bus_size_t off)
244{
245        return (bus_read_4(sc->mem_res, off));
246}
247
/* Write 'val' to the 32-bit MCI register at byte offset 'off'. */
static inline void
WR4(struct at91_mci_softc *sc, bus_size_t off, uint32_t val)
{
	bus_write_4(sc->mem_res, off, val);
}
253
254static void
255at91_bswap_buf(struct at91_mci_softc *sc, void * dptr, void * sptr, uint32_t memsize)
256{
257        uint32_t * dst = (uint32_t *)dptr;
258        uint32_t * src = (uint32_t *)sptr;
259        uint32_t   i;
260
261        /*
262         * If the hardware doesn't need byte-swapping, let bcopy() do the
263         * work.  Use bounce buffer even if we don't need byteswap, since
264         * buffer may straddle a page boundary, and we don't handle
265         * multi-segment transfers in hardware.  Seen from 'bsdlabel -w' which
266         * uses raw geom access to the volume.  Greg Ansley (gja (at)
267         * ansley.com)
268         */
269        if (!(sc->sc_cap & CAP_NEEDS_BYTESWAP)) {
270                memcpy(dptr, sptr, memsize);
271                return;
272        }
273
274        /*
275         * Nice performance boost for slightly unrolling this loop.
276         * (But very little extra boost for further unrolling it.)
277         */
278        for (i = 0; i < memsize; i += 16) {
279                *dst++ = bswap32(*src++);
280                *dst++ = bswap32(*src++);
281                *dst++ = bswap32(*src++);
282                *dst++ = bswap32(*src++);
283        }
284
285        /* Mop up the last 1-3 words, if any. */
286        for (i = 0; i < (memsize & 0x0F); i += 4) {
287                *dst++ = bswap32(*src++);
288        }
289}
290
291static void
292at91_mci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
293{
294        if (error != 0)
295                return;
296        *(bus_addr_t *)arg = segs[0].ds_addr;
297}
298
/*
 * Stop all DMA activity for the controller so no stale transfer can
 * restart: on classic AT91 parts by disabling the PDC and clearing every
 * current/next pointer and counter register, on SAMV71 by halting both
 * XDMA channels and clearing the MCI DMA-enable register.
 */
static void
at91_mci_pdc_disable(struct at91_mci_softc *sc)
{
#ifndef __rtems__
	WR4(sc, PDC_PTCR, PDC_PTCR_TXTDIS | PDC_PTCR_RXTDIS);
	WR4(sc, PDC_RPR, 0);
	WR4(sc, PDC_RCR, 0);
	WR4(sc, PDC_RNPR, 0);
	WR4(sc, PDC_RNCR, 0);
	WR4(sc, PDC_TPR, 0);
	WR4(sc, PDC_TCR, 0);
	WR4(sc, PDC_TNPR, 0);
	WR4(sc, PDC_TNCR, 0);
#else /* __rtems__ */
	/* On SAMV71 there is no PDC but a DMAC */
	XDMAD_StopTransfer(pXdmad, sc->xdma_rx_channel);
	XDMAD_StopTransfer(pXdmad, sc->xdma_tx_channel);
	WR4(sc, MCI_DMA, 0);
#endif /* __rtems__ */
}
319
320/*
321 * Reset the controller, then restore most of the current state.
322 *
323 * This is called after detecting an error.  It's also called after stopping a
324 * multi-block write, to un-wedge the device so that it will handle the NOTBUSY
325 * signal correctly.  See comments in at91_mci_stop_done() for more details.
326 */
static void at91_mci_reset(struct at91_mci_softc *sc)
{
	uint32_t mr;
	uint32_t sdcr;
	uint32_t dtor;
	uint32_t imr;

	/* Stop any DMA in flight before resetting the controller. */
	at91_mci_pdc_disable(sc);

	/* save current state */

	imr  = RD4(sc, MCI_IMR);
#ifndef __rtems__
	/* Only the low 15 bits of MR are preserved across the reset --
	 * presumably to drop the PDC/block-length bits above bit 15;
	 * confirm against the rm9200 datasheet. */
	mr   = RD4(sc, MCI_MR) & 0x7fff;
#else /* __rtems__ */
	/* No PDC on SAMV71, so the whole mode register is kept. */
	mr   = RD4(sc, MCI_MR);
#endif /* __rtems__ */
	sdcr = RD4(sc, MCI_SDCR);
	dtor = RD4(sc, MCI_DTOR);

	/* reset the controller */

	WR4(sc, MCI_IDR, 0xffffffff);
	WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST);

	/* restore state */

	WR4(sc, MCI_CR, MCI_CR_MCIEN|MCI_CR_PWSEN);
	WR4(sc, MCI_MR, mr);
	WR4(sc, MCI_SDCR, sdcr);
	WR4(sc, MCI_DTOR, dtor);
	WR4(sc, MCI_IER, imr);

	/*
	 * Make sure sdio interrupts will fire.  Not sure why reading
	 * SR ensures that, but this is in the linux driver.
	 */

	RD4(sc, MCI_SR);
}
367
/*
 * Bring the controller from reset into a usable idle state: maximum data
 * timeout, a slow initial clock (CLKDIV = 74), the configured slot
 * selected, and the device enabled with power-save mode.
 */
static void
at91_mci_init(device_t dev)
{
	struct at91_mci_softc *sc = device_get_softc(dev);
	uint32_t val;

	WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST); /* device into reset */
	WR4(sc, MCI_IDR, 0xffffffff);		/* Turn off interrupts */
	WR4(sc, MCI_DTOR, MCI_DTOR_DTOMUL_1M | 1);
#ifndef __rtems__
	val = MCI_MR_PDCMODE;
#else /* __rtems__ */
	/* SAMV71 uses the XDMAC instead of PDC mode; read/write proof
	 * keeps the MCI clock stopped on FIFO over/underrun. */
	val = 0;
	val |= MCI_MR_RDPROOF | MCI_MR_WRPROOF;
#endif /* __rtems__ */
	val |= 0x34a;				/* PWSDIV = 3; CLKDIV = 74 */
//	if (sc->sc_cap & CAP_MCI1_REV2XX)
//		val |= MCI_MR_RDPROOF | MCI_MR_WRPROOF;
	WR4(sc, MCI_MR, val);
#ifndef  AT91_MCI_SLOT_B
	WR4(sc, MCI_SDCR, 0);			/* SLOT A, 1 bit bus */
#else
	/*
	 * XXX Really should add second "unit" but nobody using
	 * a two slot card that we know of. XXX
	 */
	WR4(sc, MCI_SDCR, 1);			/* SLOT B, 1 bit bus */
#endif
	/*
	 * Enable controller, including power-save.  The slower clock
	 * of the power-save mode is only in effect when there is no
	 * transfer in progress, so it can be left in this mode all
	 * the time.
	 */
	WR4(sc, MCI_CR, MCI_CR_MCIEN|MCI_CR_PWSEN);
}
404
/*
 * Quiesce the controller: mask all interrupts, stop any DMA in flight and
 * put the device into reset.
 */
static void
at91_mci_fini(device_t dev)
{
	struct at91_mci_softc *sc = device_get_softc(dev);

	WR4(sc, MCI_IDR, 0xffffffff);		/* Turn off interrupts */
	at91_mci_pdc_disable(sc);
	WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST); /* device into reset */
}
414
415static int
416at91_mci_probe(device_t dev)
417{
418#ifdef FDT
419        if (!ofw_bus_is_compatible(dev, "atmel,hsmci"))
420                return (ENXIO);
421#endif
422        device_set_desc(dev, "MCI mmc/sd host bridge");
423        return (0);
424}
425
426static int
427at91_mci_attach(device_t dev)
428{
429        struct at91_mci_softc *sc = device_get_softc(dev);
430        struct sysctl_ctx_list *sctx;
431        struct sysctl_oid *soid;
432        device_t child;
433        int err, i;
434
435#ifdef __rtems__
436#ifdef LIBBSP_ARM_ATSAM_BSP_H
437        PMC_EnablePeripheral(ID_HSMCI);
438        sc->xdma_tx_channel = XDMAD_ALLOC_FAILED;
439        sc->xdma_rx_channel = XDMAD_ALLOC_FAILED;
440#endif /* LIBBSP_ARM_ATSAM_BSP_H */
441#endif /* __rtems__ */
442        sctx = device_get_sysctl_ctx(dev);
443        soid = device_get_sysctl_tree(dev);
444
445        sc->dev = dev;
446        sc->sc_cap = 0;
447#ifndef __rtems__
448        if (at91_is_rm92())
449                sc->sc_cap |= CAP_NEEDS_BYTESWAP;
450#endif /* __rtems__ */
451        /*
452         * MCI1 Rev 2 controllers need some workarounds, flag if so.
453         */
454        if (at91_mci_is_mci1rev2xx())
455                sc->sc_cap |= CAP_MCI1_REV2XX;
456
457        err = at91_mci_activate(dev);
458        if (err)
459                goto out;
460
461#ifdef __rtems__
462        eXdmadRC rc;
463
464        /* Prepare some configurations so they don't have to be fetched on every
465         * setup */
466        sc->xdma_rx_perid = XDMAIF_Get_ChannelNumber(ID_HSMCI,
467            XDMAD_TRANSFER_RX);
468        sc->xdma_tx_perid = XDMAIF_Get_ChannelNumber(ID_HSMCI,
469            XDMAD_TRANSFER_TX);
470        memset(&sc->xdma_rx_cfg, 0, sizeof(sc->xdma_rx_cfg));
471        sc->xdma_rx_cfg.mbr_cfg = XDMAC_CC_TYPE_PER_TRAN |
472            XDMAC_CC_MBSIZE_SINGLE | XDMAC_CC_DSYNC_PER2MEM |
473            XDMAC_CC_SWREQ_HWR_CONNECTED | XDMAC_CC_MEMSET_NORMAL_MODE |
474            XDMAC_CC_CSIZE_CHK_1 | XDMAC_CC_DWIDTH_WORD |
475            XDMAC_CC_SIF_AHB_IF1 | XDMAC_CC_DIF_AHB_IF1 |
476            XDMAC_CC_SAM_FIXED_AM | XDMAC_CC_DAM_INCREMENTED_AM |
477            XDMAC_CC_PERID(
478                XDMAIF_Get_ChannelNumber(ID_HSMCI,XDMAD_TRANSFER_RX));
479        memset(&sc->xdma_tx_cfg, 0, sizeof(sc->xdma_tx_cfg));
480        sc->xdma_tx_cfg.mbr_cfg = XDMAC_CC_TYPE_PER_TRAN |
481            XDMAC_CC_MBSIZE_SINGLE | XDMAC_CC_DSYNC_MEM2PER |
482            XDMAC_CC_SWREQ_HWR_CONNECTED | XDMAC_CC_MEMSET_NORMAL_MODE |
483            XDMAC_CC_CSIZE_CHK_1 | XDMAC_CC_DWIDTH_WORD |
484            XDMAC_CC_SIF_AHB_IF1 | XDMAC_CC_DIF_AHB_IF1 |
485            XDMAC_CC_SAM_INCREMENTED_AM | XDMAC_CC_DAM_FIXED_AM |
486            XDMAC_CC_PERID(
487                XDMAIF_Get_ChannelNumber(ID_HSMCI,XDMAD_TRANSFER_TX));
488
489        sc->xdma_tx_channel = XDMAD_AllocateChannel(pXdmad,
490            XDMAD_TRANSFER_MEMORY, ID_HSMCI);
491        if (sc->xdma_tx_channel == XDMAD_ALLOC_FAILED)
492                goto out;
493
494        /* FIXME: The two DMA channels are not really necessary for the driver.
495         * But the XDMAD interface does not allow to allocate one and use it
496         * into two directions. The current (2017-07-11) implementation of
497         * the XDMAD interface should work with it. So we might could try it. */
498        sc->xdma_rx_channel = XDMAD_AllocateChannel(pXdmad, ID_HSMCI,
499            XDMAD_TRANSFER_MEMORY);
500        if (sc->xdma_rx_channel == XDMAD_ALLOC_FAILED)
501                goto out;
502
503        rc = XDMAD_PrepareChannel(pXdmad, sc->xdma_rx_channel);
504        if (rc != XDMAD_OK)
505                goto out;
506
507        rc = XDMAD_PrepareChannel(pXdmad, sc->xdma_tx_channel);
508        if (rc != XDMAD_OK)
509                goto out;
510
511        AT91_MCI_BUS_LOCK_INIT(sc);
512#endif /* __rtems__ */
513        AT91_MCI_LOCK_INIT(sc);
514
515        at91_mci_fini(dev);
516        at91_mci_init(dev);
517
518        /*
519         * Allocate DMA tags and maps and bounce buffers.
520         *
521         * The parms in the tag_create call cause the dmamem_alloc call to
522         * create each bounce buffer as a single contiguous buffer of BBSIZE
523         * bytes aligned to a 4096 byte boundary.
524         *
525         * Do not use DMA_COHERENT for these buffers because that maps the
526         * memory as non-cachable, which prevents cache line burst fills/writes,
527         * which is something we need since we're trying to overlap the
528         * byte-swapping with the DMA operations.
529         */
530        err = bus_dma_tag_create(bus_get_dma_tag(dev), 4096, 0,
531            BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
532            BBSIZE, 1, BBSIZE, 0, NULL, NULL, &sc->dmatag);
533        if (err != 0)
534                goto out;
535
536        for (i = 0; i < BBCOUNT; ++i) {
537                err = bus_dmamem_alloc(sc->dmatag, (void **)&sc->bbuf_vaddr[i],
538                    BUS_DMA_NOWAIT, &sc->bbuf_map[i]);
539                if (err != 0)
540                        goto out;
541        }
542
543        /*
544         * Activate the interrupt
545         */
546#ifndef __rtems__
547        err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
548            NULL, at91_mci_intr, sc, &sc->intrhand);
549#else /* __rtems__ */
550        err = rtems_interrupt_handler_install(rman_get_start(sc->irq_res),
551            device_get_nameunit(dev), RTEMS_INTERRUPT_SHARED, at91_mci_intr,
552            sc);
553#endif /* __rtems__ */
554        if (err) {
555                AT91_MCI_LOCK_DESTROY(sc);
556                goto out;
557        }
558
559        /*
560         * Allow 4-wire to be initially set via #define.
561         * Allow a device hint to override that.
562         * Allow a sysctl to override that.
563         */
564#if defined(AT91_MCI_HAS_4WIRE) && AT91_MCI_HAS_4WIRE != 0
565        sc->has_4wire = 1;
566#endif
567        resource_int_value(device_get_name(dev), device_get_unit(dev),
568                           "4wire", &sc->has_4wire);
569        SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "4wire",
570            CTLFLAG_RW, &sc->has_4wire, 0, "has 4 wire SD Card bus");
571        if (sc->has_4wire)
572                sc->sc_cap |= CAP_HAS_4WIRE;
573
574        sc->allow_overclock = AT91_MCI_ALLOW_OVERCLOCK;
575        resource_int_value(device_get_name(dev), device_get_unit(dev),
576                           "allow_overclock", &sc->allow_overclock);
577        SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "allow_overclock",
578            CTLFLAG_RW, &sc->allow_overclock, 0,
579            "Allow up to 30MHz clock for 25MHz request when next highest speed 15MHz or less.");
580
581#ifndef __rtems__
582        SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "debug",
583            CTLFLAG_RWTUN, &mci_debug, 0, "enable debug output");
584#endif /* __rtems__ */
585
586        /*
587         * Our real min freq is master_clock/512, but upper driver layers are
588         * going to set the min speed during card discovery, and the right speed
589         * for that is 400kHz, so advertise a safe value just under that.
590         *
591         * For max speed, while the rm9200 manual says the max is 50mhz, it also
592         * says it supports only the SD v1.0 spec, which means the real limit is
593         * 25mhz. On the other hand, historical use has been to slightly violate
594         * the standard by running the bus at 30MHz.  For more information on
595         * that, see the comments at the top of this file.
596         */
597        sc->host.f_min = 375000;
598        sc->host.f_max = at91_master_clock / 2;
599        if (sc->host.f_max > 25000000)
600                sc->host.f_max = 25000000;
601        sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
602        sc->host.caps = 0;
603        if (sc->sc_cap & CAP_HAS_4WIRE)
604                sc->host.caps |= MMC_CAP_4_BIT_DATA;
605
606        child = device_add_child(dev, "mmc", 0);
607        device_set_ivars(dev, &sc->host);
608        err = bus_generic_attach(dev);
609out:
610        if (err)
611                at91_mci_deactivate(dev);
612        return (err);
613}
614
615static int
616at91_mci_detach(device_t dev)
617{
618        struct at91_mci_softc *sc = device_get_softc(dev);
619
620        at91_mci_fini(dev);
621        at91_mci_deactivate(dev);
622
623        bus_dmamem_free(sc->dmatag, sc->bbuf_vaddr[0], sc->bbuf_map[0]);
624        bus_dmamem_free(sc->dmatag, sc->bbuf_vaddr[1], sc->bbuf_map[1]);
625        bus_dma_tag_destroy(sc->dmatag);
626
627        return (EBUSY); /* XXX */
628}
629
/*
 * Allocate and activate the register-window and IRQ resources.  Returns 0
 * on success; on failure releases anything partially acquired via
 * at91_mci_deactivate() and returns ENOMEM.
 */
static int
at91_mci_activate(device_t dev)
{
	struct at91_mci_softc *sc;
	int rid;

	sc = device_get_softc(dev);
	rid = 0;
	sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->mem_res == NULL)
		goto errout;

	rid = 0;
	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE);
	if (sc->irq_res == NULL)
		goto errout;

	return (0);
errout:
	at91_mci_deactivate(dev);
	return (ENOMEM);
}
654
/*
 * Undo at91_mci_activate() (and, on RTEMS, the XDMA channel allocation):
 * tear down the interrupt, detach children and release all resources.
 * Safe to call with partially-initialized state; each pointer is checked
 * and cleared.
 */
static void
at91_mci_deactivate(device_t dev)
{
	struct at91_mci_softc *sc;

	sc = device_get_softc(dev);
	if (sc->intrhand)
		bus_teardown_intr(dev, sc->irq_res, sc->intrhand);
	sc->intrhand = NULL;
	bus_generic_detach(sc->dev);
	if (sc->mem_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(sc->mem_res), sc->mem_res);
	sc->mem_res = NULL;
	if (sc->irq_res)
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(sc->irq_res), sc->irq_res);
	sc->irq_res = NULL;
#ifdef __rtems__
	/* Channels are pre-set to XDMAD_ALLOC_FAILED in attach, so this is
	 * safe even when allocation never happened. */
	if (sc->xdma_rx_channel != XDMAD_ALLOC_FAILED) {
		XDMAD_FreeChannel(pXdmad, sc->xdma_rx_channel);
	}
	if (sc->xdma_tx_channel != XDMAD_ALLOC_FAILED) {
		XDMAD_FreeChannel(pXdmad, sc->xdma_tx_channel);
	}
#endif /* __rtems__ */
	return;
}
683
684static int
685at91_mci_is_mci1rev2xx(void)
686{
687
688#ifndef __rtems__
689        switch (soc_info.type) {
690        case AT91_T_SAM9260:
691        case AT91_T_SAM9263:
692        case AT91_T_CAP9:
693        case AT91_T_SAM9G10:
694        case AT91_T_SAM9G20:
695        case AT91_T_SAM9RL:
696                return(1);
697        default:
698                return (0);
699        }
700#else /* __rtems__ */
701        /* Currently only supports the SAM V71 */
702        return (1);
703#endif /* __rtems__ */
704}
705
/*
 * mmcbr update_ios method: program clock divider and bus width from the
 * host's requested ios settings.
 */
static int
at91_mci_update_ios(device_t brdev, device_t reqdev)
{
	struct at91_mci_softc *sc;
	struct mmc_ios *ios;
	uint32_t clkdiv;
	uint32_t freq;

	sc = device_get_softc(brdev);
	ios = &sc->host.ios;

	/*
	 * Calculate our closest available clock speed that doesn't exceed the
	 * requested speed.
	 *
	 * When overclocking is allowed, the requested clock is 25MHz, the
	 * computed frequency is 15MHz or smaller and clockdiv is 1, use
	 * clockdiv of 0 to double that.  If less than 12.5MHz, double
	 * regardless of the overclocking setting.
	 *
	 * Whatever we come up with, store it back into ios->clock so that the
	 * upper layer drivers can report the actual speed of the bus.
	 */
	if (ios->clock == 0) {
		WR4(sc, MCI_CR, MCI_CR_MCIDIS);
		clkdiv = 0;
	} else {
		WR4(sc, MCI_CR, MCI_CR_MCIEN|MCI_CR_PWSEN);
		/* MCI clock = MCK / (2 * (CLKDIV + 1)).  Round CLKDIV so the
		 * resulting frequency never exceeds the request. */
		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;
		freq = at91_master_clock / ((clkdiv+1) * 2);
		if (clkdiv == 1 && ios->clock == 25000000 && freq <= 15000000) {
			if (sc->allow_overclock || freq <= 12500000) {
				clkdiv = 0;
				freq = at91_master_clock / ((clkdiv+1) * 2);
			}
		}
		ios->clock = freq;
	}
	if (ios->bus_width == bus_width_4)
		WR4(sc, MCI_SDCR, RD4(sc, MCI_SDCR) | MCI_SDCR_SDCBUS);
	else
		WR4(sc, MCI_SDCR, RD4(sc, MCI_SDCR) & ~MCI_SDCR_SDCBUS);
	WR4(sc, MCI_MR, (RD4(sc, MCI_MR) & ~MCI_MR_CLKDIV) | clkdiv);
	/* Do we need a settle time here? */
	/* XXX We need to turn the device on/off here with a GPIO pin */
	return (0);
}
756
757#ifdef __rtems__
758static LinkedListDescriporView1 dma_desc[MAX_BLOCKS];
759
/*
 * Build a linked-list XDMA descriptor chain (one descriptor per block) for
 * a read (RDR -> memory) or write (memory -> TDR) transfer, sync the
 * caches and start the channel.
 *
 * NOTE(review): dma_desc is a single static array, so this assumes only
 * one transfer is in flight at a time -- confirm the request path
 * guarantees that before reusing this helper.
 */
static void
at91_mci_setup_xdma(struct at91_mci_softc *sc, bool read, uint32_t block_size,
    uint32_t number_blocks, bus_addr_t paddr, uint32_t len)
{
	sXdmadCfg *xdma_cfg;
	uint32_t xdma_channel;
	/* Fetch NDV1-view descriptors and update both src and dst params. */
	const uint32_t xdma_cndc = XDMAC_CNDC_NDVIEW_NDV1 |
	    XDMAC_CNDC_NDE_DSCR_FETCH_EN |
	    XDMAC_CNDC_NDSUP_SRC_PARAMS_UPDATED |
	    XDMAC_CNDC_NDDUP_DST_PARAMS_UPDATED;
	/* Bus addresses of the MCI receive and transmit data registers. */
	const uint32_t sa_rdr = (uint32_t)(sc->mem_res->r_bushandle + MCI_RDR);
	const uint32_t da_tdr = (uint32_t)(sc->mem_res->r_bushandle + MCI_TDR);
	const uint32_t xdma_interrupt = XDMAC_CIE_BIE | XDMAC_CIE_DIE |
	    XDMAC_CIE_FIE | XDMAC_CIE_RBIE | XDMAC_CIE_WBIE | XDMAC_CIE_ROIE;
	eXdmadRC rc;
	size_t i;

	if (read) {
		xdma_cfg = &sc->xdma_rx_cfg;
		xdma_channel = sc->xdma_rx_channel;
	} else {
		xdma_cfg = &sc->xdma_tx_cfg;
		xdma_channel = sc->xdma_tx_channel;
	}

	/* One descriptor per block; microblock length is in words. */
	for (i = 0; i < number_blocks; ++i) {
		if (read) {
			dma_desc[i].mbr_sa = sa_rdr;
			dma_desc[i].mbr_da = ((uint32_t)paddr) + i * block_size;
		} else {
			dma_desc[i].mbr_sa = ((uint32_t)paddr) + i * block_size;
			dma_desc[i].mbr_da = da_tdr;
		}
		dma_desc[i].mbr_ubc = XDMA_UBC_NVIEW_NDV1 |
		    XDMA_UBC_NDEN_UPDATED | (block_size/4);
		if (i == number_blocks - 1) {
			/* Last descriptor: stop fetching, terminate chain. */
			dma_desc[i].mbr_ubc |= XDMA_UBC_NDE_FETCH_DIS;
			dma_desc[i].mbr_nda = 0;
		} else {
			dma_desc[i].mbr_ubc |= XDMA_UBC_NDE_FETCH_EN;
			dma_desc[i].mbr_nda = (uint32_t) &dma_desc[i+1];
		}
	}

	rc = XDMAD_ConfigureTransfer(pXdmad, xdma_channel, xdma_cfg, xdma_cndc,
	    (uint32_t)dma_desc, xdma_interrupt);
	if (rc != XDMAD_OK)
		panic("Could not configure XDMA: %d.", rc);

	/* FIXME: Is that correct? */
	if (read) {
		rtems_cache_invalidate_multiple_data_lines(paddr, len);
	} else {
		rtems_cache_flush_multiple_data_lines(paddr, len);
	}
	/* The DMA engine reads the descriptors from memory: flush them. */
	rtems_cache_flush_multiple_data_lines(dma_desc, sizeof(dma_desc));

	rc = XDMAD_StartTransfer(pXdmad, xdma_channel);
	if (rc != XDMAD_OK)
		panic("Could not start XDMA: %d.", rc);

}
822
823#endif /* __rtems__ */
/*
 * Program the controller for one MMC/SD command, with or without an
 * associated data transfer, and enable the interrupts that advance the
 * state machine from there.  Called with the driver lock held, from
 * at91_mci_next_operation().
 *
 * Non-data commands are started immediately and complete on CMDRDY.
 * Data commands set up DMA first (PDC on FreeBSD, XDMA on RTEMS) using
 * bounce buffers, then start the command; completion is driven by the
 * interrupt handler (ENDRX/TXBUFE/BLKE on FreeBSD, XFRDONE on RTEMS).
 */
static void
at91_mci_start_cmd(struct at91_mci_softc *sc, struct mmc_command *cmd)
{
	uint32_t cmdr, mr;
#ifdef __rtems__
	uint32_t number_blocks;
	uint32_t block_size;
#endif /* __rtems__ */
	struct mmc_data *data;

	sc->curcmd = cmd;
	data = cmd->data;

	/* XXX Upper layers don't always set this */
	cmd->mrq = sc->req;

	/* Begin setting up command register. */

	cmdr = cmd->opcode;

	if (sc->host.ios.bus_mode == opendrain)
		cmdr |= MCI_CMDR_OPDCMD;

	/* Set up response handling.  Allow max timeout for responses. */

	if (MMC_RSP(cmd->flags) == MMC_RSP_NONE)
		cmdr |= MCI_CMDR_RSPTYP_NO;
	else {
		cmdr |= MCI_CMDR_MAXLAT;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= MCI_CMDR_RSPTYP_136;
		else
			cmdr |= MCI_CMDR_RSPTYP_48;
	}

	/*
	 * If there is no data transfer, just set up the right interrupt mask
	 * and start the command.
	 *
	 * The interrupt mask needs to be CMDRDY plus all non-data-transfer
	 * errors. It's important to leave the transfer-related errors out, to
	 * avoid spurious timeout or crc errors on a STOP command following a
	 * multiblock read.  When a multiblock read is in progress, sending a
	 * STOP in the middle of a block occasionally triggers such errors, but
	 * we're totally disinterested in them because we've already gotten all
	 * the data we wanted without error before sending the STOP command.
	 */

	if (data == NULL) {
		uint32_t ier = MCI_SR_CMDRDY |
		    MCI_SR_RTOE | MCI_SR_RENDE |
		    MCI_SR_RCRCE | MCI_SR_RDIRE | MCI_SR_RINDE;

		at91_mci_pdc_disable(sc);

		if (cmd->opcode == MMC_STOP_TRANSMISSION)
			cmdr |= MCI_CMDR_TRCMD_STOP;

		/* Ignore response CRC on CMD2 and ACMD41, per standard. */

		if (cmd->opcode == MMC_SEND_OP_COND ||
		    cmd->opcode == ACMD_SD_SEND_OP_COND)
			ier &= ~MCI_SR_RCRCE;

		if (mci_debug)
			printf("CMDR %x (opcode %d) ARGR %x no data\n",
			    cmdr, cmd->opcode, cmd->arg);

		/* Argument and command registers start the transaction;
		 * then mask everything except what this command needs. */
		WR4(sc, MCI_ARGR, cmd->arg);
		WR4(sc, MCI_CMDR, cmdr);
		WR4(sc, MCI_IDR, 0xffffffff);
		WR4(sc, MCI_IER, ier);
		return;
	}

	/* There is data, set up the transfer-related parts of the command. */

	if (data->flags & MMC_DATA_READ)
		cmdr |= MCI_CMDR_TRDIR;

	if (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE))
		cmdr |= MCI_CMDR_TRCMD_START;

	if (data->flags & MMC_DATA_STREAM)
		cmdr |= MCI_CMDR_TRTYP_STREAM;
	else if (data->flags & MMC_DATA_MULTI) {
		cmdr |= MCI_CMDR_TRTYP_MULTIPLE;
		/* Remember direction so the STOP handling can apply the
		 * right hardware-bug workaround later. */
		sc->flags |= (data->flags & MMC_DATA_READ) ?
		    CMD_MULTIREAD : CMD_MULTIWRITE;
	}

	/*
	 * Disable PDC until we're ready.
	 *
	 * Set block size and turn on PDC mode for dma xfer.
	 * Note that the block size is the smaller of the amount of data to be
	 * transferred, or 512 bytes.  The 512 size is fixed by the standard;
	 * smaller blocks are possible, but never larger.
	 */

#ifndef __rtems__
	WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS);

	mr = RD4(sc,MCI_MR) & ~MCI_MR_BLKLEN;
	mr |=  min(data->len, 512) << 16;
	WR4(sc, MCI_MR, mr | MCI_MR_PDCMODE|MCI_MR_PDCPADV);
#else /* __rtems__ */
	/* On RTEMS the block geometry goes into MCI_BLKR and the DMA
	 * interface register instead of the PDC-mode bits in MCI_MR. */
	mr = RD4(sc,MCI_MR);
	WR4(sc, MCI_MR, mr | MCI_MR_PDCPADV);

	WR4(sc, MCI_DMA, MCI_DMA_DMAEN | MCI_DMA_CHKSIZE_1);

	block_size = min(data->len, 512);
	number_blocks = data->len / block_size;
	WR4(sc, MCI_BLKR, block_size << 16 | number_blocks);
#endif /* __rtems__ */

	/*
	 * Set up DMA.
	 *
	 * Use bounce buffers even if we don't need to byteswap, because doing
	 * multi-block IO with large DMA buffers is way fast (compared to
	 * single-block IO), even after incurring the overhead of also copying
	 * from/to the caller's buffers (which may be in non-contiguous physical
	 * pages).
	 *
	 * In an ideal non-byteswap world we could create a dma tag that allows
	 * for discontiguous segments and do the IO directly from/to the
	 * caller's buffer(s), using ENDRX/ENDTX interrupts to chain the
	 * discontiguous buffers through the PDC. Someday.
	 *
	 * If a read is bigger than 2k, split it in half so that we can start
	 * byte-swapping the first half while the second half is on the wire.
	 * It would be best if we could split it into 8k chunks, but we can't
	 * always keep up with the byte-swapping due to other system activity,
	 * and if an RXBUFF interrupt happens while we're still handling the
	 * byte-swap from the prior buffer (IE, we haven't returned from
	 * handling the prior interrupt yet), then data will get dropped on the
	 * floor and we can't easily recover from that.  The right fix for that
	 * would be to have the interrupt handling only keep the DMA flowing and
	 * enqueue filled buffers to be byte-swapped in a non-interrupt context.
	 * Even that won't work on the write side of things though; in that
	 * context we have to have all the data ready to go before starting the
	 * dma.
	 *
	 * XXX what about stream transfers?
	 */
	sc->xfer_offset = 0;
	sc->bbuf_curidx = 0;

	if (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE)) {
		uint32_t len;
		uint32_t remaining = data->len;
		bus_addr_t paddr;
		int err;

		/* RTEMS uses a single bounce buffer, FreeBSD may chain two. */
#ifndef __rtems__
		if (remaining > (BBCOUNT*BBSIZE))
#else /* __rtems__ */
		if (remaining > (BBSIZE))
#endif /* __rtems__ */
			panic("IO read size exceeds MAXDATA\n");

		if (data->flags & MMC_DATA_READ) {
#ifndef __rtems__
			if (remaining > 2048) // XXX
				len = remaining / 2;
			else
#else
			/* FIXME: This reduces performance. Set up DMA in two
			 * parts instead like done on AT91. */
#endif /* __rtems__ */
				len = remaining;
			err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[0],
			    sc->bbuf_vaddr[0], len, at91_mci_getaddr,
			    &paddr, BUS_DMA_NOWAIT);
			if (err != 0)
				panic("IO read dmamap_load failed\n");
			bus_dmamap_sync(sc->dmatag, sc->bbuf_map[0],
			    BUS_DMASYNC_PREREAD);
#ifndef __rtems__
			WR4(sc, PDC_RPR, paddr);
			WR4(sc, PDC_RCR, len / 4);
			sc->bbuf_len[0] = len;
			remaining -= len;
			if (remaining == 0) {
				sc->bbuf_len[1] = 0;
			} else {
				len = remaining;
				err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[1],
				    sc->bbuf_vaddr[1], len, at91_mci_getaddr,
				    &paddr, BUS_DMA_NOWAIT);
				if (err != 0)
					panic("IO read dmamap_load failed\n");
				bus_dmamap_sync(sc->dmatag, sc->bbuf_map[1],
				    BUS_DMASYNC_PREREAD);
				WR4(sc, PDC_RNPR, paddr);
				WR4(sc, PDC_RNCR, len / 4);
				sc->bbuf_len[1] = len;
				remaining -= len;
			}
			WR4(sc, PDC_PTCR, PDC_PTCR_RXTEN);
#else /* __rtems__ */
			at91_mci_setup_xdma(sc, true, block_size,
			    number_blocks, paddr, len);

			sc->bbuf_len[0] = len;
			remaining -= len;
			sc->bbuf_len[1] = 0;
			if (remaining != 0)
				panic("Still rx-data left. This should never happen.");
#endif /* __rtems__ */
		} else {
			/* Writes must be byte-swapped into the bounce buffer
			 * before the DMA is armed. */
			len = min(BBSIZE, remaining);
			at91_bswap_buf(sc, sc->bbuf_vaddr[0], data->data, len);
			err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[0],
			    sc->bbuf_vaddr[0], len, at91_mci_getaddr,
			    &paddr, BUS_DMA_NOWAIT);
			if (err != 0)
				panic("IO write dmamap_load failed\n");
			bus_dmamap_sync(sc->dmatag, sc->bbuf_map[0],
			    BUS_DMASYNC_PREWRITE);
#ifndef __rtems__
			/*
			 * Erratum workaround:  PDC transfer length on a write
			 * must not be smaller than 12 bytes (3 words); only
			 * blklen bytes (set above) are actually transferred.
			 */
			WR4(sc, PDC_TPR,paddr);
			WR4(sc, PDC_TCR, (len < 12) ? 3 : len / 4);
			sc->bbuf_len[0] = len;
			remaining -= len;
			if (remaining == 0) {
				sc->bbuf_len[1] = 0;
			} else {
				len = remaining;
				at91_bswap_buf(sc, sc->bbuf_vaddr[1],
				    ((char *)data->data)+BBSIZE, len);
				err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[1],
				    sc->bbuf_vaddr[1], len, at91_mci_getaddr,
				    &paddr, BUS_DMA_NOWAIT);
				if (err != 0)
					panic("IO write dmamap_load failed\n");
				bus_dmamap_sync(sc->dmatag, sc->bbuf_map[1],
				    BUS_DMASYNC_PREWRITE);
				WR4(sc, PDC_TNPR, paddr);
				WR4(sc, PDC_TNCR, (len < 12) ? 3 : len / 4);
				sc->bbuf_len[1] = len;
				remaining -= len;
			}
			/* do not enable PDC xfer until CMDRDY asserted */
#else /* __rtems__ */
			at91_mci_setup_xdma(sc, false, block_size,
			    number_blocks, paddr, len);

			sc->bbuf_len[0] = len;
			remaining -= len;
			sc->bbuf_len[1] = 0;
			if (remaining != 0)
				panic("Still tx-data left. This should never happen.");

#endif /* __rtems__ */
		}
		data->xfer_len = 0; /* XXX what's this? appears to be unused. */
	}

	if (mci_debug)
		printf("CMDR %x (opcode %d) ARGR %x with data len %d\n",
		       cmdr, cmd->opcode, cmd->arg, cmd->data->len);

	/* Kick off the command; completion continues in the interrupt
	 * handler on CMDRDY (or an error status bit). */
	WR4(sc, MCI_ARGR, cmd->arg);
	WR4(sc, MCI_CMDR, cmdr);
	WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_CMDRDY);
}
1098
1099static void
1100at91_mci_next_operation(struct at91_mci_softc *sc)
1101{
1102        struct mmc_request *req;
1103
1104        req = sc->req;
1105        if (req == NULL)
1106                return;
1107
1108        if (sc->flags & PENDING_CMD) {
1109                sc->flags &= ~PENDING_CMD;
1110                at91_mci_start_cmd(sc, req->cmd);
1111                return;
1112        } else if (sc->flags & PENDING_STOP) {
1113                sc->flags &= ~PENDING_STOP;
1114                at91_mci_start_cmd(sc, req->stop);
1115                return;
1116        }
1117
1118        WR4(sc, MCI_IDR, 0xffffffff);
1119        sc->req = NULL;
1120        sc->curcmd = NULL;
1121        //printf("req done\n");
1122        req->done(req);
1123}
1124
1125static int
1126at91_mci_request(device_t brdev, device_t reqdev, struct mmc_request *req)
1127{
1128        struct at91_mci_softc *sc = device_get_softc(brdev);
1129
1130        AT91_MCI_LOCK(sc);
1131        if (sc->req != NULL) {
1132                AT91_MCI_UNLOCK(sc);
1133                return (EBUSY);
1134        }
1135        //printf("new req\n");
1136        sc->req = req;
1137        sc->flags = PENDING_CMD;
1138        if (sc->req->stop)
1139                sc->flags |= PENDING_STOP;
1140        at91_mci_next_operation(sc);
1141        AT91_MCI_UNLOCK(sc);
1142        return (0);
1143}
1144
1145static int
1146at91_mci_get_ro(device_t brdev, device_t reqdev)
1147{
1148        return (0);
1149}
1150
/*
 * MMC bridge "acquire_host" method: claim exclusive use of the bus,
 * sleeping (re-checking every hz/5 ticks) while another client holds
 * it.  On RTEMS a dedicated bus mutex is used instead of the main
 * driver lock, since the latter is an interrupt lock that cannot be
 * held across msleep() (see the XFRDONE/interrupt rework).  Always
 * returns 0.
 */
static int
at91_mci_acquire_host(device_t brdev, device_t reqdev)
{
	struct at91_mci_softc *sc = device_get_softc(brdev);
	int err = 0;

#ifndef __rtems__
	AT91_MCI_LOCK(sc);
#else /* __rtems__ */
	AT91_MCI_BUS_LOCK(sc);
#endif /* __rtems__ */
	/* bus_busy is a hold count; wait until the bus is free. */
	while (sc->bus_busy)
		msleep(sc, &sc->sc_mtx, PZERO, "mciah", hz / 5);
	sc->bus_busy++;
#ifndef __rtems__
	AT91_MCI_UNLOCK(sc);
#else /* __rtems__ */
	AT91_MCI_BUS_UNLOCK(sc);
#endif /* __rtems__ */
	return (err);
}
1172
/*
 * MMC bridge "release_host" method: drop the hold taken by
 * at91_mci_acquire_host() and wake any thread sleeping in its
 * msleep() loop.  Uses the same lock choice as acquire (bus mutex on
 * RTEMS, main driver lock on FreeBSD).  Always returns 0.
 */
static int
at91_mci_release_host(device_t brdev, device_t reqdev)
{
	struct at91_mci_softc *sc = device_get_softc(brdev);

#ifndef __rtems__
	AT91_MCI_LOCK(sc);
#else /* __rtems__ */
	AT91_MCI_BUS_LOCK(sc);
#endif /* __rtems__ */
	sc->bus_busy--;
	wakeup(sc);
#ifndef __rtems__
	AT91_MCI_UNLOCK(sc);
#else /* __rtems__ */
	AT91_MCI_BUS_UNLOCK(sc);
#endif /* __rtems__ */
	return (0);
}
1192
/*
 * Handle completion of one DMA bounce buffer of a read (ENDRX on
 * FreeBSD, XFRDONE on RTEMS): byte-swap/copy the finished buffer into
 * the caller's data buffer, then either finish the command or re-arm
 * the interrupt for the next buffer.
 */
static void
at91_mci_read_done(struct at91_mci_softc *sc, uint32_t sr)
{
	struct mmc_command *cmd = sc->curcmd;
	char * dataptr = (char *)cmd->data->data;
	uint32_t curidx = sc->bbuf_curidx;
	uint32_t len = sc->bbuf_len[curidx];

	/*
	 * We arrive here when a DMA transfer for a read is done, whether it's
	 * a single or multi-block read.
	 *
	 * We byte-swap the buffer that just completed, and if that is the
	 * last buffer that's part of this read then we move on to the next
	 * operation, otherwise we wait for another ENDRX for the next bufer.
	 */

	/* On RTEMS the bounce-buffer dmamap handling is skipped here;
	 * NOTE(review): presumably cache maintenance was already done in
	 * at91_mci_setup_xdma() -- confirm. */
#ifndef __rtems__
	bus_dmamap_sync(sc->dmatag, sc->bbuf_map[curidx], BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->dmatag, sc->bbuf_map[curidx]);
#endif /* __rtems__ */

	at91_bswap_buf(sc, dataptr + sc->xfer_offset, sc->bbuf_vaddr[curidx], len);

	if (mci_debug) {
		printf("read done sr %x curidx %d len %d xfer_offset %d\n",
		       sr, curidx, len, sc->xfer_offset);
	}

	sc->xfer_offset += len;
	sc->bbuf_curidx = !curidx; /* swap buffers */

	/*
	 * If we've transferred all the data, move on to the next operation.
	 *
	 * If we're still transferring the last buffer, RNCR is already zero but
	 * we have to write a zero anyway to clear the ENDRX status so we don't
	 * re-interrupt until the last buffer is done.
	 */
	if (sc->xfer_offset == cmd->data->len) {
		WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS);
		cmd->error = MMC_ERR_NONE;
		at91_mci_next_operation(sc);
	} else {
		WR4(sc, PDC_RNCR, 0);
#ifndef __rtems__
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_ENDRX);
#else /* __rtems__ */
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_XFRDONE);
#endif /* __rtems__ */
	}
}
1245
/*
 * Handle completion of the DMA for a write (BLKE): tear down the
 * bounce-buffer mapping, then either finish the command now or wait
 * for NOTBUSY, depending on the transfer type and current status.
 * NOTE(review): unlike the read path, the dmamap sync/unload here is
 * not wrapped in #ifndef __rtems__ -- confirm that is intentional.
 */
static void
at91_mci_write_done(struct at91_mci_softc *sc, uint32_t sr)
{
	struct mmc_command *cmd = sc->curcmd;

	/*
	 * We arrive here when the entire DMA transfer for a write is done,
	 * whether it's a single or multi-block write.  If it's multi-block we
	 * have to immediately move on to the next operation which is to send
	 * the stop command.  If it's a single-block transfer we need to wait
	 * for NOTBUSY, but if that's already asserted we can avoid another
	 * interrupt and just move on to completing the request right away.
	 */

	WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS);

	bus_dmamap_sync(sc->dmatag, sc->bbuf_map[sc->bbuf_curidx],
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->dmatag, sc->bbuf_map[sc->bbuf_curidx]);

	if ((cmd->data->flags & MMC_DATA_MULTI) || (sr & MCI_SR_NOTBUSY)) {
		cmd->error = MMC_ERR_NONE;
		at91_mci_next_operation(sc);
	} else {
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY);
	}
}
1273
1274static void
1275at91_mci_notbusy(struct at91_mci_softc *sc)
1276{
1277        struct mmc_command *cmd = sc->curcmd;
1278
1279        /*
1280         * We arrive here by either completion of a single-block write, or
1281         * completion of the stop command that ended a multi-block write (and,
1282         * I suppose, after a card-select or erase, but I haven't tested
1283         * those).  Anyway, we're done and it's time to move on to the next
1284         * command.
1285         */
1286
1287        cmd->error = MMC_ERR_NONE;
1288        at91_mci_next_operation(sc);
1289}
1290
/*
 * Post-processing after CMDRDY for a MMC_STOP_TRANSMISSION command.
 * Applies two at91rm9200 hardware-bug workarounds (full reset after a
 * multi-block write; drain of straggling FIFO words after a
 * multi-block read) before completing the stop command.  Both
 * workarounds are gated on CAP_NEEDS_BYTESWAP, which identifies the
 * affected parts.
 */
static void
at91_mci_stop_done(struct at91_mci_softc *sc, uint32_t sr)
{
	struct mmc_command *cmd = sc->curcmd;

	/*
	 * We arrive here after receiving CMDRDY for a MMC_STOP_TRANSMISSION
	 * command.  Depending on the operation being stopped, we may have to
	 * do some unusual things to work around hardware bugs.
	 */

	/*
	 * This is known to be true of at91rm9200 hardware; it may or may not
	 * apply to more recent chips:
	 *
	 * After stopping a multi-block write, the NOTBUSY bit in MCI_SR does
	 * not properly reflect the actual busy state of the card as signaled
	 * on the DAT0 line; it always claims the card is not-busy.  If we
	 * believe that and let operations continue, following commands will
	 * fail with response timeouts (except of course MMC_SEND_STATUS -- it
	 * indicates the card is busy in the PRG state, which was the smoking
	 * gun that showed MCI_SR NOTBUSY was not tracking DAT0 correctly).
	 *
	 * The atmel docs are emphatic: "This flag [NOTBUSY] must be used only
	 * for Write Operations."  I guess technically since we sent a stop
	 * it's not a write operation anymore.  But then just what did they
	 * think it meant for the stop command to have "...an optional busy
	 * signal transmitted on the data line" according to the SD spec?
	 *
	 * I tried a variety of things to un-wedge the MCI and get the status
	 * register to reflect NOTBUSY correctly again, but the only thing
	 * that worked was a full device reset.  It feels like an awfully big
	 * hammer, but doing a full reset after every multiblock write is
	 * still faster than doing single-block IO (by almost two orders of
	 * magnitude: 20KB/sec improves to about 1.8MB/sec best case).
	 *
	 * After doing the reset, wait for a NOTBUSY interrupt before
	 * continuing with the next operation.
	 *
	 * This workaround breaks multiwrite on the rev2xx parts, but some other
	 * workaround is needed.
	 */
	if ((sc->flags & CMD_MULTIWRITE) && (sc->sc_cap & CAP_NEEDS_BYTESWAP)) {
		at91_mci_reset(sc);
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY);
		return;
	}

	/*
	 * This is known to be true of at91rm9200 hardware; it may or may not
	 * apply to more recent chips:
	 *
	 * After stopping a multi-block read, loop to read and discard any
	 * data that coasts in after we sent the stop command.  The docs don't
	 * say anything about it, but empirical testing shows that 1-3
	 * additional words of data get buffered up in some unmentioned
	 * internal fifo and if we don't read and discard them here they end
	 * up on the front of the next read DMA transfer we do.
	 *
	 * This appears to be unnecessary for rev2xx parts.
	 */
	if ((sc->flags & CMD_MULTIREAD) && (sc->sc_cap & CAP_NEEDS_BYTESWAP)) {
		uint32_t sr;
		int count = 0;

		/* Drain the FIFO; count is kept only for debugging. */
		do {
			sr = RD4(sc, MCI_SR);
			if (sr & MCI_SR_RXRDY) {
				RD4(sc,  MCI_RDR);
				++count;
			}
		} while (sr & MCI_SR_RXRDY);
		at91_mci_reset(sc);
	}

	cmd->error = MMC_ERR_NONE;
	at91_mci_next_operation(sc);

}
1370
/*
 * CMDRDY interrupt handler: runs at the end of every command.  Reads
 * back the response words (if any), then decides what to wait for
 * next: the special stop-command handling, NOTBUSY for R1B commands,
 * the data-transfer completion interrupts for commands with data, or
 * nothing -- in which case the command is complete and the next
 * operation is started.
 */
static void
at91_mci_cmdrdy(struct at91_mci_softc *sc, uint32_t sr)
{
	struct mmc_command *cmd = sc->curcmd;
	int i;

	/* Spurious CMDRDY with no command in flight: ignore. */
	if (cmd == NULL)
		return;

	/*
	 * We get here at the end of EVERY command.  We retrieve the command
	 * response (if any) then decide what to do next based on the command.
	 */

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* 136-bit responses occupy four RSPR words, others one. */
		for (i = 0; i < ((cmd->flags & MMC_RSP_136) ? 4 : 1); i++) {
			cmd->resp[i] = RD4(sc, MCI_RSPR + i * 4);
			if (mci_debug)
				printf("RSPR[%d] = %x sr=%x\n", i, cmd->resp[i],  sr);
		}
	}

	/*
	 * If this was a stop command, go handle the various special
	 * conditions (read: bugs) that have to be dealt with following a stop.
	 */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		at91_mci_stop_done(sc, sr);
		return;
	}

	/*
	 * If this command can continue to assert BUSY beyond the response then
	 * we need to wait for NOTBUSY before the command is really done.
	 *
	 * Note that this may not work properly on the at91rm9200.  It certainly
	 * doesn't work for the STOP command that follows a multi-block write,
	 * so post-stop CMDRDY is handled separately; see the special handling
	 * in at91_mci_stop_done().
	 *
	 * Beside STOP, there are other R1B-type commands that use the busy
	 * signal after CMDRDY: CMD7 (card select), CMD28-29 (write protect),
	 * CMD38 (erase). I haven't tested any of them, but I rather expect
	 * them all to have the same sort of problem with MCI_SR not actually
	 * reflecting the state of the DAT0-line busy indicator.  So this code
	 * may need to grow some sort of special handling for them too. (This
	 * just in: CMD7 isn't a problem right now because dev/mmc.c incorrectly
	 * sets the response flags to R1 rather than R1B.) XXX
	 */
	if ((cmd->flags & MMC_RSP_BUSY)) {
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY);
		return;
	}

	/*
	 * If there is a data transfer with this command, then...
	 * - If it's a read, we need to wait for ENDRX.
	 * - If it's a write, now is the time to enable the PDC, and we need
	 *   to wait for a BLKE that follows a TXBUFE, because if we're doing
	 *   a split transfer we get a BLKE after the first half (when TPR/TCR
	 *   get loaded from TNPR/TNCR).  So first we wait for the TXBUFE, and
	 *   the handling for that interrupt will then invoke the wait for the
	 *   subsequent BLKE which indicates actual completion.
	 */
	if (cmd->data) {
		uint32_t ier;
#ifndef __rtems__
		if (cmd->data->flags & MMC_DATA_READ) {
			ier = MCI_SR_ENDRX;
		} else {
			ier = MCI_SR_TXBUFE;
			WR4(sc, PDC_PTCR, PDC_PTCR_TXTEN);
		}
#else /* __rtems__ */
		/* The XDMA path signals both directions with XFRDONE. */
		ier = MCI_SR_XFRDONE;
#endif /* __rtems__ */
		WR4(sc, MCI_IER, MCI_SR_ERROR | ier);
		return;
	}

	/*
	 * If we made it to here, we don't need to wait for anything more for
	 * the current command, move on to the next command (will complete the
	 * request if there is no next command).
	 */
	cmd->error = MMC_ERR_NONE;
	at91_mci_next_operation(sc);
}
1459
/*
 * Main MCI interrupt handler (on RTEMS this runs in real interrupt
 * context, synchronized by the driver's interrupt lock).  Reads the
 * status register, masks the interrupts that fired (all interrupts
 * are one-shot), then dispatches to the error path or to the
 * per-condition completion handlers.
 */
static void
at91_mci_intr(void *arg)
{
	struct at91_mci_softc *sc = (struct at91_mci_softc*)arg;
	struct mmc_command *cmd = sc->curcmd;
	uint32_t sr, isr;

	AT91_MCI_LOCK(sc);

	/* isr = status bits that are both set and currently enabled. */
	sr = RD4(sc, MCI_SR);
	isr = sr & RD4(sc, MCI_IMR);

	if (mci_debug)
		printf("i 0x%x sr 0x%x\n", isr, sr);

	/*
	 * All interrupts are one-shot; disable it now.
	 * The next operation will re-enable whatever interrupts it wants.
	 */
	WR4(sc, MCI_IDR, isr);
	if (isr & MCI_SR_ERROR) {
		/* NOTE(review): cmd is dereferenced without a NULL check
		 * here, unlike at91_mci_cmdrdy(); presumably an error
		 * interrupt can only fire with a command in flight --
		 * confirm. */
		if (isr & (MCI_SR_RTOE | MCI_SR_DTOE))
			cmd->error = MMC_ERR_TIMEOUT;
		else if (isr & (MCI_SR_RCRCE | MCI_SR_DCRCE))
			cmd->error = MMC_ERR_BADCRC;
		else if (isr & (MCI_SR_OVRE | MCI_SR_UNRE))
			cmd->error = MMC_ERR_FIFO;
		else
			cmd->error = MMC_ERR_FAILED;
		/*
		 * CMD8 is used to probe for SDHC cards, a standard SD card
		 * will get a response timeout; don't report it because it's a
		 * normal and expected condition.  One might argue that all
		 * error reporting should be left to higher levels, but when
		 * they report at all it's always EIO, which isn't very
		 * helpful. XXX bootverbose?
		 */
		if (cmd->opcode != 8) {
			device_printf(sc->dev,
			    "IO error; status MCI_SR = 0x%b cmd opcode = %d%s\n",
			    sr, MCI_SR_BITSTRING, cmd->opcode,
			    (cmd->opcode != 12) ? "" :
			    (sc->flags & CMD_MULTIREAD) ? " after read" : " after write");
			/* XXX not sure RTOE needs a full reset, just a retry */
			at91_mci_reset(sc);
		}
		at91_mci_next_operation(sc);
	} else {
#ifndef __rtems__
		if (isr & MCI_SR_TXBUFE) {
//			printf("TXBUFE\n");
			/*
			 * We need to wait for a BLKE that follows TXBUFE
			 * (intermediate BLKEs might happen after ENDTXes if
			 * we're chaining multiple buffers).  If BLKE is also
			 * asserted at the time we get TXBUFE, we can avoid
			 * another interrupt and process it right away, below.
			 */
			if (sr & MCI_SR_BLKE)
				isr |= MCI_SR_BLKE;
			else
				WR4(sc, MCI_IER, MCI_SR_BLKE);
		}
		if (isr & MCI_SR_RXBUFF) {
//			printf("RXBUFF\n");
		}
		if (isr & MCI_SR_ENDTX) {
//			printf("ENDTX\n");
		}
		if (isr & MCI_SR_ENDRX) {
//			printf("ENDRX\n");
			at91_mci_read_done(sc, sr);
		}
#else /* __rtems__ */
		/* RTEMS/XDMA path: XFRDONE covers both directions.  Reads
		 * complete here; writes fall through to the BLKE handling
		 * below (note: the inner cmd shadows the outer one). */
		if (isr & MCI_SR_XFRDONE) {
			struct mmc_command *cmd = sc->curcmd;
			if (cmd->data->flags & MMC_DATA_READ) {
				at91_mci_read_done(sc, sr);
			} else {
				if (sr & MCI_SR_BLKE)
					isr |= MCI_SR_BLKE;
				else
					WR4(sc, MCI_IER, MCI_SR_BLKE);
			}
		}
#endif /* __rtems__ */
		if (isr & MCI_SR_NOTBUSY) {
//			printf("NOTBUSY\n");
			at91_mci_notbusy(sc);
		}
		if (isr & MCI_SR_DTIP) {
//			printf("Data transfer in progress\n");
		}
		if (isr & MCI_SR_BLKE) {
//			printf("Block transfer end\n");
			at91_mci_write_done(sc, sr);
		}
		if (isr & MCI_SR_TXRDY) {
//			printf("Ready to transmit\n");
		}
		if (isr & MCI_SR_RXRDY) {
//			printf("Ready to receive\n");
		}
		if (isr & MCI_SR_CMDRDY) {
//			printf("Command ready\n");
			at91_mci_cmdrdy(sc, sr);
		}
	}
	AT91_MCI_UNLOCK(sc);
}
1570
1571static int
1572at91_mci_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
1573{
1574        struct at91_mci_softc *sc = device_get_softc(bus);
1575
1576        switch (which) {
1577        default:
1578                return (EINVAL);
1579        case MMCBR_IVAR_BUS_MODE:
1580                *(int *)result = sc->host.ios.bus_mode;
1581                break;
1582        case MMCBR_IVAR_BUS_WIDTH:
1583                *(int *)result = sc->host.ios.bus_width;
1584                break;
1585        case MMCBR_IVAR_CHIP_SELECT:
1586                *(int *)result = sc->host.ios.chip_select;
1587                break;
1588        case MMCBR_IVAR_CLOCK:
1589                *(int *)result = sc->host.ios.clock;
1590                break;
1591        case MMCBR_IVAR_F_MIN:
1592                *(int *)result = sc->host.f_min;
1593                break;
1594        case MMCBR_IVAR_F_MAX:
1595                *(int *)result = sc->host.f_max;
1596                break;
1597        case MMCBR_IVAR_HOST_OCR:
1598                *(int *)result = sc->host.host_ocr;
1599                break;
1600        case MMCBR_IVAR_MODE:
1601                *(int *)result = sc->host.mode;
1602                break;
1603        case MMCBR_IVAR_OCR:
1604                *(int *)result = sc->host.ocr;
1605                break;
1606        case MMCBR_IVAR_POWER_MODE:
1607                *(int *)result = sc->host.ios.power_mode;
1608                break;
1609        case MMCBR_IVAR_VDD:
1610                *(int *)result = sc->host.ios.vdd;
1611                break;
1612        case MMCBR_IVAR_CAPS:
1613                if (sc->has_4wire) {
1614                        sc->sc_cap |= CAP_HAS_4WIRE;
1615                        sc->host.caps |= MMC_CAP_4_BIT_DATA;
1616                } else {
1617                        sc->sc_cap &= ~CAP_HAS_4WIRE;
1618                        sc->host.caps &= ~MMC_CAP_4_BIT_DATA;
1619                }
1620                *(int *)result = sc->host.caps;
1621                break;
1622#ifdef __rtems__
1623        case MMCBR_IVAR_TIMING:
1624                *result = sc->host.ios.timing;
1625                break;
1626#endif /* __rtems__ */
1627        case MMCBR_IVAR_MAX_DATA:
1628                /*
1629                 * Something is wrong with the 2x parts and multiblock, so
1630                 * just do 1 block at a time for now, which really kills
1631                 * performance.
1632                 */
1633                if (sc->sc_cap & CAP_MCI1_REV2XX)
1634                        *(int *)result = 1;
1635                else
1636                        *(int *)result = MAX_BLOCKS;
1637                break;
1638        }
1639        return (0);
1640}
1641
1642static int
1643at91_mci_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
1644{
1645        struct at91_mci_softc *sc = device_get_softc(bus);
1646
1647        switch (which) {
1648        default:
1649                return (EINVAL);
1650        case MMCBR_IVAR_BUS_MODE:
1651                sc->host.ios.bus_mode = value;
1652                break;
1653        case MMCBR_IVAR_BUS_WIDTH:
1654                sc->host.ios.bus_width = value;
1655                break;
1656        case MMCBR_IVAR_CHIP_SELECT:
1657                sc->host.ios.chip_select = value;
1658                break;
1659        case MMCBR_IVAR_CLOCK:
1660                sc->host.ios.clock = value;
1661                break;
1662        case MMCBR_IVAR_MODE:
1663                sc->host.mode = value;
1664                break;
1665        case MMCBR_IVAR_OCR:
1666                sc->host.ocr = value;
1667                break;
1668        case MMCBR_IVAR_POWER_MODE:
1669                sc->host.ios.power_mode = value;
1670                break;
1671        case MMCBR_IVAR_VDD:
1672                sc->host.ios.vdd = value;
1673                break;
1674#ifdef __rtems__
1675        case MMCBR_IVAR_TIMING:
1676                sc->host.ios.timing = value;
1677                break;
1678#endif /* __rtems__ */
1679        /* These are read-only */
1680        case MMCBR_IVAR_CAPS:
1681        case MMCBR_IVAR_HOST_OCR:
1682        case MMCBR_IVAR_F_MIN:
1683        case MMCBR_IVAR_F_MAX:
1684        case MMCBR_IVAR_MAX_DATA:
1685                return (EINVAL);
1686        }
1687        return (0);
1688}
1689
/*
 * Kernel object method table wiring this driver into newbus: device
 * lifecycle entry points, ivar accessors for the child mmc bus, and
 * the mmc bridge (mmcbr) interface used by the generic mmc layer.
 */
static device_method_t at91_mci_methods[] = {
	/* device_if */
	DEVMETHOD(device_probe, at91_mci_probe),
	DEVMETHOD(device_attach, at91_mci_attach),
	DEVMETHOD(device_detach, at91_mci_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	at91_mci_read_ivar),
	DEVMETHOD(bus_write_ivar,	at91_mci_write_ivar),

	/* mmcbr_if */
	DEVMETHOD(mmcbr_update_ios, at91_mci_update_ios),
	DEVMETHOD(mmcbr_request, at91_mci_request),
	DEVMETHOD(mmcbr_get_ro, at91_mci_get_ro),
	DEVMETHOD(mmcbr_acquire_host, at91_mci_acquire_host),
	DEVMETHOD(mmcbr_release_host, at91_mci_release_host),

	DEVMETHOD_END
};
1709
/* newbus driver description: driver name, method table, softc size. */
static driver_t at91_mci_driver = {
	"at91_mci",
	at91_mci_methods,
	sizeof(struct at91_mci_softc),
};

/* Devclass shared by all at91_mci instances (unit numbering). */
static devclass_t at91_mci_devclass;
1717
#ifndef __rtems__
#ifdef FDT
/* FDT systems: attach below simplebus via the device tree. */
DRIVER_MODULE(at91_mci, simplebus, at91_mci_driver, at91_mci_devclass, NULL,
    NULL);
#else
/* Non-FDT systems: attach below the legacy atmelarm root bus. */
DRIVER_MODULE(at91_mci, atmelarm, at91_mci_driver, at91_mci_devclass, NULL,
    NULL);
#endif

MMC_DECLARE_BRIDGE(at91_mci);
#else /* __rtems__ */
/* RTEMS: devices hang directly off the nexus bus. */
DRIVER_MODULE(at91_mci, nexus, at91_mci_driver, at91_mci_devclass, NULL, NULL);
#endif /* __rtems__ */
/* Instantiate the generic mmc bus on top of this bridge device. */
DRIVER_MODULE(mmc, at91_mci, mmc_driver, mmc_devclass, NULL, NULL);
MODULE_DEPEND(at91_mci, mmc, 1, 1, 1);
/* Closes the file-wide conditional opened near the top of the file. */
#endif /* __rtems__ && LIBBSP_ARM_ATSAM_BSP_H */
Note: See TracBrowser for help on using the repository browser.