1 | #include <machine/rtems-bsd-kernel-space.h> |
---|
2 | |
---|
3 | /*- |
---|
4 | * Copyright (c) 2006 Bernd Walter. All rights reserved. |
---|
5 | * Copyright (c) 2006 M. Warner Losh. All rights reserved. |
---|
6 | * Copyright (c) 2010 Greg Ansley. All rights reserved. |
---|
7 | * |
---|
8 | * Redistribution and use in source and binary forms, with or without |
---|
9 | * modification, are permitted provided that the following conditions |
---|
10 | * are met: |
---|
11 | * 1. Redistributions of source code must retain the above copyright |
---|
12 | * notice, this list of conditions and the following disclaimer. |
---|
13 | * 2. Redistributions in binary form must reproduce the above copyright |
---|
14 | * notice, this list of conditions and the following disclaimer in the |
---|
15 | * documentation and/or other materials provided with the distribution. |
---|
16 | * |
---|
17 | * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND |
---|
18 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
---|
19 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
---|
20 | * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE |
---|
21 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
---|
22 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS |
---|
23 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
---|
24 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT |
---|
25 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY |
---|
26 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF |
---|
27 | * SUCH DAMAGE. |
---|
28 | */ |
---|
29 | |
---|
30 | #include <rtems/bsd/local/opt_platform.h> |
---|
31 | |
---|
32 | #include <sys/cdefs.h> |
---|
33 | __FBSDID("$FreeBSD$"); |
---|
34 | |
---|
35 | #include <sys/param.h> |
---|
36 | #include <sys/systm.h> |
---|
37 | #include <sys/bus.h> |
---|
38 | #include <sys/endian.h> |
---|
39 | #include <sys/kernel.h> |
---|
40 | #include <sys/lock.h> |
---|
41 | #include <sys/malloc.h> |
---|
42 | #include <sys/module.h> |
---|
43 | #include <sys/mutex.h> |
---|
44 | #include <rtems/bsd/sys/resource.h> |
---|
45 | #include <sys/rman.h> |
---|
46 | #include <sys/sysctl.h> |
---|
47 | |
---|
48 | #include <machine/bus.h> |
---|
49 | #include <machine/resource.h> |
---|
50 | #include <machine/intr.h> |
---|
51 | |
---|
52 | #include <arm/at91/at91var.h> |
---|
53 | #include <arm/at91/at91_mcireg.h> |
---|
54 | #include <arm/at91/at91_pdcreg.h> |
---|
55 | |
---|
56 | #include <dev/mmc/bridge.h> |
---|
57 | #include <dev/mmc/mmcbrvar.h> |
---|
58 | |
---|
59 | #ifdef FDT |
---|
60 | #include <dev/ofw/ofw_bus.h> |
---|
61 | #include <dev/ofw/ofw_bus_subr.h> |
---|
62 | #endif |
---|
63 | |
---|
64 | #include <rtems/bsd/local/mmcbr_if.h> |
---|
65 | |
---|
66 | #include <rtems/bsd/local/opt_at91.h> |
---|
67 | |
---|
68 | #ifdef __rtems__ |
---|
69 | #include <bsp.h> |
---|
70 | #endif /* __rtems__ */ |
---|
71 | #if defined(__rtems) && defined(LIBBSP_ARM_ATSAM_BSP_H) |
---|
72 | #ifdef __rtems__ |
---|
73 | #include <libchip/chip.h> |
---|
74 | |
---|
75 | #define AT91_MCI_HAS_4WIRE 1 |
---|
76 | |
---|
77 | uint32_t at91_master_clock = BOARD_MCK; |
---|
78 | |
---|
79 | static sXdmad *pXdmad = &XDMAD_Instance; |
---|
80 | #endif /* __rtems__ */ |
---|
81 | /* |
---|
82 | * About running the MCI bus above 25MHz |
---|
83 | * |
---|
84 | * Historically, the MCI bus has been run at 30MHz on systems with a 60MHz |
---|
85 | * master clock, in part due to a bug in dev/mmc.c making always request |
---|
86 | * 30MHz, and in part over clocking the bus because 15MHz was too slow. |
---|
87 | * Fixing that bug causes the mmc driver to request a 25MHz clock (as it |
---|
88 | * should) and the logic in at91_mci_update_ios() picks the highest speed that |
---|
89 | * doesn't exceed that limit. With a 60MHz MCK that would be 15MHz, and |
---|
90 | * that's a real performance buzzkill when you've been getting away with 30MHz |
---|
91 | * all along. |
---|
92 | * |
---|
93 | * By defining AT91_MCI_ALLOW_OVERCLOCK (or setting the allow_overclock=1 |
---|
94 | * device hint or sysctl) you can enable logic in at91_mci_update_ios() to |
---|
 * overclock the SD bus a little by running it at MCK / 2 when the requested
---|
96 | * speed is 25MHz and the next highest speed is 15MHz or less. This appears |
---|
97 | * to work on virtually all SD cards, since it is what this driver has been |
---|
98 | * doing prior to the introduction of this option, where the overclocking vs |
---|
99 | * underclocking decision was automatically "overclock". Modern SD cards can |
---|
100 | * run at 45mhz/1-bit in standard mode (high speed mode enable commands not |
---|
101 | * sent) without problems. |
---|
102 | * |
---|
103 | * Speaking of high-speed mode, the rm9200 manual says the MCI device supports |
---|
104 | * the SD v1.0 specification and can run up to 50MHz. This is interesting in |
---|
105 | * that the SD v1.0 spec caps the speed at 25MHz; high speed mode was added in |
---|
106 | * the v1.10 spec. Furthermore, high speed mode doesn't just crank up the |
---|
107 | * clock, it alters the signal timing. The rm9200 MCI device doesn't support |
---|
108 | * these altered timings. So while speeds over 25MHz may work, they only work |
---|
109 | * in what the SD spec calls "default" speed mode, and it amounts to violating |
---|
110 | * the spec by overclocking the bus. |
---|
111 | * |
---|
112 | * If you also enable 4-wire mode it's possible transfers faster than 25MHz |
---|
 * will fail.  On the AT91RM9200, due to bugs in the bus contention logic, if
 * you have the USB host device and OHCI driver enabled, MCI transfers will
 * fail.  Even when underclocking to 15MHz, intermittent overrun and underrun
 * errors occur.
---|
116 | * Note that you don't even need to have usb devices attached to the system, |
---|
117 | * the errors begin to occur as soon as the OHCI driver sets the register bit |
---|
118 | * to enable periodic transfers. It appears (based on brief investigation) |
---|
119 | * that the usb host controller uses so much ASB bandwidth that sometimes the |
---|
120 | * DMA for MCI transfers doesn't get a bus grant in time and data gets |
---|
121 | * dropped. Adding even a modicum of network activity changes the symptom |
---|
 * from intermittent to very frequent.  Members of the AT91SAM9 family have
---|
123 | * corrected this problem, or are at least better about their use of the bus. |
---|
124 | */ |
---|
/*
 * Default to the historical behavior of overclocking 25MHz requests to MCK/2;
 * can be overridden at build time (see the discussion above).
 */
#ifndef AT91_MCI_ALLOW_OVERCLOCK
#define AT91_MCI_ALLOW_OVERCLOCK 1
#endif

/*
 * Allocate 2 bounce buffers we'll use to endian-swap the data due to the rm9200
 * erratum.  We use a pair of buffers because when reading that lets us begin
 * endian-swapping the data in the first buffer while the DMA is reading into
 * the second buffer.  (We can't use the same trick for writing because we might
 * not get all the data in the 2nd buffer swapped before the hardware needs it;
 * dealing with that would add complexity to the driver.)
 *
 * The buffers are sized at 16K each due to the way the busdma cache sync
 * operations work on arm.  A dcache_inv_range() operation on a range larger
 * than 16K gets turned into a dcache_wbinv_all().  That needlessly flushes the
 * entire data cache, impacting overall system performance.
 */
#define BBCOUNT     2
#define BBSIZE      (16*1024)
#define MAX_BLOCKS  ((BBSIZE*BBCOUNT)/512)	/* max 512-byte blocks per xfer */

/* Non-zero enables debug output; exposed as the "debug" sysctl in attach. */
static int mci_debug;
---|
147 | |
---|
/*
 * Per-controller software state.
 */
struct at91_mci_softc {
	void *intrhand;			/* Interrupt handle */
	device_t dev;			/* Our device (set in attach) */
	int sc_cap;			/* Capability/quirk flags (CAP_*) */
#define CAP_HAS_4WIRE 1		/* Has 4 wire bus */
#define CAP_NEEDS_BYTESWAP 2	/* broken hardware needing bounce */
#define CAP_MCI1_REV2XX 4	/* MCI 1 rev 2.x */
	int flags;			/* Request state flags (below) */
#define PENDING_CMD 0x01
#define PENDING_STOP 0x02
#define CMD_MULTIREAD 0x10
#define CMD_MULTIWRITE 0x20
	int has_4wire;			/* 4-wire bus (hint/sysctl/#define) */
	int allow_overclock;		/* Allow MCK/2 for 25MHz requests */
	struct resource *irq_res;	/* IRQ resource */
	struct resource *mem_res;	/* Memory resource */
	struct mtx sc_mtx;		/* Driver mutex */
	bus_dma_tag_t dmatag;		/* Tag for the bounce buffers */
	struct mmc_host host;		/* Host parameters for the mmc layer */
	int bus_busy;
	struct mmc_request *req;	/* Request currently being processed */
	struct mmc_command *curcmd;	/* Command currently being processed */
	bus_dmamap_t bbuf_map[BBCOUNT];
	char      *  bbuf_vaddr[BBCOUNT]; /* bounce bufs in KVA space */
	uint32_t     bbuf_len[BBCOUNT];	/* len currently queued for bounce buf */
	uint32_t     bbuf_curidx;	/* which bbuf is the active DMA buffer */
	uint32_t     xfer_offset;	/* offset so far into caller's buf */
#ifdef __rtems__
	uint32_t xdma_tx_channel;	/* XDMAD channel for TX transfers */
	uint32_t xdma_rx_channel;	/* XDMAD channel for RX transfers */
	uint8_t xdma_tx_perid;		/* XDMA peripheral id, TX direction */
	uint8_t xdma_rx_perid;		/* XDMA peripheral id, RX direction */
	sXdmadCfg xdma_tx_cfg;		/* Prebuilt TX channel configuration */
	sXdmadCfg xdma_rx_cfg;		/* Prebuilt RX channel configuration */
#endif /* __rtems__ */
};
---|
184 | |
---|
185 | /* bus entry points */ |
---|
186 | static int at91_mci_probe(device_t dev); |
---|
187 | static int at91_mci_attach(device_t dev); |
---|
188 | static int at91_mci_detach(device_t dev); |
---|
189 | static void at91_mci_intr(void *); |
---|
190 | |
---|
191 | /* helper routines */ |
---|
192 | static int at91_mci_activate(device_t dev); |
---|
193 | static void at91_mci_deactivate(device_t dev); |
---|
194 | static int at91_mci_is_mci1rev2xx(void); |
---|
195 | #ifdef __rtems__ |
---|
196 | static void at91_mci_read_done(struct at91_mci_softc *sc, uint32_t sr); |
---|
197 | static void at91_mci_write_done(struct at91_mci_softc *sc, uint32_t sr); |
---|
198 | #endif /* __rtems__ */ |
---|
199 | |
---|
/* Locking helpers for the per-controller mutex (sc_mtx). */
#define AT91_MCI_LOCK(_sc)		mtx_lock(&(_sc)->sc_mtx)
#define AT91_MCI_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
#define AT91_MCI_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
	    "mci", MTX_DEF)
#define AT91_MCI_LOCK_DESTROY(_sc)	mtx_destroy(&_sc->sc_mtx);
#define AT91_MCI_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define AT91_MCI_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
---|
208 | |
---|
/* Read the 32-bit MCI register at byte offset 'off'. */
static inline uint32_t
RD4(struct at91_mci_softc *sc, bus_size_t off)
{
	return (bus_read_4(sc->mem_res, off));
}
---|
214 | |
---|
/* Write 'val' to the 32-bit MCI register at byte offset 'off'. */
static inline void
WR4(struct at91_mci_softc *sc, bus_size_t off, uint32_t val)
{
	bus_write_4(sc->mem_res, off, val);
}
---|
220 | |
---|
221 | static void |
---|
222 | at91_bswap_buf(struct at91_mci_softc *sc, void * dptr, void * sptr, uint32_t memsize) |
---|
223 | { |
---|
224 | uint32_t * dst = (uint32_t *)dptr; |
---|
225 | uint32_t * src = (uint32_t *)sptr; |
---|
226 | uint32_t i; |
---|
227 | |
---|
228 | /* |
---|
229 | * If the hardware doesn't need byte-swapping, let bcopy() do the |
---|
230 | * work. Use bounce buffer even if we don't need byteswap, since |
---|
231 | * buffer may straddle a page boundary, and we don't handle |
---|
232 | * multi-segment transfers in hardware. Seen from 'bsdlabel -w' which |
---|
233 | * uses raw geom access to the volume. Greg Ansley (gja (at) |
---|
234 | * ansley.com) |
---|
235 | */ |
---|
236 | if (!(sc->sc_cap & CAP_NEEDS_BYTESWAP)) { |
---|
237 | memcpy(dptr, sptr, memsize); |
---|
238 | return; |
---|
239 | } |
---|
240 | |
---|
241 | /* |
---|
242 | * Nice performance boost for slightly unrolling this loop. |
---|
243 | * (But very little extra boost for further unrolling it.) |
---|
244 | */ |
---|
245 | for (i = 0; i < memsize; i += 16) { |
---|
246 | *dst++ = bswap32(*src++); |
---|
247 | *dst++ = bswap32(*src++); |
---|
248 | *dst++ = bswap32(*src++); |
---|
249 | *dst++ = bswap32(*src++); |
---|
250 | } |
---|
251 | |
---|
252 | /* Mop up the last 1-3 words, if any. */ |
---|
253 | for (i = 0; i < (memsize & 0x0F); i += 4) { |
---|
254 | *dst++ = bswap32(*src++); |
---|
255 | } |
---|
256 | } |
---|
257 | |
---|
258 | static void |
---|
259 | at91_mci_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) |
---|
260 | { |
---|
261 | if (error != 0) |
---|
262 | return; |
---|
263 | *(bus_addr_t *)arg = segs[0].ds_addr; |
---|
264 | } |
---|
265 | |
---|
/*
 * Stop all DMA activity for the controller: the PDC queues on classic AT91
 * parts, or the two XDMAD channels on the SAMV71 RTEMS port.
 */
static void
at91_mci_pdc_disable(struct at91_mci_softc *sc)
{
#ifndef __rtems__
	/* Disable both transfer directions, then clear every queue register. */
	WR4(sc, PDC_PTCR, PDC_PTCR_TXTDIS | PDC_PTCR_RXTDIS);
	WR4(sc, PDC_RPR, 0);
	WR4(sc, PDC_RCR, 0);
	WR4(sc, PDC_RNPR, 0);
	WR4(sc, PDC_RNCR, 0);
	WR4(sc, PDC_TPR, 0);
	WR4(sc, PDC_TCR, 0);
	WR4(sc, PDC_TNPR, 0);
	WR4(sc, PDC_TNCR, 0);
#else /* __rtems__ */
	/* On SAMV71 there is no PDC but a DMAC */
	XDMAD_StopTransfer(pXdmad, sc->xdma_rx_channel);
	XDMAD_StopTransfer(pXdmad, sc->xdma_tx_channel);
	WR4(sc, MCI_DMA, 0);
#endif /* __rtems__ */
}
---|
286 | |
---|
/*
 * Reset the controller, then restore most of the current state.
 *
 * This is called after detecting an error.  It's also called after stopping a
 * multi-block write, to un-wedge the device so that it will handle the NOTBUSY
 * signal correctly.  See comments in at91_mci_stop_done() for more details.
 */
static void at91_mci_reset(struct at91_mci_softc *sc)
{
	uint32_t mr;
	uint32_t sdcr;
	uint32_t dtor;
	uint32_t imr;

	/* Quiesce DMA before touching the controller. */
	at91_mci_pdc_disable(sc);

	/* save current state */

	imr = RD4(sc, MCI_IMR);
#ifndef __rtems__
	/*
	 * NOTE(review): only the low 15 bits of MR are preserved here,
	 * dropping the upper fields -- presumably re-established per
	 * transfer; confirm against the datasheet.
	 */
	mr = RD4(sc, MCI_MR) & 0x7fff;
#else /* __rtems__ */
	mr = RD4(sc, MCI_MR);
#endif /* __rtems__ */
	sdcr = RD4(sc, MCI_SDCR);
	dtor = RD4(sc, MCI_DTOR);

	/* reset the controller */

	WR4(sc, MCI_IDR, 0xffffffff);	/* mask all interrupts */
	WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST);

	/* restore state */

	WR4(sc, MCI_CR, MCI_CR_MCIEN|MCI_CR_PWSEN);
	WR4(sc, MCI_MR, mr);
	WR4(sc, MCI_SDCR, sdcr);
	WR4(sc, MCI_DTOR, dtor);
	WR4(sc, MCI_IER, imr);	/* re-enable the interrupts saved above */

	/*
	 * Make sure sdio interrupts will fire.  Not sure why reading
	 * SR ensures that, but this is in the linux driver.
	 */

	RD4(sc, MCI_SR);
}
---|
334 | |
---|
/*
 * Program the controller to its initial enabled state: reset it, set data
 * timeout, mode (incl. clock divider) and slot selection, then enable it.
 */
static void
at91_mci_init(device_t dev)
{
	struct at91_mci_softc *sc = device_get_softc(dev);
	uint32_t val;

	WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST); /* device into reset */
	WR4(sc, MCI_IDR, 0xffffffff);		/* Turn off interrupts */
	WR4(sc, MCI_DTOR, MCI_DTOR_DTOMUL_1M | 1);
#ifndef __rtems__
	val = MCI_MR_PDCMODE;
#else /* __rtems__ */
	/* No PDC on SAMV71; enable read/write proof to avoid under/overruns. */
	val = 0;
	val |= MCI_MR_RDPROOF | MCI_MR_WRPROOF;
#endif /* __rtems__ */
	val |= 0x34a;		/* PWSDIV = 3; CLKDIV = 74 */
//	if (sc->sc_cap & CAP_MCI1_REV2XX)
//		val |= MCI_MR_RDPROOF | MCI_MR_WRPROOF;
	WR4(sc, MCI_MR, val);
#ifndef AT91_MCI_SLOT_B
	WR4(sc, MCI_SDCR, 0);			/* SLOT A, 1 bit bus */
#else
	/*
	 * XXX Really should add a second "unit" but nobody we know of is
	 * using a two slot card.  XXX
	 */
	WR4(sc, MCI_SDCR, 1);			/* SLOT B, 1 bit bus */
#endif
	/*
	 * Enable controller, including power-save.  The slower clock
	 * of the power-save mode is only in effect when there is no
	 * transfer in progress, so it can be left in this mode all
	 * the time.
	 */
	WR4(sc, MCI_CR, MCI_CR_MCIEN|MCI_CR_PWSEN);
}
---|
371 | |
---|
/*
 * Quiesce the controller: mask interrupts, stop DMA, and hold the device in
 * reset.  Undone by at91_mci_init().
 */
static void
at91_mci_fini(device_t dev)
{
	struct at91_mci_softc *sc = device_get_softc(dev);

	WR4(sc, MCI_IDR, 0xffffffff);		/* Turn off interrupts */
	at91_mci_pdc_disable(sc);
	WR4(sc, MCI_CR, MCI_CR_MCIDIS | MCI_CR_SWRST); /* device into reset */
}
---|
381 | |
---|
382 | static int |
---|
383 | at91_mci_probe(device_t dev) |
---|
384 | { |
---|
385 | #ifdef FDT |
---|
386 | if (!ofw_bus_is_compatible(dev, "atmel,hsmci")) |
---|
387 | return (ENXIO); |
---|
388 | #endif |
---|
389 | device_set_desc(dev, "MCI mmc/sd host bridge"); |
---|
390 | return (0); |
---|
391 | } |
---|
392 | |
---|
393 | static int |
---|
394 | at91_mci_attach(device_t dev) |
---|
395 | { |
---|
396 | struct at91_mci_softc *sc = device_get_softc(dev); |
---|
397 | struct sysctl_ctx_list *sctx; |
---|
398 | struct sysctl_oid *soid; |
---|
399 | device_t child; |
---|
400 | int err, i; |
---|
401 | |
---|
402 | #ifdef __rtems__ |
---|
403 | #ifdef LIBBSP_ARM_ATSAM_BSP_H |
---|
404 | PMC_EnablePeripheral(ID_HSMCI); |
---|
405 | sc->xdma_tx_channel = XDMAD_ALLOC_FAILED; |
---|
406 | sc->xdma_rx_channel = XDMAD_ALLOC_FAILED; |
---|
407 | #endif /* LIBBSP_ARM_ATSAM_BSP_H */ |
---|
408 | #endif /* __rtems__ */ |
---|
409 | sctx = device_get_sysctl_ctx(dev); |
---|
410 | soid = device_get_sysctl_tree(dev); |
---|
411 | |
---|
412 | sc->dev = dev; |
---|
413 | sc->sc_cap = 0; |
---|
414 | #ifndef __rtems__ |
---|
415 | if (at91_is_rm92()) |
---|
416 | sc->sc_cap |= CAP_NEEDS_BYTESWAP; |
---|
417 | #endif /* __rtems__ */ |
---|
418 | /* |
---|
419 | * MCI1 Rev 2 controllers need some workarounds, flag if so. |
---|
420 | */ |
---|
421 | if (at91_mci_is_mci1rev2xx()) |
---|
422 | sc->sc_cap |= CAP_MCI1_REV2XX; |
---|
423 | |
---|
424 | err = at91_mci_activate(dev); |
---|
425 | if (err) |
---|
426 | goto out; |
---|
427 | |
---|
428 | #ifdef __rtems__ |
---|
429 | eXdmadRC rc; |
---|
430 | |
---|
431 | /* Prepare some configurations so they don't have to be fetched on every |
---|
432 | * setup */ |
---|
433 | sc->xdma_rx_perid = XDMAIF_Get_ChannelNumber(ID_HSMCI, |
---|
434 | XDMAD_TRANSFER_RX); |
---|
435 | sc->xdma_tx_perid = XDMAIF_Get_ChannelNumber(ID_HSMCI, |
---|
436 | XDMAD_TRANSFER_TX); |
---|
437 | memset(&sc->xdma_rx_cfg, 0, sizeof(sc->xdma_rx_cfg)); |
---|
438 | sc->xdma_rx_cfg.mbr_cfg = XDMAC_CC_TYPE_PER_TRAN | |
---|
439 | XDMAC_CC_MBSIZE_SINGLE | XDMAC_CC_DSYNC_PER2MEM | |
---|
440 | XDMAC_CC_SWREQ_HWR_CONNECTED | XDMAC_CC_MEMSET_NORMAL_MODE | |
---|
441 | XDMAC_CC_CSIZE_CHK_1 | XDMAC_CC_DWIDTH_WORD | |
---|
442 | XDMAC_CC_SIF_AHB_IF1 | XDMAC_CC_DIF_AHB_IF1 | |
---|
443 | XDMAC_CC_SAM_FIXED_AM | XDMAC_CC_DAM_INCREMENTED_AM | |
---|
444 | XDMAC_CC_PERID( |
---|
445 | XDMAIF_Get_ChannelNumber(ID_HSMCI,XDMAD_TRANSFER_RX)); |
---|
446 | memset(&sc->xdma_tx_cfg, 0, sizeof(sc->xdma_tx_cfg)); |
---|
447 | sc->xdma_tx_cfg.mbr_cfg = XDMAC_CC_TYPE_PER_TRAN | |
---|
448 | XDMAC_CC_MBSIZE_SINGLE | XDMAC_CC_DSYNC_MEM2PER | |
---|
449 | XDMAC_CC_SWREQ_HWR_CONNECTED | XDMAC_CC_MEMSET_NORMAL_MODE | |
---|
450 | XDMAC_CC_CSIZE_CHK_1 | XDMAC_CC_DWIDTH_WORD | |
---|
451 | XDMAC_CC_SIF_AHB_IF1 | XDMAC_CC_DIF_AHB_IF1 | |
---|
452 | XDMAC_CC_SAM_INCREMENTED_AM | XDMAC_CC_DAM_FIXED_AM | |
---|
453 | XDMAC_CC_PERID( |
---|
454 | XDMAIF_Get_ChannelNumber(ID_HSMCI,XDMAD_TRANSFER_TX)); |
---|
455 | |
---|
456 | sc->xdma_tx_channel = XDMAD_AllocateChannel(pXdmad, |
---|
457 | XDMAD_TRANSFER_MEMORY, ID_HSMCI); |
---|
458 | if (sc->xdma_tx_channel == XDMAD_ALLOC_FAILED) |
---|
459 | goto out; |
---|
460 | |
---|
461 | /* FIXME: The two DMA channels are not really necessary for the driver. |
---|
462 | * But the XDMAD interface does not allow to allocate one and use it |
---|
463 | * into two directions. The current (2017-07-11) implementation of |
---|
464 | * the XDMAD interface should work with it. So we might could try it. */ |
---|
465 | sc->xdma_rx_channel = XDMAD_AllocateChannel(pXdmad, ID_HSMCI, |
---|
466 | XDMAD_TRANSFER_MEMORY); |
---|
467 | if (sc->xdma_rx_channel == XDMAD_ALLOC_FAILED) |
---|
468 | goto out; |
---|
469 | |
---|
470 | rc = XDMAD_PrepareChannel(pXdmad, sc->xdma_rx_channel); |
---|
471 | if (rc != XDMAD_OK) |
---|
472 | goto out; |
---|
473 | |
---|
474 | rc = XDMAD_PrepareChannel(pXdmad, sc->xdma_tx_channel); |
---|
475 | if (rc != XDMAD_OK) |
---|
476 | goto out; |
---|
477 | |
---|
478 | #endif /* __rtems__ */ |
---|
479 | AT91_MCI_LOCK_INIT(sc); |
---|
480 | |
---|
481 | at91_mci_fini(dev); |
---|
482 | at91_mci_init(dev); |
---|
483 | |
---|
484 | /* |
---|
485 | * Allocate DMA tags and maps and bounce buffers. |
---|
486 | * |
---|
487 | * The parms in the tag_create call cause the dmamem_alloc call to |
---|
488 | * create each bounce buffer as a single contiguous buffer of BBSIZE |
---|
489 | * bytes aligned to a 4096 byte boundary. |
---|
490 | * |
---|
491 | * Do not use DMA_COHERENT for these buffers because that maps the |
---|
492 | * memory as non-cachable, which prevents cache line burst fills/writes, |
---|
493 | * which is something we need since we're trying to overlap the |
---|
494 | * byte-swapping with the DMA operations. |
---|
495 | */ |
---|
496 | err = bus_dma_tag_create(bus_get_dma_tag(dev), 4096, 0, |
---|
497 | BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, |
---|
498 | BBSIZE, 1, BBSIZE, 0, NULL, NULL, &sc->dmatag); |
---|
499 | if (err != 0) |
---|
500 | goto out; |
---|
501 | |
---|
502 | for (i = 0; i < BBCOUNT; ++i) { |
---|
503 | err = bus_dmamem_alloc(sc->dmatag, (void **)&sc->bbuf_vaddr[i], |
---|
504 | BUS_DMA_NOWAIT, &sc->bbuf_map[i]); |
---|
505 | if (err != 0) |
---|
506 | goto out; |
---|
507 | } |
---|
508 | |
---|
509 | /* |
---|
510 | * Activate the interrupt |
---|
511 | */ |
---|
512 | err = bus_setup_intr(dev, sc->irq_res, INTR_TYPE_MISC | INTR_MPSAFE, |
---|
513 | NULL, at91_mci_intr, sc, &sc->intrhand); |
---|
514 | if (err) { |
---|
515 | AT91_MCI_LOCK_DESTROY(sc); |
---|
516 | goto out; |
---|
517 | } |
---|
518 | |
---|
519 | /* |
---|
520 | * Allow 4-wire to be initially set via #define. |
---|
521 | * Allow a device hint to override that. |
---|
522 | * Allow a sysctl to override that. |
---|
523 | */ |
---|
524 | #if defined(AT91_MCI_HAS_4WIRE) && AT91_MCI_HAS_4WIRE != 0 |
---|
525 | sc->has_4wire = 1; |
---|
526 | #endif |
---|
527 | resource_int_value(device_get_name(dev), device_get_unit(dev), |
---|
528 | "4wire", &sc->has_4wire); |
---|
529 | SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "4wire", |
---|
530 | CTLFLAG_RW, &sc->has_4wire, 0, "has 4 wire SD Card bus"); |
---|
531 | if (sc->has_4wire) |
---|
532 | sc->sc_cap |= CAP_HAS_4WIRE; |
---|
533 | |
---|
534 | sc->allow_overclock = AT91_MCI_ALLOW_OVERCLOCK; |
---|
535 | resource_int_value(device_get_name(dev), device_get_unit(dev), |
---|
536 | "allow_overclock", &sc->allow_overclock); |
---|
537 | SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "allow_overclock", |
---|
538 | CTLFLAG_RW, &sc->allow_overclock, 0, |
---|
539 | "Allow up to 30MHz clock for 25MHz request when next highest speed 15MHz or less."); |
---|
540 | |
---|
541 | SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "debug", |
---|
542 | CTLFLAG_RWTUN, &mci_debug, 0, "enable debug output"); |
---|
543 | |
---|
544 | /* |
---|
545 | * Our real min freq is master_clock/512, but upper driver layers are |
---|
546 | * going to set the min speed during card discovery, and the right speed |
---|
547 | * for that is 400kHz, so advertise a safe value just under that. |
---|
548 | * |
---|
549 | * For max speed, while the rm9200 manual says the max is 50mhz, it also |
---|
550 | * says it supports only the SD v1.0 spec, which means the real limit is |
---|
551 | * 25mhz. On the other hand, historical use has been to slightly violate |
---|
552 | * the standard by running the bus at 30MHz. For more information on |
---|
553 | * that, see the comments at the top of this file. |
---|
554 | */ |
---|
555 | sc->host.f_min = 375000; |
---|
556 | sc->host.f_max = at91_master_clock / 2; |
---|
557 | if (sc->host.f_max > 25000000) |
---|
558 | sc->host.f_max = 25000000; |
---|
559 | sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340; |
---|
560 | sc->host.caps = 0; |
---|
561 | if (sc->sc_cap & CAP_HAS_4WIRE) |
---|
562 | sc->host.caps |= MMC_CAP_4_BIT_DATA; |
---|
563 | |
---|
564 | child = device_add_child(dev, "mmc", 0); |
---|
565 | device_set_ivars(dev, &sc->host); |
---|
566 | err = bus_generic_attach(dev); |
---|
567 | out: |
---|
568 | if (err) |
---|
569 | at91_mci_deactivate(dev); |
---|
570 | return (err); |
---|
571 | } |
---|
572 | |
---|
573 | static int |
---|
574 | at91_mci_detach(device_t dev) |
---|
575 | { |
---|
576 | struct at91_mci_softc *sc = device_get_softc(dev); |
---|
577 | |
---|
578 | at91_mci_fini(dev); |
---|
579 | at91_mci_deactivate(dev); |
---|
580 | |
---|
581 | bus_dmamem_free(sc->dmatag, sc->bbuf_vaddr[0], sc->bbuf_map[0]); |
---|
582 | bus_dmamem_free(sc->dmatag, sc->bbuf_vaddr[1], sc->bbuf_map[1]); |
---|
583 | bus_dma_tag_destroy(sc->dmatag); |
---|
584 | |
---|
585 | return (EBUSY); /* XXX */ |
---|
586 | } |
---|
587 | |
---|
588 | static int |
---|
589 | at91_mci_activate(device_t dev) |
---|
590 | { |
---|
591 | struct at91_mci_softc *sc; |
---|
592 | int rid; |
---|
593 | |
---|
594 | sc = device_get_softc(dev); |
---|
595 | rid = 0; |
---|
596 | sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, |
---|
597 | RF_ACTIVE); |
---|
598 | if (sc->mem_res == NULL) |
---|
599 | goto errout; |
---|
600 | |
---|
601 | rid = 0; |
---|
602 | sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, |
---|
603 | RF_ACTIVE); |
---|
604 | if (sc->irq_res == NULL) |
---|
605 | goto errout; |
---|
606 | |
---|
607 | return (0); |
---|
608 | errout: |
---|
609 | at91_mci_deactivate(dev); |
---|
610 | return (ENOMEM); |
---|
611 | } |
---|
612 | |
---|
613 | static void |
---|
614 | at91_mci_deactivate(device_t dev) |
---|
615 | { |
---|
616 | struct at91_mci_softc *sc; |
---|
617 | |
---|
618 | sc = device_get_softc(dev); |
---|
619 | if (sc->intrhand) |
---|
620 | bus_teardown_intr(dev, sc->irq_res, sc->intrhand); |
---|
621 | sc->intrhand = NULL; |
---|
622 | bus_generic_detach(sc->dev); |
---|
623 | if (sc->mem_res) |
---|
624 | bus_release_resource(dev, SYS_RES_MEMORY, |
---|
625 | rman_get_rid(sc->mem_res), sc->mem_res); |
---|
626 | sc->mem_res = NULL; |
---|
627 | if (sc->irq_res) |
---|
628 | bus_release_resource(dev, SYS_RES_IRQ, |
---|
629 | rman_get_rid(sc->irq_res), sc->irq_res); |
---|
630 | sc->irq_res = NULL; |
---|
631 | #ifdef __rtems__ |
---|
632 | if (sc->xdma_rx_channel != XDMAD_ALLOC_FAILED) { |
---|
633 | XDMAD_FreeChannel(pXdmad, sc->xdma_rx_channel); |
---|
634 | } |
---|
635 | if (sc->xdma_tx_channel != XDMAD_ALLOC_FAILED) { |
---|
636 | XDMAD_FreeChannel(pXdmad, sc->xdma_tx_channel); |
---|
637 | } |
---|
638 | #endif /* __rtems__ */ |
---|
639 | return; |
---|
640 | } |
---|
641 | |
---|
642 | static int |
---|
643 | at91_mci_is_mci1rev2xx(void) |
---|
644 | { |
---|
645 | |
---|
646 | #ifndef __rtems__ |
---|
647 | switch (soc_info.type) { |
---|
648 | case AT91_T_SAM9260: |
---|
649 | case AT91_T_SAM9263: |
---|
650 | case AT91_T_CAP9: |
---|
651 | case AT91_T_SAM9G10: |
---|
652 | case AT91_T_SAM9G20: |
---|
653 | case AT91_T_SAM9RL: |
---|
654 | return(1); |
---|
655 | default: |
---|
656 | return (0); |
---|
657 | } |
---|
658 | #else /* __rtems__ */ |
---|
659 | /* Currently only supports the SAM V71 */ |
---|
660 | return (1); |
---|
661 | #endif /* __rtems__ */ |
---|
662 | } |
---|
663 | |
---|
/*
 * mmcbr update_ios method: apply the current host settings (bus clock and
 * bus width) from sc->host.ios to the hardware.  Always returns 0.
 */
static int
at91_mci_update_ios(device_t brdev, device_t reqdev)
{
	struct at91_mci_softc *sc;
	struct mmc_ios *ios;
	uint32_t clkdiv;
	uint32_t freq;

	sc = device_get_softc(brdev);
	ios = &sc->host.ios;

	/*
	 * Calculate our closest available clock speed that doesn't exceed the
	 * requested speed.
	 *
	 * When overclocking is allowed, the requested clock is 25MHz, the
	 * computed frequency is 15MHz or smaller and clockdiv is 1, use
	 * clockdiv of 0 to double that.  If less than 12.5MHz, double
	 * regardless of the overclocking setting.
	 *
	 * Whatever we come up with, store it back into ios->clock so that the
	 * upper layer drivers can report the actual speed of the bus.
	 */
	if (ios->clock == 0) {
		/* Clock off: disable the controller entirely. */
		WR4(sc, MCI_CR, MCI_CR_MCIDIS);
		clkdiv = 0;
	} else {
		WR4(sc, MCI_CR, MCI_CR_MCIEN|MCI_CR_PWSEN);
		/*
		 * Bus clock = MCK / ((CLKDIV + 1) * 2); round CLKDIV up so
		 * the resulting frequency never exceeds the request.
		 */
		if ((at91_master_clock % (ios->clock * 2)) == 0)
			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1;
		else
			clkdiv = (at91_master_clock / ios->clock) / 2;
		freq = at91_master_clock / ((clkdiv+1) * 2);
		if (clkdiv == 1 && ios->clock == 25000000 && freq <= 15000000) {
			if (sc->allow_overclock || freq <= 12500000) {
				clkdiv = 0;
				freq = at91_master_clock / ((clkdiv+1) * 2);
			}
		}
		ios->clock = freq;
	}
	if (ios->bus_width == bus_width_4)
		WR4(sc, MCI_SDCR, RD4(sc, MCI_SDCR) | MCI_SDCR_SDCBUS);
	else
		WR4(sc, MCI_SDCR, RD4(sc, MCI_SDCR) & ~MCI_SDCR_SDCBUS);
	/* Update only the CLKDIV field of MR, preserving the other bits. */
	WR4(sc, MCI_MR, (RD4(sc, MCI_MR) & ~MCI_MR_CLKDIV) | clkdiv);
	/* Do we need a settle time here? */
	/* XXX We need to turn the device on/off here with a GPIO pin */
	return (0);
}
---|
714 | |
---|
#ifdef __rtems__
/*
 * Linked-list descriptors for the XDMA transfer, one per block.  (The type
 * name "LinkedListDescriporView1" [sic] comes from the Atmel chip library.)
 */
static LinkedListDescriporView1 dma_desc[MAX_BLOCKS];

/*
 * Build a descriptor chain of 'number_blocks' blocks of 'block_size' bytes
 * between the MCI data register and the buffer at physical address 'paddr'
 * ('len' bytes total) and start the XDMA transfer on the channel for the
 * given direction ('read' true: card -> memory).  Panics on XDMAD errors.
 */
static void
at91_mci_setup_xdma(struct at91_mci_softc *sc, bool read, uint32_t block_size,
    uint32_t number_blocks, bus_addr_t paddr, uint32_t len)
{
	sXdmadCfg *xdma_cfg;
	uint32_t xdma_channel;
	/* Fetch descriptors in view-1 format, updating src and dst params. */
	const uint32_t xdma_cndc = XDMAC_CNDC_NDVIEW_NDV1 |
	    XDMAC_CNDC_NDE_DSCR_FETCH_EN |
	    XDMAC_CNDC_NDSUP_SRC_PARAMS_UPDATED |
	    XDMAC_CNDC_NDDUP_DST_PARAMS_UPDATED;
	/* Bus addresses of the MCI receive and transmit data registers. */
	const uint32_t sa_rdr = (uint32_t)(sc->mem_res->r_bushandle + MCI_RDR);
	const uint32_t da_tdr = (uint32_t)(sc->mem_res->r_bushandle + MCI_TDR);
	const uint32_t xdma_interrupt = XDMAC_CIE_BIE | XDMAC_CIE_DIE |
	    XDMAC_CIE_FIE | XDMAC_CIE_RBIE | XDMAC_CIE_WBIE | XDMAC_CIE_ROIE;
	eXdmadRC rc;
	size_t i;

	/* Pick the prebuilt configuration and channel for the direction. */
	if (read) {
		xdma_cfg = &sc->xdma_rx_cfg;
		xdma_channel = sc->xdma_rx_channel;
	} else {
		xdma_cfg = &sc->xdma_tx_cfg;
		xdma_channel = sc->xdma_tx_channel;
	}

	/* One descriptor per block, each chained to the next. */
	for (i = 0; i < number_blocks; ++i) {
		if (read) {
			dma_desc[i].mbr_sa = sa_rdr;
			dma_desc[i].mbr_da = ((uint32_t)paddr) + i * block_size;
		} else {
			dma_desc[i].mbr_sa = ((uint32_t)paddr) + i * block_size;
			dma_desc[i].mbr_da = da_tdr;
		}
		/* Microblock length is counted in 32-bit words, hence /4. */
		dma_desc[i].mbr_ubc = XDMA_UBC_NVIEW_NDV1 |
		    XDMA_UBC_NDEN_UPDATED | (block_size/4);
		if (i == number_blocks - 1) {
			/* Last descriptor: stop fetching after this one. */
			dma_desc[i].mbr_ubc |= XDMA_UBC_NDE_FETCH_DIS;
			dma_desc[i].mbr_nda = 0;
		} else {
			dma_desc[i].mbr_ubc |= XDMA_UBC_NDE_FETCH_EN;
			dma_desc[i].mbr_nda = (uint32_t) &dma_desc[i+1];
		}
	}

	rc = XDMAD_ConfigureTransfer(pXdmad, xdma_channel, xdma_cfg, xdma_cndc,
	    (uint32_t)dma_desc, xdma_interrupt);
	if (rc != XDMAD_OK)
		panic("Could not configure XDMA: %d.", rc);

	/* FIXME: Is that correct? */
	if (read) {
		rtems_cache_invalidate_multiple_data_lines(paddr, len);
	} else {
		rtems_cache_flush_multiple_data_lines(paddr, len);
	}
	/* The descriptors themselves must be visible to the XDMA engine. */
	rtems_cache_flush_multiple_data_lines(dma_desc, sizeof(dma_desc));

	rc = XDMAD_StartTransfer(pXdmad, xdma_channel);
	if (rc != XDMAD_OK)
		panic("Could not start XDMA: %d.", rc);

}

#endif /* __rtems__ */
---|
/*
 * Program the controller for one MMC/SD command, including any associated
 * data transfer, and start it.  Completion is handled asynchronously by
 * the interrupt handler; this function only sets up registers, DMA, and
 * the interrupt mask, then returns.
 *
 * Caller must hold the driver lock and have sc->req set; sc->curcmd is
 * updated to point at this command.
 */
static void
at91_mci_start_cmd(struct at91_mci_softc *sc, struct mmc_command *cmd)
{
	uint32_t cmdr, mr;
#ifdef __rtems__
	uint32_t number_blocks;
	uint32_t block_size;
#endif /* __rtems__ */
	struct mmc_data *data;

	sc->curcmd = cmd;
	data = cmd->data;

	/* XXX Upper layers don't always set this */
	cmd->mrq = sc->req;

	/* Begin setting up command register. */

	cmdr = cmd->opcode;

	if (sc->host.ios.bus_mode == opendrain)
		cmdr |= MCI_CMDR_OPDCMD;

	/* Set up response handling. Allow max timeout for responses. */

	if (MMC_RSP(cmd->flags) == MMC_RSP_NONE)
		cmdr |= MCI_CMDR_RSPTYP_NO;
	else {
		cmdr |= MCI_CMDR_MAXLAT;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= MCI_CMDR_RSPTYP_136;
		else
			cmdr |= MCI_CMDR_RSPTYP_48;
	}

	/*
	 * If there is no data transfer, just set up the right interrupt mask
	 * and start the command.
	 *
	 * The interrupt mask needs to be CMDRDY plus all non-data-transfer
	 * errors. It's important to leave the transfer-related errors out, to
	 * avoid spurious timeout or crc errors on a STOP command following a
	 * multiblock read. When a multiblock read is in progress, sending a
	 * STOP in the middle of a block occasionally triggers such errors, but
	 * we're totally disinterested in them because we've already gotten all
	 * the data we wanted without error before sending the STOP command.
	 */

	if (data == NULL) {
		uint32_t ier = MCI_SR_CMDRDY |
		    MCI_SR_RTOE | MCI_SR_RENDE |
		    MCI_SR_RCRCE | MCI_SR_RDIRE | MCI_SR_RINDE;

		at91_mci_pdc_disable(sc);

		if (cmd->opcode == MMC_STOP_TRANSMISSION)
			cmdr |= MCI_CMDR_TRCMD_STOP;

		/* Ignore response CRC on CMD2 and ACMD41, per standard. */

		if (cmd->opcode == MMC_SEND_OP_COND ||
		    cmd->opcode == ACMD_SD_SEND_OP_COND)
			ier &= ~MCI_SR_RCRCE;

		if (mci_debug)
			printf("CMDR %x (opcode %d) ARGR %x no data\n",
			    cmdr, cmd->opcode, cmd->arg);

		WR4(sc, MCI_ARGR, cmd->arg);
		WR4(sc, MCI_CMDR, cmdr);
		WR4(sc, MCI_IDR, 0xffffffff);
		WR4(sc, MCI_IER, ier);
		return;
	}

	/* There is data, set up the transfer-related parts of the command. */

	if (data->flags & MMC_DATA_READ)
		cmdr |= MCI_CMDR_TRDIR;

	if (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE))
		cmdr |= MCI_CMDR_TRCMD_START;

	if (data->flags & MMC_DATA_STREAM)
		cmdr |= MCI_CMDR_TRTYP_STREAM;
	else if (data->flags & MMC_DATA_MULTI) {
		cmdr |= MCI_CMDR_TRTYP_MULTIPLE;
		/* Remember direction so stop/err handling can special-case it. */
		sc->flags |= (data->flags & MMC_DATA_READ) ?
		    CMD_MULTIREAD : CMD_MULTIWRITE;
	}

	/*
	 * Disable PDC until we're ready.
	 *
	 * Set block size and turn on PDC mode for dma xfer.
	 * Note that the block size is the smaller of the amount of data to be
	 * transferred, or 512 bytes. The 512 size is fixed by the standard;
	 * smaller blocks are possible, but never larger.
	 */

#ifndef __rtems__
	WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS);

	mr = RD4(sc,MCI_MR) & ~MCI_MR_BLKLEN;
	mr |= min(data->len, 512) << 16;
	WR4(sc, MCI_MR, mr | MCI_MR_PDCMODE|MCI_MR_PDCPADV);
#else /* __rtems__ */
	/* RTEMS build uses the XDMA engine rather than the PDC; block count
	 * and size are programmed into MCI_BLKR instead of MCI_MR. */
	mr = RD4(sc,MCI_MR);
	WR4(sc, MCI_MR, mr | MCI_MR_PDCPADV);

	WR4(sc, MCI_DMA, MCI_DMA_DMAEN | MCI_DMA_CHKSIZE_1);

	block_size = min(data->len, 512);
	number_blocks = data->len / block_size;
	WR4(sc, MCI_BLKR, block_size << 16 | number_blocks);
#endif /* __rtems__ */

	/*
	 * Set up DMA.
	 *
	 * Use bounce buffers even if we don't need to byteswap, because doing
	 * multi-block IO with large DMA buffers is way fast (compared to
	 * single-block IO), even after incurring the overhead of also copying
	 * from/to the caller's buffers (which may be in non-contiguous physical
	 * pages).
	 *
	 * In an ideal non-byteswap world we could create a dma tag that allows
	 * for discontiguous segments and do the IO directly from/to the
	 * caller's buffer(s), using ENDRX/ENDTX interrupts to chain the
	 * discontiguous buffers through the PDC. Someday.
	 *
	 * If a read is bigger than 2k, split it in half so that we can start
	 * byte-swapping the first half while the second half is on the wire.
	 * It would be best if we could split it into 8k chunks, but we can't
	 * always keep up with the byte-swapping due to other system activity,
	 * and if an RXBUFF interrupt happens while we're still handling the
	 * byte-swap from the prior buffer (IE, we haven't returned from
	 * handling the prior interrupt yet), then data will get dropped on the
	 * floor and we can't easily recover from that. The right fix for that
	 * would be to have the interrupt handling only keep the DMA flowing and
	 * enqueue filled buffers to be byte-swapped in a non-interrupt context.
	 * Even that won't work on the write side of things though; in that
	 * context we have to have all the data ready to go before starting the
	 * dma.
	 *
	 * XXX what about stream transfers?
	 */
	sc->xfer_offset = 0;
	sc->bbuf_curidx = 0;

	if (data->flags & (MMC_DATA_READ | MMC_DATA_WRITE)) {
		uint32_t len;
		uint32_t remaining = data->len;
		bus_addr_t paddr;
		int err;

		if (remaining > (BBCOUNT*BBSIZE))
			panic("IO read size exceeds MAXDATA\n");

		if (data->flags & MMC_DATA_READ) {
			/*
			 * Non-RTEMS: split reads > 2k across the two bounce
			 * buffers.  RTEMS: always a single transfer covering
			 * the whole length (see FIXME below).
			 */
#ifndef __rtems__
			if (remaining > 2048) // XXX
				len = remaining / 2;
			else
#else
			/* FIXME: This reduces performance. Set up DMA in two
			 * parts instead like done on AT91. */
#endif /* __rtems__ */
				len = remaining;
			err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[0],
			    sc->bbuf_vaddr[0], len, at91_mci_getaddr,
			    &paddr, BUS_DMA_NOWAIT);
			if (err != 0)
				panic("IO read dmamap_load failed\n");
			bus_dmamap_sync(sc->dmatag, sc->bbuf_map[0],
			    BUS_DMASYNC_PREREAD);
#ifndef __rtems__
			WR4(sc, PDC_RPR, paddr);
			WR4(sc, PDC_RCR, len / 4);
			sc->bbuf_len[0] = len;
			remaining -= len;
			if (remaining == 0) {
				sc->bbuf_len[1] = 0;
			} else {
				len = remaining;
				err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[1],
				    sc->bbuf_vaddr[1], len, at91_mci_getaddr,
				    &paddr, BUS_DMA_NOWAIT);
				if (err != 0)
					panic("IO read dmamap_load failed\n");
				bus_dmamap_sync(sc->dmatag, sc->bbuf_map[1],
				    BUS_DMASYNC_PREREAD);
				WR4(sc, PDC_RNPR, paddr);
				WR4(sc, PDC_RNCR, len / 4);
				sc->bbuf_len[1] = len;
				remaining -= len;
			}
			WR4(sc, PDC_PTCR, PDC_PTCR_RXTEN);
#else /* __rtems__ */
			at91_mci_setup_xdma(sc, true, block_size,
			    number_blocks, paddr, len);

			sc->bbuf_len[0] = len;
			remaining -= len;
			sc->bbuf_len[1] = 0;
			if (remaining != 0)
				panic("Still rx-data left. This should never happen.");
#endif /* __rtems__ */
		} else {
			len = min(BBSIZE, remaining);
			/* Byte-swap into the bounce buffer before the DMA. */
			at91_bswap_buf(sc, sc->bbuf_vaddr[0], data->data, len);
			err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[0],
			    sc->bbuf_vaddr[0], len, at91_mci_getaddr,
			    &paddr, BUS_DMA_NOWAIT);
			if (err != 0)
				panic("IO write dmamap_load failed\n");
			bus_dmamap_sync(sc->dmatag, sc->bbuf_map[0],
			    BUS_DMASYNC_PREWRITE);
#ifndef __rtems__
			/*
			 * Erratum workaround: PDC transfer length on a write
			 * must not be smaller than 12 bytes (3 words); only
			 * blklen bytes (set above) are actually transferred.
			 */
			WR4(sc, PDC_TPR,paddr);
			WR4(sc, PDC_TCR, (len < 12) ? 3 : len / 4);
			sc->bbuf_len[0] = len;
			remaining -= len;
			if (remaining == 0) {
				sc->bbuf_len[1] = 0;
			} else {
				len = remaining;
				at91_bswap_buf(sc, sc->bbuf_vaddr[1],
				    ((char *)data->data)+BBSIZE, len);
				err = bus_dmamap_load(sc->dmatag, sc->bbuf_map[1],
				    sc->bbuf_vaddr[1], len, at91_mci_getaddr,
				    &paddr, BUS_DMA_NOWAIT);
				if (err != 0)
					panic("IO write dmamap_load failed\n");
				bus_dmamap_sync(sc->dmatag, sc->bbuf_map[1],
				    BUS_DMASYNC_PREWRITE);
				WR4(sc, PDC_TNPR, paddr);
				WR4(sc, PDC_TNCR, (len < 12) ? 3 : len / 4);
				sc->bbuf_len[1] = len;
				remaining -= len;
			}
			/* do not enable PDC xfer until CMDRDY asserted */
#else /* __rtems__ */
			at91_mci_setup_xdma(sc, false, block_size,
			    number_blocks, paddr, len);

			sc->bbuf_len[0] = len;
			remaining -= len;
			sc->bbuf_len[1] = 0;
			if (remaining != 0)
				panic("Still tx-data left. This should never happen.");

#endif /* __rtems__ */
		}
		data->xfer_len = 0; /* XXX what's this? appears to be unused. */
	}

	if (mci_debug)
		printf("CMDR %x (opcode %d) ARGR %x with data len %d\n",
		    cmdr, cmd->opcode, cmd->arg, cmd->data->len);

	WR4(sc, MCI_ARGR, cmd->arg);
	WR4(sc, MCI_CMDR, cmdr);
	WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_CMDRDY);
}
---|
1052 | |
---|
1053 | static void |
---|
1054 | at91_mci_next_operation(struct at91_mci_softc *sc) |
---|
1055 | { |
---|
1056 | struct mmc_request *req; |
---|
1057 | |
---|
1058 | req = sc->req; |
---|
1059 | if (req == NULL) |
---|
1060 | return; |
---|
1061 | |
---|
1062 | if (sc->flags & PENDING_CMD) { |
---|
1063 | sc->flags &= ~PENDING_CMD; |
---|
1064 | at91_mci_start_cmd(sc, req->cmd); |
---|
1065 | return; |
---|
1066 | } else if (sc->flags & PENDING_STOP) { |
---|
1067 | sc->flags &= ~PENDING_STOP; |
---|
1068 | at91_mci_start_cmd(sc, req->stop); |
---|
1069 | return; |
---|
1070 | } |
---|
1071 | |
---|
1072 | WR4(sc, MCI_IDR, 0xffffffff); |
---|
1073 | sc->req = NULL; |
---|
1074 | sc->curcmd = NULL; |
---|
1075 | //printf("req done\n"); |
---|
1076 | req->done(req); |
---|
1077 | } |
---|
1078 | |
---|
1079 | static int |
---|
1080 | at91_mci_request(device_t brdev, device_t reqdev, struct mmc_request *req) |
---|
1081 | { |
---|
1082 | struct at91_mci_softc *sc = device_get_softc(brdev); |
---|
1083 | |
---|
1084 | AT91_MCI_LOCK(sc); |
---|
1085 | if (sc->req != NULL) { |
---|
1086 | AT91_MCI_UNLOCK(sc); |
---|
1087 | return (EBUSY); |
---|
1088 | } |
---|
1089 | //printf("new req\n"); |
---|
1090 | sc->req = req; |
---|
1091 | sc->flags = PENDING_CMD; |
---|
1092 | if (sc->req->stop) |
---|
1093 | sc->flags |= PENDING_STOP; |
---|
1094 | at91_mci_next_operation(sc); |
---|
1095 | AT91_MCI_UNLOCK(sc); |
---|
1096 | return (0); |
---|
1097 | } |
---|
1098 | |
---|
1099 | static int |
---|
1100 | at91_mci_get_ro(device_t brdev, device_t reqdev) |
---|
1101 | { |
---|
1102 | return (0); |
---|
1103 | } |
---|
1104 | |
---|
1105 | static int |
---|
1106 | at91_mci_acquire_host(device_t brdev, device_t reqdev) |
---|
1107 | { |
---|
1108 | struct at91_mci_softc *sc = device_get_softc(brdev); |
---|
1109 | int err = 0; |
---|
1110 | |
---|
1111 | AT91_MCI_LOCK(sc); |
---|
1112 | while (sc->bus_busy) |
---|
1113 | msleep(sc, &sc->sc_mtx, PZERO, "mciah", hz / 5); |
---|
1114 | sc->bus_busy++; |
---|
1115 | AT91_MCI_UNLOCK(sc); |
---|
1116 | return (err); |
---|
1117 | } |
---|
1118 | |
---|
1119 | static int |
---|
1120 | at91_mci_release_host(device_t brdev, device_t reqdev) |
---|
1121 | { |
---|
1122 | struct at91_mci_softc *sc = device_get_softc(brdev); |
---|
1123 | |
---|
1124 | AT91_MCI_LOCK(sc); |
---|
1125 | sc->bus_busy--; |
---|
1126 | wakeup(sc); |
---|
1127 | AT91_MCI_UNLOCK(sc); |
---|
1128 | return (0); |
---|
1129 | } |
---|
1130 | |
---|
/*
 * Handle completion of one DMA buffer of a read (ENDRX on FreeBSD,
 * XFRDONE on RTEMS): byte-swap the finished bounce buffer into the
 * caller's buffer, then either complete the command or re-arm for the
 * next buffer.  Runs from the interrupt handler with the lock held.
 */
static void
at91_mci_read_done(struct at91_mci_softc *sc, uint32_t sr)
{
	struct mmc_command *cmd = sc->curcmd;
	char * dataptr = (char *)cmd->data->data;
	uint32_t curidx = sc->bbuf_curidx;
	uint32_t len = sc->bbuf_len[curidx];

	/*
	 * We arrive here when a DMA transfer for a read is done, whether it's
	 * a single or multi-block read.
	 *
	 * We byte-swap the buffer that just completed, and if that is the
	 * last buffer that's part of this read then we move on to the next
	 * operation, otherwise we wait for another ENDRX for the next buffer.
	 */

#ifndef __rtems__
	bus_dmamap_sync(sc->dmatag, sc->bbuf_map[curidx], BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->dmatag, sc->bbuf_map[curidx]);
#endif /* __rtems__ */

	/* Copy (with byteswap if the part needs it) into the caller's buffer. */
	at91_bswap_buf(sc, dataptr + sc->xfer_offset, sc->bbuf_vaddr[curidx], len);

	if (mci_debug) {
		printf("read done sr %x curidx %d len %d xfer_offset %d\n",
		    sr, curidx, len, sc->xfer_offset);
	}

	sc->xfer_offset += len;
	sc->bbuf_curidx = !curidx; /* swap buffers */

	/*
	 * If we've transferred all the data, move on to the next operation.
	 *
	 * If we're still transferring the last buffer, RNCR is already zero but
	 * we have to write a zero anyway to clear the ENDRX status so we don't
	 * re-interrupt until the last buffer is done.
	 */
	if (sc->xfer_offset == cmd->data->len) {
		WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS);
		cmd->error = MMC_ERR_NONE;
		at91_mci_next_operation(sc);
	} else {
		WR4(sc, PDC_RNCR, 0);
#ifndef __rtems__
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_ENDRX);
#else /* __rtems__ */
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_XFRDONE);
#endif /* __rtems__ */
	}
}
---|
1183 | |
---|
/*
 * Handle completion of the entire DMA transfer for a write (BLKE seen
 * after TXBUFE/XFRDONE): tear down the DMA mapping and either finish the
 * command now or wait for the card to deassert busy (NOTBUSY).
 * Runs from the interrupt handler with the lock held.
 */
static void
at91_mci_write_done(struct at91_mci_softc *sc, uint32_t sr)
{
	struct mmc_command *cmd = sc->curcmd;

	/*
	 * We arrive here when the entire DMA transfer for a write is done,
	 * whether it's a single or multi-block write. If it's multi-block we
	 * have to immediately move on to the next operation which is to send
	 * the stop command. If it's a single-block transfer we need to wait
	 * for NOTBUSY, but if that's already asserted we can avoid another
	 * interrupt and just move on to completing the request right away.
	 */

	WR4(sc, PDC_PTCR, PDC_PTCR_RXTDIS | PDC_PTCR_TXTDIS);

	bus_dmamap_sync(sc->dmatag, sc->bbuf_map[sc->bbuf_curidx],
	    BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->dmatag, sc->bbuf_map[sc->bbuf_curidx]);

	if ((cmd->data->flags & MMC_DATA_MULTI) || (sr & MCI_SR_NOTBUSY)) {
		cmd->error = MMC_ERR_NONE;
		at91_mci_next_operation(sc);
	} else {
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY);
	}
}
---|
1211 | |
---|
1212 | static void |
---|
1213 | at91_mci_notbusy(struct at91_mci_softc *sc) |
---|
1214 | { |
---|
1215 | struct mmc_command *cmd = sc->curcmd; |
---|
1216 | |
---|
1217 | /* |
---|
1218 | * We arrive here by either completion of a single-block write, or |
---|
1219 | * completion of the stop command that ended a multi-block write (and, |
---|
1220 | * I suppose, after a card-select or erase, but I haven't tested |
---|
1221 | * those). Anyway, we're done and it's time to move on to the next |
---|
1222 | * command. |
---|
1223 | */ |
---|
1224 | |
---|
1225 | cmd->error = MMC_ERR_NONE; |
---|
1226 | at91_mci_next_operation(sc); |
---|
1227 | } |
---|
1228 | |
---|
/*
 * Post-CMDRDY handling for MMC_STOP_TRANSMISSION: apply the hardware
 * workarounds needed after stopping a multi-block transfer, then
 * complete the command.  Runs from the interrupt handler with the lock
 * held.
 */
static void
at91_mci_stop_done(struct at91_mci_softc *sc, uint32_t sr)
{
	struct mmc_command *cmd = sc->curcmd;

	/*
	 * We arrive here after receiving CMDRDY for a MMC_STOP_TRANSMISSION
	 * command. Depending on the operation being stopped, we may have to
	 * do some unusual things to work around hardware bugs.
	 */

	/*
	 * This is known to be true of at91rm9200 hardware; it may or may not
	 * apply to more recent chips:
	 *
	 * After stopping a multi-block write, the NOTBUSY bit in MCI_SR does
	 * not properly reflect the actual busy state of the card as signaled
	 * on the DAT0 line; it always claims the card is not-busy. If we
	 * believe that and let operations continue, following commands will
	 * fail with response timeouts (except of course MMC_SEND_STATUS -- it
	 * indicates the card is busy in the PRG state, which was the smoking
	 * gun that showed MCI_SR NOTBUSY was not tracking DAT0 correctly).
	 *
	 * The atmel docs are emphatic: "This flag [NOTBUSY] must be used only
	 * for Write Operations." I guess technically since we sent a stop
	 * it's not a write operation anymore. But then just what did they
	 * think it meant for the stop command to have "...an optional busy
	 * signal transmitted on the data line" according to the SD spec?
	 *
	 * I tried a variety of things to un-wedge the MCI and get the status
	 * register to reflect NOTBUSY correctly again, but the only thing
	 * that worked was a full device reset. It feels like an awfully big
	 * hammer, but doing a full reset after every multiblock write is
	 * still faster than doing single-block IO (by almost two orders of
	 * magnitude: 20KB/sec improves to about 1.8MB/sec best case).
	 *
	 * After doing the reset, wait for a NOTBUSY interrupt before
	 * continuing with the next operation.
	 *
	 * This workaround breaks multiwrite on the rev2xx parts, but some other
	 * workaround is needed.
	 */
	if ((sc->flags & CMD_MULTIWRITE) && (sc->sc_cap & CAP_NEEDS_BYTESWAP)) {
		at91_mci_reset(sc);
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY);
		return;
	}

	/*
	 * This is known to be true of at91rm9200 hardware; it may or may not
	 * apply to more recent chips:
	 *
	 * After stopping a multi-block read, loop to read and discard any
	 * data that coasts in after we sent the stop command. The docs don't
	 * say anything about it, but empirical testing shows that 1-3
	 * additional words of data get buffered up in some unmentioned
	 * internal fifo and if we don't read and discard them here they end
	 * up on the front of the next read DMA transfer we do.
	 *
	 * This appears to be unnecessary for rev2xx parts.
	 */
	if ((sc->flags & CMD_MULTIREAD) && (sc->sc_cap & CAP_NEEDS_BYTESWAP)) {
		/* NOTE(review): this `sr` intentionally shadows the parameter;
		 * only the freshly-read status matters in this loop. */
		uint32_t sr;
		/* NOTE(review): `count` is write-only -- kept, presumably, as
		 * a debugging aid; its value is never consumed. */
		int count = 0;

		do {
			sr = RD4(sc, MCI_SR);
			if (sr & MCI_SR_RXRDY) {
				RD4(sc, MCI_RDR);
				++count;
			}
		} while (sr & MCI_SR_RXRDY);
		at91_mci_reset(sc);
	}

	cmd->error = MMC_ERR_NONE;
	at91_mci_next_operation(sc);

}
---|
1308 | |
---|
/*
 * CMDRDY interrupt handler: runs at the end of every command.  Captures
 * the response registers, then dispatches -- stop-command cleanup,
 * busy-wait for R1b commands, interrupt re-arm for data transfers, or
 * straight completion.  Runs from the interrupt handler with the lock
 * held; tolerates a NULL curcmd.
 */
static void
at91_mci_cmdrdy(struct at91_mci_softc *sc, uint32_t sr)
{
	struct mmc_command *cmd = sc->curcmd;
	int i;

	if (cmd == NULL)
		return;

	/*
	 * We get here at the end of EVERY command. We retrieve the command
	 * response (if any) then decide what to do next based on the command.
	 */

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* 136-bit responses occupy four response registers. */
		for (i = 0; i < ((cmd->flags & MMC_RSP_136) ? 4 : 1); i++) {
			cmd->resp[i] = RD4(sc, MCI_RSPR + i * 4);
			if (mci_debug)
				printf("RSPR[%d] = %x sr=%x\n", i, cmd->resp[i], sr);
		}
	}

	/*
	 * If this was a stop command, go handle the various special
	 * conditions (read: bugs) that have to be dealt with following a stop.
	 */
	if (cmd->opcode == MMC_STOP_TRANSMISSION) {
		at91_mci_stop_done(sc, sr);
		return;
	}

	/*
	 * If this command can continue to assert BUSY beyond the response then
	 * we need to wait for NOTBUSY before the command is really done.
	 *
	 * Note that this may not work properly on the at91rm9200. It certainly
	 * doesn't work for the STOP command that follows a multi-block write,
	 * so post-stop CMDRDY is handled separately; see the special handling
	 * in at91_mci_stop_done().
	 *
	 * Beside STOP, there are other R1B-type commands that use the busy
	 * signal after CMDRDY: CMD7 (card select), CMD28-29 (write protect),
	 * CMD38 (erase). I haven't tested any of them, but I rather expect
	 * them all to have the same sort of problem with MCI_SR not actually
	 * reflecting the state of the DAT0-line busy indicator. So this code
	 * may need to grow some sort of special handling for them too. (This
	 * just in: CMD7 isn't a problem right now because dev/mmc.c incorrectly
	 * sets the response flags to R1 rather than R1B.) XXX
	 */
	if ((cmd->flags & MMC_RSP_BUSY)) {
		WR4(sc, MCI_IER, MCI_SR_ERROR | MCI_SR_NOTBUSY);
		return;
	}

	/*
	 * If there is a data transfer with this command, then...
	 * - If it's a read, we need to wait for ENDRX.
	 * - If it's a write, now is the time to enable the PDC, and we need
	 *   to wait for a BLKE that follows a TXBUFE, because if we're doing
	 *   a split transfer we get a BLKE after the first half (when TPR/TCR
	 *   get loaded from TNPR/TNCR). So first we wait for the TXBUFE, and
	 *   the handling for that interrupt will then invoke the wait for the
	 *   subsequent BLKE which indicates actual completion.
	 */
	if (cmd->data) {
		uint32_t ier;
#ifndef __rtems__
		if (cmd->data->flags & MMC_DATA_READ) {
			ier = MCI_SR_ENDRX;
		} else {
			ier = MCI_SR_TXBUFE;
			WR4(sc, PDC_PTCR, PDC_PTCR_TXTEN);
		}
#else /* __rtems__ */
		/* The XDMA path signals both directions with XFRDONE. */
		ier = MCI_SR_XFRDONE;
#endif /* __rtems__ */
		WR4(sc, MCI_IER, MCI_SR_ERROR | ier);
		return;
	}

	/*
	 * If we made it to here, we don't need to wait for anything more for
	 * the current command, move on to the next command (will complete the
	 * request if there is no next command).
	 */
	cmd->error = MMC_ERR_NONE;
	at91_mci_next_operation(sc);
}
---|
1397 | |
---|
/*
 * Main interrupt handler: reads and masks the active status bits, routes
 * error conditions to request completion, and dispatches the individual
 * completion events (read done, write done, not-busy, command ready).
 *
 * NOTE(review): the error path dereferences `cmd` without a NULL check;
 * this assumes an enabled error interrupt can only fire while a command
 * is outstanding -- confirm against the IER/IDR discipline above.
 */
static void
at91_mci_intr(void *arg)
{
	struct at91_mci_softc *sc = (struct at91_mci_softc*)arg;
	struct mmc_command *cmd = sc->curcmd;
	uint32_t sr, isr;

	AT91_MCI_LOCK(sc);

	sr = RD4(sc, MCI_SR);
	/* Only act on interrupt sources that are currently enabled. */
	isr = sr & RD4(sc, MCI_IMR);

	if (mci_debug)
		printf("i 0x%x sr 0x%x\n", isr, sr);

	/*
	 * All interrupts are one-shot; disable it now.
	 * The next operation will re-enable whatever interrupts it wants.
	 */
	WR4(sc, MCI_IDR, isr);
	if (isr & MCI_SR_ERROR) {
		/* Map the hardware error bits onto the mmc error codes. */
		if (isr & (MCI_SR_RTOE | MCI_SR_DTOE))
			cmd->error = MMC_ERR_TIMEOUT;
		else if (isr & (MCI_SR_RCRCE | MCI_SR_DCRCE))
			cmd->error = MMC_ERR_BADCRC;
		else if (isr & (MCI_SR_OVRE | MCI_SR_UNRE))
			cmd->error = MMC_ERR_FIFO;
		else
			cmd->error = MMC_ERR_FAILED;
		/*
		 * CMD8 is used to probe for SDHC cards, a standard SD card
		 * will get a response timeout; don't report it because it's a
		 * normal and expected condition.  One might argue that all
		 * error reporting should be left to higher levels, but when
		 * they report at all it's always EIO, which isn't very
		 * helpful. XXX bootverbose?
		 */
		if (cmd->opcode != 8) {
			device_printf(sc->dev,
			    "IO error; status MCI_SR = 0x%b cmd opcode = %d%s\n",
			    sr, MCI_SR_BITSTRING, cmd->opcode,
			    (cmd->opcode != 12) ? "" :
			    (sc->flags & CMD_MULTIREAD) ? " after read" : " after write");
			/* XXX not sure RTOE needs a full reset, just a retry */
			at91_mci_reset(sc);
		}
		at91_mci_next_operation(sc);
	} else {
#ifndef __rtems__
		if (isr & MCI_SR_TXBUFE) {
//			printf("TXBUFE\n");
			/*
			 * We need to wait for a BLKE that follows TXBUFE
			 * (intermediate BLKEs might happen after ENDTXes if
			 * we're chaining multiple buffers).  If BLKE is also
			 * asserted at the time we get TXBUFE, we can avoid
			 * another interrupt and process it right away, below.
			 */
			if (sr & MCI_SR_BLKE)
				isr |= MCI_SR_BLKE;
			else
				WR4(sc, MCI_IER, MCI_SR_BLKE);
		}
		if (isr & MCI_SR_RXBUFF) {
//			printf("RXBUFF\n");
		}
		if (isr & MCI_SR_ENDTX) {
//			printf("ENDTX\n");
		}
		if (isr & MCI_SR_ENDRX) {
//			printf("ENDRX\n");
			at91_mci_read_done(sc, sr);
		}
#else /* __rtems__ */
		if (isr & MCI_SR_XFRDONE) {
			/* NOTE(review): this `cmd` shadows the outer one;
			 * both read sc->curcmd, so the values agree. */
			struct mmc_command *cmd = sc->curcmd;
			if (cmd->data->flags & MMC_DATA_READ) {
				at91_mci_read_done(sc, sr);
			} else {
				/* Write: wait for (or fall through to) BLKE,
				 * mirroring the TXBUFE handling above. */
				if (sr & MCI_SR_BLKE)
					isr |= MCI_SR_BLKE;
				else
					WR4(sc, MCI_IER, MCI_SR_BLKE);
			}
		}
#endif /* __rtems__ */
		if (isr & MCI_SR_NOTBUSY) {
//			printf("NOTBUSY\n");
			at91_mci_notbusy(sc);
		}
		if (isr & MCI_SR_DTIP) {
//			printf("Data transfer in progress\n");
		}
		if (isr & MCI_SR_BLKE) {
//			printf("Block transfer end\n");
			at91_mci_write_done(sc, sr);
		}
		if (isr & MCI_SR_TXRDY) {
//			printf("Ready to transmit\n");
		}
		if (isr & MCI_SR_RXRDY) {
//			printf("Ready to receive\n");
		}
		if (isr & MCI_SR_CMDRDY) {
//			printf("Command ready\n");
			at91_mci_cmdrdy(sc, sr);
		}
	}
	AT91_MCI_UNLOCK(sc);
}
---|
1508 | |
---|
1509 | static int |
---|
1510 | at91_mci_read_ivar(device_t bus, device_t child, int which, uintptr_t *result) |
---|
1511 | { |
---|
1512 | struct at91_mci_softc *sc = device_get_softc(bus); |
---|
1513 | |
---|
1514 | switch (which) { |
---|
1515 | default: |
---|
1516 | return (EINVAL); |
---|
1517 | case MMCBR_IVAR_BUS_MODE: |
---|
1518 | *(int *)result = sc->host.ios.bus_mode; |
---|
1519 | break; |
---|
1520 | case MMCBR_IVAR_BUS_WIDTH: |
---|
1521 | *(int *)result = sc->host.ios.bus_width; |
---|
1522 | break; |
---|
1523 | case MMCBR_IVAR_CHIP_SELECT: |
---|
1524 | *(int *)result = sc->host.ios.chip_select; |
---|
1525 | break; |
---|
1526 | case MMCBR_IVAR_CLOCK: |
---|
1527 | *(int *)result = sc->host.ios.clock; |
---|
1528 | break; |
---|
1529 | case MMCBR_IVAR_F_MIN: |
---|
1530 | *(int *)result = sc->host.f_min; |
---|
1531 | break; |
---|
1532 | case MMCBR_IVAR_F_MAX: |
---|
1533 | *(int *)result = sc->host.f_max; |
---|
1534 | break; |
---|
1535 | case MMCBR_IVAR_HOST_OCR: |
---|
1536 | *(int *)result = sc->host.host_ocr; |
---|
1537 | break; |
---|
1538 | case MMCBR_IVAR_MODE: |
---|
1539 | *(int *)result = sc->host.mode; |
---|
1540 | break; |
---|
1541 | case MMCBR_IVAR_OCR: |
---|
1542 | *(int *)result = sc->host.ocr; |
---|
1543 | break; |
---|
1544 | case MMCBR_IVAR_POWER_MODE: |
---|
1545 | *(int *)result = sc->host.ios.power_mode; |
---|
1546 | break; |
---|
1547 | case MMCBR_IVAR_VDD: |
---|
1548 | *(int *)result = sc->host.ios.vdd; |
---|
1549 | break; |
---|
1550 | case MMCBR_IVAR_CAPS: |
---|
1551 | if (sc->has_4wire) { |
---|
1552 | sc->sc_cap |= CAP_HAS_4WIRE; |
---|
1553 | sc->host.caps |= MMC_CAP_4_BIT_DATA; |
---|
1554 | } else { |
---|
1555 | sc->sc_cap &= ~CAP_HAS_4WIRE; |
---|
1556 | sc->host.caps &= ~MMC_CAP_4_BIT_DATA; |
---|
1557 | } |
---|
1558 | *(int *)result = sc->host.caps; |
---|
1559 | break; |
---|
1560 | #ifdef __rtems__ |
---|
1561 | case MMCBR_IVAR_TIMING: |
---|
1562 | *result = sc->host.ios.timing; |
---|
1563 | break; |
---|
1564 | #endif /* __rtems__ */ |
---|
1565 | case MMCBR_IVAR_MAX_DATA: |
---|
1566 | /* |
---|
1567 | * Something is wrong with the 2x parts and multiblock, so |
---|
1568 | * just do 1 block at a time for now, which really kills |
---|
1569 | * performance. |
---|
1570 | */ |
---|
1571 | if (sc->sc_cap & CAP_MCI1_REV2XX) |
---|
1572 | *(int *)result = 1; |
---|
1573 | else |
---|
1574 | *(int *)result = MAX_BLOCKS; |
---|
1575 | break; |
---|
1576 | } |
---|
1577 | return (0); |
---|
1578 | } |
---|
1579 | |
---|
1580 | static int |
---|
1581 | at91_mci_write_ivar(device_t bus, device_t child, int which, uintptr_t value) |
---|
1582 | { |
---|
1583 | struct at91_mci_softc *sc = device_get_softc(bus); |
---|
1584 | |
---|
1585 | switch (which) { |
---|
1586 | default: |
---|
1587 | return (EINVAL); |
---|
1588 | case MMCBR_IVAR_BUS_MODE: |
---|
1589 | sc->host.ios.bus_mode = value; |
---|
1590 | break; |
---|
1591 | case MMCBR_IVAR_BUS_WIDTH: |
---|
1592 | sc->host.ios.bus_width = value; |
---|
1593 | break; |
---|
1594 | case MMCBR_IVAR_CHIP_SELECT: |
---|
1595 | sc->host.ios.chip_select = value; |
---|
1596 | break; |
---|
1597 | case MMCBR_IVAR_CLOCK: |
---|
1598 | sc->host.ios.clock = value; |
---|
1599 | break; |
---|
1600 | case MMCBR_IVAR_MODE: |
---|
1601 | sc->host.mode = value; |
---|
1602 | break; |
---|
1603 | case MMCBR_IVAR_OCR: |
---|
1604 | sc->host.ocr = value; |
---|
1605 | break; |
---|
1606 | case MMCBR_IVAR_POWER_MODE: |
---|
1607 | sc->host.ios.power_mode = value; |
---|
1608 | break; |
---|
1609 | case MMCBR_IVAR_VDD: |
---|
1610 | sc->host.ios.vdd = value; |
---|
1611 | break; |
---|
1612 | #ifdef __rtems__ |
---|
1613 | case MMCBR_IVAR_TIMING: |
---|
1614 | sc->host.ios.timing = value; |
---|
1615 | break; |
---|
1616 | #endif /* __rtems__ */ |
---|
1617 | /* These are read-only */ |
---|
1618 | case MMCBR_IVAR_CAPS: |
---|
1619 | case MMCBR_IVAR_HOST_OCR: |
---|
1620 | case MMCBR_IVAR_F_MIN: |
---|
1621 | case MMCBR_IVAR_F_MAX: |
---|
1622 | case MMCBR_IVAR_MAX_DATA: |
---|
1623 | return (EINVAL); |
---|
1624 | } |
---|
1625 | return (0); |
---|
1626 | } |
---|
1627 | |
---|
/*
 * newbus method table wiring this driver into the device, bus, and
 * mmc bridge (mmcbr) kobj interfaces.
 */
static device_method_t at91_mci_methods[] = {
	/* device_if */
	DEVMETHOD(device_probe, at91_mci_probe),
	DEVMETHOD(device_attach, at91_mci_attach),
	DEVMETHOD(device_detach, at91_mci_detach),

	/* Bus interface */
	DEVMETHOD(bus_read_ivar,	at91_mci_read_ivar),
	DEVMETHOD(bus_write_ivar,	at91_mci_write_ivar),

	/* mmcbr_if */
	DEVMETHOD(mmcbr_update_ios, at91_mci_update_ios),
	DEVMETHOD(mmcbr_request, at91_mci_request),
	DEVMETHOD(mmcbr_get_ro, at91_mci_get_ro),
	DEVMETHOD(mmcbr_acquire_host, at91_mci_acquire_host),
	DEVMETHOD(mmcbr_release_host, at91_mci_release_host),

	DEVMETHOD_END
};
---|
1647 | |
---|
/* Driver description: name, method table, and per-device softc size. */
static driver_t at91_mci_driver = {
	"at91_mci",
	at91_mci_methods,
	sizeof(struct at91_mci_softc),
};
---|
1653 | |
---|
static devclass_t at91_mci_devclass;

#ifndef __rtems__
/*
 * On FreeBSD the controller attaches under simplebus (FDT systems) or the
 * legacy atmelarm bus; on RTEMS it hangs directly off nexus.
 */
#ifdef FDT
DRIVER_MODULE(at91_mci, simplebus, at91_mci_driver, at91_mci_devclass, NULL,
    NULL);
#else
DRIVER_MODULE(at91_mci, atmelarm, at91_mci_driver, at91_mci_devclass, NULL,
    NULL);
#endif

MMC_DECLARE_BRIDGE(at91_mci);
#else /* __rtems__ */
DRIVER_MODULE(at91_mci, nexus, at91_mci_driver, at91_mci_devclass, NULL, NULL);
#endif /* __rtems__ */
/* Attach the mmc bus layer as a child of this bridge. */
DRIVER_MODULE(mmc, at91_mci, mmc_driver, mmc_devclass, NULL, NULL);
MODULE_DEPEND(at91_mci, mmc, 1, 1, 1);
#endif /* __rtems__ && LIBBSP_ARM_ATSAM_BSP_H */
---|