source: rtems-libbsd/rtemsbsd/rtems/rtems-bsd-bus-dma.c @ fb683f7

Last change on this file was fb683f7, checked in by Sebastian Huber <sebastian.huber@…>, on 11/25/14 at 11:50:29

BUS_DMA(9): Support BUS_DMA_COHERENT

/**
 * @file
 *
 * @ingroup rtems_bsd_rtems
 *
 * @brief Implementation of the FreeBSD bus_dma(9) API for RTEMS.
 *
 * File origin from FreeBSD "sys/powerpc/powerpc/busdma_machdep.c".
 */

/*
 * Copyright (c) 2009-2012 embedded brains GmbH.
 * All rights reserved.
 *
 *  embedded brains GmbH
 *  Obere Lagerstr. 30
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <machine/rtems-bsd-kernel-space.h>
#include <machine/rtems-bsd-cache.h>
#include <machine/rtems-bsd-bus-dma.h>

#include <rtems/malloc.h>

#include <sys/malloc.h>
#include <machine/atomic.h>

#ifdef CPU_DATA_CACHE_ALIGNMENT
  #define CLSZ ((uintptr_t) CPU_DATA_CACHE_ALIGNMENT)
  #define CLMASK (CLSZ - (uintptr_t) 1)
#endif

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
        struct mtx *dmtx;

        dmtx = (struct mtx *)arg;
        switch (op) {
        case BUS_DMA_LOCK:
                mtx_lock(dmtx);
                break;
        case BUS_DMA_UNLOCK:
                mtx_unlock(dmtx);
                break;
        default:
                panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
        }
}
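
/*
 * Usage sketch (the "example_softc" type and all "example_*" names are
 * hypothetical, for illustration only): a driver that wants busdma to
 * serialize deferred operations with its own mutex passes
 * busdma_lock_mutex together with that mutex as the lockfunc/lockfuncarg
 * pair of bus_dma_tag_create().
 */
struct example_softc {
        struct mtx mtx;
        bus_dma_tag_t dma_tag;
};

static int
example_create_tag(struct example_softc *sc)
{
        /* busdma takes sc->mtx via busdma_lock_mutex() around callbacks. */
        return (bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR,
            BUS_SPACE_MAXADDR, NULL, NULL, 2048, 1, 2048, 0,
            busdma_lock_mutex, &sc->mtx, &sc->dma_tag));
}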

/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
        panic("driver error: busdma dflt_lock called");
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (newtag == NULL)
                return (ENOMEM);

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = lowaddr;
        newtag->highaddr = highaddr;
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;
        if (lockfunc != NULL) {
                newtag->lockfunc = lockfunc;
                newtag->lockfuncarg = lockfuncarg;
        } else {
                newtag->lockfunc = dflt_lock;
                newtag->lockfuncarg = NULL;
        }

        /*
         * Take into account any restrictions imposed by our parent tag
         */
        if (parent != NULL) {
                newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = max(parent->highaddr, newtag->highaddr);
                if (newtag->boundary == 0)
                        newtag->boundary = parent->boundary;
                else if (parent->boundary != 0)
                        newtag->boundary = MIN(parent->boundary,
                                               newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        atomic_add_int(&parent->ref_count, 1);
        }

        *dmat = newtag;
        return (error);
}
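
/*
 * Creation sketch (illustrative names and values): boundary 0 in the
 * child means "inherit", so if the parent was created with a 4 KiB
 * boundary, the child below ends up with boundary 0x1000 as well, and
 * its lowaddr is clipped to the more restrictive parent value.  A NULL
 * lockfunc selects dflt_lock(), which is only safe because this
 * implementation never defers a load.
 */
static int
example_create_child(bus_dma_tag_t parent, bus_dma_tag_t *child)
{
        return (bus_dma_tag_create(parent, 4, 0, BUS_SPACE_MAXADDR,
            BUS_SPACE_MAXADDR, NULL, NULL, 2048, 1, 2048, 0,
            NULL, NULL, child));
}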

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        if (dmat != NULL) {

                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        atomic_subtract_int(&dmat->ref_count, 1);
                        if (dmat->ref_count == 0) {
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        return (0);
}

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        *mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (*mapp == NULL) {
                return ENOMEM;
        }

        dmat->map_count++;

        return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        free(map, M_DEVBUF);

        dmat->map_count--;

        return (0);
}
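
/*
 * Teardown sketch (illustrative): maps count against their tag, so every
 * map has to be destroyed before the tag, otherwise bus_dma_tag_destroy()
 * above fails with EBUSY.
 */
static void
example_teardown(bus_dma_tag_t tag, bus_dmamap_t map)
{
        bus_dmamap_destroy(tag, map);           /* drops map_count */
        (void) bus_dma_tag_destroy(tag);        /* now succeeds */
}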

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with bus_dmamap_load() is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
        *mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (*mapp == NULL) {
                return ENOMEM;
        }

        if ((flags & BUS_DMA_COHERENT) != 0) {
                *vaddr = rtems_cache_coherent_allocate(
                    dmat->maxsize, dmat->alignment, dmat->boundary);
        } else {
                *vaddr = rtems_heap_allocate_aligned_with_boundary(
                    dmat->maxsize, dmat->alignment, dmat->boundary);
        }

        if (*vaddr == NULL) {
                free(*mapp, M_DEVBUF);

                return ENOMEM;
        }

        (*mapp)->buffer_begin = *vaddr;
        (*mapp)->buffer_size = dmat->maxsize;

        if ((flags & BUS_DMA_ZERO) != 0) {
                memset(*vaddr, 0, dmat->maxsize);
        }

        return (0);
}
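
/*
 * Allocation sketch (illustrative names): a zeroed, cache coherent
 * descriptor ring, matching the BUS_DMA_COHERENT support of this file,
 * paired with the corresponding free.
 */
static int
example_alloc_ring(bus_dma_tag_t tag, void **ring, bus_dmamap_t *map)
{
        /* Coherent memory needs no explicit cache sync for the ring. */
        return (bus_dmamem_alloc(tag, ring,
            BUS_DMA_COHERENT | BUS_DMA_ZERO, map));
}

static void
example_free_ring(bus_dma_tag_t tag, void *ring, bus_dmamap_t map)
{
        bus_dmamem_free(tag, ring, map);
}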

/*
 * Free a piece of memory and its allocated dmamap that were allocated via
 * bus_dmamem_alloc().  rtems_cache_coherent_free() is expected to fall
 * back to the normal heap for memory that was not allocated from the
 * cache coherent area, so both allocation paths of bus_dmamem_alloc()
 * are handled.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        rtems_cache_coherent_free(vaddr);
        free(map, M_DEVBUF);
}

/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 */
int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    void *buf, bus_size_t buflen, struct thread *td, int flags,
    vm_offset_t *lastaddrp, int *segp, int first)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr = (vm_offset_t)buf;
        int seg;

        lastaddr = *lastaddrp;
        bmask = ~(dmat->boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 */
                curaddr = vaddr;

                /*
                 * Compute the segment size, and adjust counts.
                 */
                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
                if (sgsize > dmat->maxsegsz)
                        sgsize = dmat->maxsegsz;
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->boundary > 0) {
                        baddr = (curaddr + dmat->boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                /*
                 * Insert chunk into a segment, coalescing with
                 * the previous segment if possible.
                 */
                if (first) {
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
                        if (curaddr == lastaddr &&
                            (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                            (dmat->boundary == 0 ||
                             (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
                                segs[seg].ds_len += sgsize;
                        else {
                                if (++seg >= dmat->nsegments)
                                        break;
                                segs[seg].ds_addr = curaddr;
                                segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
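
/*
 * Worked example of the boundary clipping above (hypothetical values,
 * self-contained so it can be checked in isolation): with a 4 KiB
 * boundary, a 0x20 byte chunk at 0x1ff0 is clipped to 0x10 bytes so it
 * does not cross 0x2000; the remainder then starts a new segment.
 */
#include <assert.h>

static void
example_boundary_clip(void)
{
        uintptr_t boundary = 0x1000;
        uintptr_t bmask = ~(boundary - 1);
        uintptr_t curaddr = 0x1ff0;
        uintptr_t sgsize = 0x20;
        uintptr_t baddr;

        /* The next boundary above curaddr. */
        baddr = (curaddr + boundary) & bmask;
        assert(baddr == 0x2000);

        /* Clip the chunk at the boundary. */
        if (sgsize > baddr - curaddr)
                sgsize = baddr - curaddr;
        assert(sgsize == 0x10);
}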

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
        bus_dma_segment_t       dm_segments[dmat->nsegments];
        vm_offset_t             lastaddr;
        int                     error, nsegs;

        map->buffer_begin = buf;
        map->buffer_size = buflen;

        lastaddr = (vm_offset_t)0;
        nsegs = 0;
        error = bus_dmamap_load_buffer(dmat, dm_segments, buf, buflen,
            NULL, flags, &lastaddr, &nsegs, 1);

        if (error == 0)
                (*callback)(callback_arg, dm_segments, nsegs + 1, 0);
        else
                (*callback)(callback_arg, NULL, 0, error);

        return (0);
}
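
/*
 * Load sketch (illustrative): the usual busdma callback pattern, where
 * the callback just captures the bus address of the single segment.
 * Since this implementation never defers a load, the callback always
 * runs before bus_dmamap_load() returns.
 */
static void
example_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        bus_addr_t *paddr = arg;

        if (error == 0 && nseg == 1)
                *paddr = segs[0].ds_addr;
}

static int
example_load(bus_dma_tag_t tag, bus_dmamap_t map, void *buf,
    bus_size_t len, bus_addr_t *paddr)
{
        return (bus_dmamap_load(tag, map, buf, len, example_load_cb,
            paddr, 0));
}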

/*
 * Release the mapping held by map.  A no-op in this implementation.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{

        return;
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
#ifdef CPU_DATA_CACHE_ALIGNMENT
        uintptr_t size = map->buffer_size;
        uintptr_t begin = (uintptr_t) map->buffer_begin;
        uintptr_t end = begin + size;

        if ((op & BUS_DMASYNC_PREWRITE) != 0 && (op & BUS_DMASYNC_PREREAD) == 0) {
                /* Write dirty lines back before the device reads the buffer. */
                rtems_cache_flush_multiple_data_lines((void *) begin, size);
        }
        if ((op & BUS_DMASYNC_PREREAD) != 0) {
                /*
                 * Flush first if the buffer is also written or does not
                 * cover whole cache lines, so the invalidate below cannot
                 * discard dirty data sharing the first or last line.
                 */
                if ((op & BUS_DMASYNC_PREWRITE) != 0 || ((begin | size) & CLMASK) != 0) {
                        rtems_cache_flush_multiple_data_lines((void *) begin, size);
                }
                rtems_cache_invalidate_multiple_data_lines((void *) begin, size);
        }
        if ((op & BUS_DMASYNC_POSTREAD) != 0) {
                char first_buf [CLSZ];
                char last_buf [CLSZ];
                bool first_is_aligned = (begin & CLMASK) == 0;
                bool last_is_aligned = (end & CLMASK) == 0;
                void *first_begin = (void *) (begin & ~CLMASK);
                size_t first_size = begin & CLMASK;
                void *last_begin = (void *) end;
                size_t last_size = CLSZ - (end & CLMASK);

                /*
                 * Save the bytes that share the partially owned first and
                 * last cache lines with the buffer; the invalidate below
                 * would otherwise destroy them.
                 */
                if (!first_is_aligned) {
                        memcpy(first_buf, first_begin, first_size);
                }
                if (!last_is_aligned) {
                        memcpy(last_buf, last_begin, last_size);
                }

                rtems_cache_invalidate_multiple_data_lines((void *) begin, size);

                /* Restore the saved bytes around the buffer. */
                if (!first_is_aligned) {
                        memcpy(first_begin, first_buf, first_size);
                }
                if (!last_is_aligned) {
                        memcpy(last_begin, last_buf, last_size);
                }
        }
#endif /* CPU_DATA_CACHE_ALIGNMENT */
}
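
/*
 * Sync sketch (illustrative): bracketing DMA transfers with the cache
 * maintenance above.  bus_dmamap_sync() is the standard wrapper around
 * _bus_dmamap_sync() provided by the FreeBSD headers.
 */
static void
example_tx_sync(bus_dma_tag_t tag, bus_dmamap_t map)
{
        /* Write dirty lines back before the device reads the buffer. */
        bus_dmamap_sync(tag, map, BUS_DMASYNC_PREWRITE);
        /* ... start the transfer ... */
}

static void
example_rx_sync(bus_dma_tag_t tag, bus_dmamap_t map)
{
        /* Invalidate before handing the buffer to the device. */
        bus_dmamap_sync(tag, map, BUS_DMASYNC_PREREAD);
        /* ... the device writes the buffer ... */

        /*
         * Invalidate again before reading the data; the POSTREAD code
         * above preserves bytes in partially owned first/last lines.
         */
        bus_dmamap_sync(tag, map, BUS_DMASYNC_POSTREAD);
}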