source: rtems-libbsd/rtemsbsd/src/rtems-bsd-bus-dma.c @ 8420b94

4.1155-freebsd-126-freebsd-12freebsd-9.3
Last change on this file since 8420b94 was 8420b94, checked in by Jennifer Averett <jennifer.averett@…>, on 05/08/12 at 14:14:42

Modified copyright on rtems-bsd-xxx files to be consistent with FreeBSD copyright.

  • Property mode set to 100644
File size: 10.9 KB
RevLine 
[a9153ec]1/**
2 * @file
3 *
4 * @ingroup rtems_bsd_rtems
5 *
6 * @brief TODO.
7 *
[2da0777]8 * File origin from FreeBSD "sys/powerpc/powerpc/busdma_machdep.c".
[a9153ec]9 */
10
[8420b94]11/*
12 * Copyright (c) 2009-2012 embedded brains GmbH. 
13 * All rights reserved.
[a9153ec]14 *
15 *  embedded brains GmbH
16 *  Obere Lagerstr. 30
17 *  82178 Puchheim
18 *  Germany
19 *  <rtems@embedded-brains.de>
20 *
21 * Copyright (c) 2004 Olivier Houchard
22 * Copyright (c) 2002 Peter Grehan
23 * Copyright (c) 1997, 1998 Justin T. Gibbs.
24 * All rights reserved.
25 *
26 * Redistribution and use in source and binary forms, with or without
27 * modification, are permitted provided that the following conditions
28 * are met:
29 * 1. Redistributions of source code must retain the above copyright
30 *    notice, this list of conditions, and the following disclaimer,
31 *    without modification, immediately at the beginning of the file.
32 * 2. The name of the author may not be used to endorse or promote products
33 *    derived from this software without specific prior written permission.
34 *
35 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
36 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
37 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
38 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
39 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
40 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
41 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
42 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
43 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
44 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
45 * SUCH DAMAGE.
46 */
47
[6ad03bf]48#include <freebsd/machine/rtems-bsd-config.h>
49#include <freebsd/machine/rtems-bsd-cache.h>
[2da0777]50#include <freebsd/machine/rtems-bsd-bus-dma.h>
51
[a9153ec]52#include <rtems/malloc.h>
53
[6ad03bf]54#include <freebsd/sys/malloc.h>
55#include <freebsd/machine/atomic.h>
[a9153ec]56
57#ifdef CPU_DATA_CACHE_ALIGNMENT
58  #define CLSZ ((uintptr_t) CPU_DATA_CACHE_ALIGNMENT)
59  #define CLMASK (CLSZ - (uintptr_t) 1)
60#endif
61
62/*
63 * Convenience function for manipulating driver locks from busdma (during
64 * busdma_swi, for example).  Drivers that don't provide their own locks
65 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
66 * non-mutex locking scheme don't have to use this at all.
67 */
68void
69busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
70{
71        struct mtx *dmtx;
72
73        dmtx = (struct mtx *)arg;
74        switch (op) {
75        case BUS_DMA_LOCK:
76                mtx_lock(dmtx);
77                break;
78        case BUS_DMA_UNLOCK:
79                mtx_unlock(dmtx);
80                break;
81        default:
82                panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
83        }
84}
85
86/*
87 * dflt_lock should never get called.  It gets put into the dma tag when
88 * lockfunc == NULL, which is only valid if the maps that are associated
89 * with the tag are meant to never be defered.
90 * XXX Should have a way to identify which driver is responsible here.
91 */
92static void
93dflt_lock(void *arg, bus_dma_lock_op_t op)
94{
95        panic("driver error: busdma dflt_lock called");
96}
97
98/*
99 * Allocate a device specific dma_tag.
100 */
101int
102bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
103    bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
104    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
105    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
106    void *lockfuncarg, bus_dma_tag_t *dmat)
107{
108        bus_dma_tag_t newtag;
109        int error = 0;
110
111        /* Return a NULL tag on failure */
112        *dmat = NULL;
113
114        newtag = malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT | M_ZERO);
115        if (newtag == NULL)
116                return (ENOMEM);
117
118        newtag->parent = parent;
119        newtag->alignment = alignment;
120        newtag->boundary = boundary;
121        newtag->lowaddr = lowaddr;
122        newtag->highaddr = highaddr;
123        newtag->filter = filter;
124        newtag->filterarg = filterarg;
125        newtag->maxsize = maxsize;
126        newtag->nsegments = nsegments;
127        newtag->maxsegsz = maxsegsz;
128        newtag->flags = flags;
129        newtag->ref_count = 1; /* Count ourself */
130        newtag->map_count = 0;
131        if (lockfunc != NULL) {
132                newtag->lockfunc = lockfunc;
133                newtag->lockfuncarg = lockfuncarg;
134        } else {
135                newtag->lockfunc = dflt_lock;
136                newtag->lockfuncarg = NULL;
137        }
138
139        /*
140         * Take into account any restrictions imposed by our parent tag
141         */
142        if (parent != NULL) {
143                newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
144                newtag->highaddr = max(parent->highaddr, newtag->highaddr);
145                if (newtag->boundary == 0)
146                        newtag->boundary = parent->boundary;
147                else if (parent->boundary != 0)
148                        newtag->boundary = MIN(parent->boundary,
149                                               newtag->boundary);
150                if (newtag->filter == NULL) {
151                        /*
152                         * Short circuit looking at our parent directly
153                         * since we have encapsulated all of its information
154                         */
155                        newtag->filter = parent->filter;
156                        newtag->filterarg = parent->filterarg;
157                        newtag->parent = parent->parent;
158                }
159                if (newtag->parent != NULL)
160                        atomic_add_int(&parent->ref_count, 1);
161        }
162
163        *dmat = newtag;
164        return (error);
165}
166
167int
168bus_dma_tag_destroy(bus_dma_tag_t dmat)
169{
170        if (dmat != NULL) {
171
172                if (dmat->map_count != 0)
173                        return (EBUSY);
174
175                while (dmat != NULL) {
176                        bus_dma_tag_t parent;
177
178                        parent = dmat->parent;
179                        atomic_subtract_int(&dmat->ref_count, 1);
180                        if (dmat->ref_count == 0) {
181                                free(dmat, M_DEVBUF);
182                                /*
183                                 * Last reference count, so
184                                 * release our reference
185                                 * count on our parent.
186                                 */
187                                dmat = parent;
188                        } else
189                                dmat = NULL;
190                }
191        }
192        return (0);
193}
194
195/*
196 * Allocate a handle for mapping from kva/uva/physical
197 * address space into bus device space.
198 */
199int
200bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
201{
202        *mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
203        if (*mapp == NULL) {
204                return ENOMEM;
205        }
206
207        dmat->map_count++;
208
209        return (0);
210}
211
212/*
213 * Destroy a handle for mapping from kva/uva/physical
214 * address space into bus device space.
215 */
216int
217bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
218{
219        free(map, M_DEVBUF);
220
221        dmat->map_count--;
222
223        return (0);
224}
225
226/*
227 * Allocate a piece of memory that can be efficiently mapped into
228 * bus device space based on the constraints lited in the dma tag.
229 * A dmamap to for use with dmamap_load is also allocated.
230 */
231int
232bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
233    bus_dmamap_t *mapp)
234{
235        *mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
236        if (*mapp == NULL) {
237                return ENOMEM;
238        }
239
240        *vaddr = rtems_heap_allocate_aligned_with_boundary(dmat->maxsize, dmat->alignment, dmat->boundary);
241        if (*vaddr == NULL) {
242                free(*mapp, M_DEVBUF);
243
244                return ENOMEM;
245        }
246
247        (*mapp)->buffer_begin = *vaddr;
248        (*mapp)->buffer_size = dmat->maxsize;
249
250        if ((flags & BUS_DMA_ZERO) != 0) {
251                memset(*vaddr, 0, dmat->maxsize);
252        }
253
254        return (0);
255}
256
257/*
258 * Free a piece of memory and it's allocated dmamap, that was allocated
259 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
260 */
261void
262bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
263{
264        free(vaddr, M_RTEMS_HEAP);
265        free(map, M_DEVBUF);
266}
267
268/*
269 * Utility function to load a linear buffer.  lastaddrp holds state
270 * between invocations (for multiple-buffer loads).  segp contains
271 * the starting segment on entrance, and the ending segment on exit.
272 * first indicates if this is the first invocation of this function.
273 */
/*
 * Utility function to load a linear buffer.  lastaddrp holds state
 * between invocations (for multiple-buffer loads).  segp contains
 * the starting segment on entrance, and the ending segment on exit.
 * first indicates if this is the first invocation of this function.
 *
 * Splits [buf, buf + buflen) into segments no larger than
 * dmat->maxsegsz that never cross a dmat->boundary line, coalescing
 * physically contiguous chunks into the previous segment when the
 * limits allow it.  Returns EFBIG when dmat->nsegments is exhausted
 * before the buffer is fully consumed, otherwise 0.
 */
int
bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
    void *buf, bus_size_t buflen, struct thread *td, int flags,
    vm_offset_t *lastaddrp, int *segp, int first)
{
        bus_size_t sgsize;
        bus_addr_t curaddr, lastaddr, baddr, bmask;
        vm_offset_t vaddr = (vm_offset_t)buf;
        int seg;

        lastaddr = *lastaddrp;
        /* All-ones complement trick: bmask is 0 when boundary == 0, but it
         * is only consulted under explicit boundary != 0 checks below. */
        bmask = ~(dmat->boundary - 1);

        for (seg = *segp; buflen > 0 ; ) {
                /*
                 * Get the physical address for this segment.
                 * NOTE(review): uses the virtual address directly — assumes
                 * an identity VA->PA mapping on this target; confirm.
                 */
                curaddr = vaddr;

                /*
                 * Compute the segment size, and adjust counts.
                 * Start from the distance to the next page boundary, then
                 * clamp to the per-segment maximum and the remaining length.
                 */
                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
                if (sgsize > dmat->maxsegsz)
                        sgsize = dmat->maxsegsz;
                if (buflen < sgsize)
                        sgsize = buflen;

                /*
                 * Make sure we don't cross any boundaries.
                 */
                if (dmat->boundary > 0) {
                        baddr = (curaddr + dmat->boundary) & bmask;
                        if (sgsize > (baddr - curaddr))
                                sgsize = (baddr - curaddr);
                }

                /*
                 * Insert chunk into a segment, coalescing with
                 * the previous segment if possible.
                 */
                if (first) {
                        segs[seg].ds_addr = curaddr;
                        segs[seg].ds_len = sgsize;
                        first = 0;
                } else {
                        /* Extend the previous segment only if this chunk is
                         * adjacent, the merged length fits maxsegsz, and both
                         * ends stay within the same boundary window. */
                        if (curaddr == lastaddr &&
                            (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
                            (dmat->boundary == 0 ||
                             (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
                                segs[seg].ds_len += sgsize;
                        else {
                                /* Out of segments: stop with buflen > 0, which
                                 * yields EFBIG below. */
                                if (++seg >= dmat->nsegments)
                                        break;
                                segs[seg].ds_addr = curaddr;
                                segs[seg].ds_len = sgsize;
                        }
                }

                lastaddr = curaddr + sgsize;
                vaddr += sgsize;
                buflen -= sgsize;
        }

        *segp = seg;
        *lastaddrp = lastaddr;

        /*
         * Did we fit?
         */
        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
}
346
347/*
348 * Map the buffer buf into bus space using the dmamap map.
349 */
350int
351bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
352    bus_size_t buflen, bus_dmamap_callback_t *callback,
353    void *callback_arg, int flags)
354{
355        bus_dma_segment_t       dm_segments[dmat->nsegments];
356        vm_offset_t             lastaddr;
357        int                     error, nsegs;
358
359        map->buffer_begin = buf;
360        map->buffer_size = buflen;
361
362        lastaddr = (vm_offset_t)0;
363        nsegs = 0;
364        error = bus_dmamap_load_buffer(dmat, dm_segments, buf, buflen,
365            NULL, flags, &lastaddr, &nsegs, 1);
366
367        if (error == 0)
368                (*callback)(callback_arg, dm_segments, nsegs + 1, 0);
369        else
370                (*callback)(callback_arg, NULL, 0, error);
371
372        return (0);
373}
374
375/*
376 * Release the mapping held by map. A no-op on PowerPC.
377 */
378void
379_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
380{
381
382        return;
383}
384
/*
 * Perform data cache maintenance for a DMA transfer on the buffer
 * recorded in the map (set by bus_dmamap_load()/bus_dmamem_alloc()).
 *
 * Compiles to a no-op when the CPU has no data cache alignment
 * (CPU_DATA_CACHE_ALIGNMENT undefined).
 */
void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
#ifdef CPU_DATA_CACHE_ALIGNMENT
        uintptr_t size = map->buffer_size;
        uintptr_t begin = (uintptr_t) map->buffer_begin;
        uintptr_t end = begin + size;

        /* Device will read the buffer: push dirty lines out to memory.
         * (Skipped here when PREREAD is also set — the PREREAD branch
         * below performs the flush itself in that case.) */
        if ((op & BUS_DMASYNC_PREWRITE) != 0 && (op & BUS_DMASYNC_PREREAD) == 0) {
                rtems_cache_flush_multiple_data_lines((void *) begin, size);
        }
        /* Device will write the buffer: invalidate our stale copy.  If the
         * region is not cache-line aligned (or we also wrote it), flush
         * first so bytes sharing an edge line are not lost. */
        if ((op & BUS_DMASYNC_PREREAD) != 0) {
                if ((op & BUS_DMASYNC_PREWRITE) != 0 || ((begin | size) & CLMASK) != 0) {
                        rtems_cache_flush_multiple_data_lines((void *) begin, size);
                }
                rtems_cache_invalidate_multiple_data_lines((void *) begin, size);
        }
        /* Device finished writing: invalidate so the CPU sees the new data.
         * Bytes outside the buffer that share the first/last cache line
         * must survive the invalidation, so they are saved to stack
         * buffers beforehand and restored afterwards. */
        if ((op & BUS_DMASYNC_POSTREAD) != 0) {
                char first_buf [CLSZ];
                char last_buf [CLSZ];
                bool first_is_aligned = (begin & CLMASK) == 0;
                bool last_is_aligned = (end & CLMASK) == 0;
                /* Prefix of the first line that lies before the buffer. */
                void *first_begin = (void *) (begin & ~CLMASK);
                size_t first_size = begin & CLMASK;
                /* Suffix of the last line that lies after the buffer. */
                void *last_begin = (void *) end;
                size_t last_size = CLSZ - (end & CLMASK);

                if (!first_is_aligned) {
                        memcpy(first_buf, first_begin, first_size);
                }
                if (!last_is_aligned) {
                        memcpy(last_buf, last_begin, last_size);
                }

                rtems_cache_invalidate_multiple_data_lines((void *) begin, size);

                /* Restore the neighbouring bytes clobbered by the
                 * line-granular invalidation above. */
                if (!first_is_aligned) {
                        memcpy(first_begin, first_buf, first_size);
                }
                if (!last_is_aligned) {
                        memcpy(last_begin, last_buf, last_size);
                }
        }
#endif /* CPU_DATA_CACHE_ALIGNMENT */
}
Note: See TracBrowser for help on using the repository browser.