source: rtems-libbsd/rtemsbsd/src/rtems-bsd-bus-dma.c @ be8032d

Last change on this file since be8032d was 2da0777, checked in by Sebastian Huber <sebastian.huber@…>, on 04/18/12 at 12:59:28

Add BUS_DMA(9) support for mbufs

/**
 * @file
 *
 * @ingroup rtems_bsd_rtems
 *
 * @brief BUS_DMA(9) support for RTEMS.
 *
 * This file originates from the FreeBSD file
 * "sys/powerpc/powerpc/busdma_machdep.c".
 */

/*-
 * Copyright (c) 2009-2012 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Obere Lagerstr. 30
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * Copyright (c) 2004 Olivier Houchard
 * Copyright (c) 2002 Peter Grehan
 * Copyright (c) 1997, 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <freebsd/machine/rtems-bsd-config.h>
#include <freebsd/machine/rtems-bsd-cache.h>
#include <freebsd/machine/rtems-bsd-bus-dma.h>

#include <rtems/malloc.h>

#include <freebsd/sys/malloc.h>
#include <freebsd/machine/atomic.h>

#ifdef CPU_DATA_CACHE_ALIGNMENT
  #define CLSZ ((uintptr_t) CPU_DATA_CACHE_ALIGNMENT)
  #define CLMASK (CLSZ - (uintptr_t) 1)
#endif

/*
 * Convenience function for manipulating driver locks from busdma (during
 * busdma_swi, for example).  Drivers that don't provide their own locks
 * should specify &Giant to dmat->lockfuncarg.  Drivers that use their own
 * non-mutex locking scheme don't have to use this at all.
 */
void
busdma_lock_mutex(void *arg, bus_dma_lock_op_t op)
{
        struct mtx *dmtx;

        dmtx = (struct mtx *)arg;
        switch (op) {
        case BUS_DMA_LOCK:
                mtx_lock(dmtx);
                break;
        case BUS_DMA_UNLOCK:
                mtx_unlock(dmtx);
                break;
        default:
                panic("Unknown operation 0x%x for busdma_lock_mutex!", op);
        }
}

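/*
 * Hypothetical usage sketch (not part of the original file): a driver
 * whose map loads may be deferred passes busdma_lock_mutex together
 * with its own mutex when creating the tag, so that busdma can take
 * the driver lock before invoking a deferred callback.  All names and
 * tag parameters below are placeholders.
 */
#if 0
static struct mtx foo_mtx;
static bus_dma_tag_t foo_tag;

static int
foo_create_tag(void)
{
        return (bus_dma_tag_create(NULL, 4, 0, BUS_SPACE_MAXADDR,
            BUS_SPACE_MAXADDR, NULL, NULL, 4096, 1, 4096, 0,
            busdma_lock_mutex, &foo_mtx, &foo_tag));
}
#endif
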
/*
 * dflt_lock should never get called.  It gets put into the dma tag when
 * lockfunc == NULL, which is only valid if the maps that are associated
 * with the tag are meant to never be deferred.
 * XXX Should have a way to identify which driver is responsible here.
 */
static void
dflt_lock(void *arg, bus_dma_lock_op_t op)
{
        panic("driver error: busdma dflt_lock called");
}

/*
 * Allocate a device specific dma_tag.
 */
int
bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
    bus_size_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr,
    bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize,
    int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc,
    void *lockfuncarg, bus_dma_tag_t *dmat)
{
        bus_dma_tag_t newtag;
        int error = 0;

        /* Return a NULL tag on failure */
        *dmat = NULL;

        newtag = malloc(sizeof(*newtag), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (newtag == NULL)
                return (ENOMEM);

        newtag->parent = parent;
        newtag->alignment = alignment;
        newtag->boundary = boundary;
        newtag->lowaddr = lowaddr;
        newtag->highaddr = highaddr;
        newtag->filter = filter;
        newtag->filterarg = filterarg;
        newtag->maxsize = maxsize;
        newtag->nsegments = nsegments;
        newtag->maxsegsz = maxsegsz;
        newtag->flags = flags;
        newtag->ref_count = 1; /* Count ourself */
        newtag->map_count = 0;
        if (lockfunc != NULL) {
                newtag->lockfunc = lockfunc;
                newtag->lockfuncarg = lockfuncarg;
        } else {
                newtag->lockfunc = dflt_lock;
                newtag->lockfuncarg = NULL;
        }

        /*
         * Take into account any restrictions imposed by our parent tag
         */
        if (parent != NULL) {
                newtag->lowaddr = min(parent->lowaddr, newtag->lowaddr);
                newtag->highaddr = max(parent->highaddr, newtag->highaddr);
                if (newtag->boundary == 0)
                        newtag->boundary = parent->boundary;
                else if (parent->boundary != 0)
                        newtag->boundary = MIN(parent->boundary,
                                               newtag->boundary);
                if (newtag->filter == NULL) {
                        /*
                         * Short circuit looking at our parent directly
                         * since we have encapsulated all of its information
                         */
                        newtag->filter = parent->filter;
                        newtag->filterarg = parent->filterarg;
                        newtag->parent = parent->parent;
                }
                if (newtag->parent != NULL)
                        atomic_add_int(&parent->ref_count, 1);
        }

        *dmat = newtag;
        return (error);
}

int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
        if (dmat != NULL) {

                if (dmat->map_count != 0)
                        return (EBUSY);

                while (dmat != NULL) {
                        bus_dma_tag_t parent;

                        parent = dmat->parent;
                        atomic_subtract_int(&dmat->ref_count, 1);
                        if (dmat->ref_count == 0) {
                                free(dmat, M_DEVBUF);
                                /*
                                 * Last reference count, so
                                 * release our reference
                                 * count on our parent.
                                 */
                                dmat = parent;
                        } else
                                dmat = NULL;
                }
        }
        return (0);
}
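
/*
 * Hypothetical sketch (not part of the original file): a child tag
 * created from a parent inherits the tighter of both sets of
 * restrictions, and destroying the child releases the reference it
 * holds on the parent.  A NULL lockfunc selects dflt_lock, which is
 * only valid if loads on this tag are never deferred.  Placeholder
 * names and values.
 */
#if 0
static void
foo_child_tag_example(bus_dma_tag_t parent)
{
        bus_dma_tag_t child;

        if (bus_dma_tag_create(parent, 8, 0x1000, BUS_SPACE_MAXADDR,
            BUS_SPACE_MAXADDR, NULL, NULL, 65536, 16, 4096, 0,
            NULL, NULL, &child) == 0) {
                /* ... use the child tag ... */
                bus_dma_tag_destroy(child);
        }
}
#endif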

/*
 * Allocate a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
{
        *mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (*mapp == NULL) {
                return ENOMEM;
        }

        dmat->map_count++;

        return (0);
}

/*
 * Destroy a handle for mapping from kva/uva/physical
 * address space into bus device space.
 */
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        free(map, M_DEVBUF);

        dmat->map_count--;

        return (0);
}

/*
 * Allocate a piece of memory that can be efficiently mapped into
 * bus device space based on the constraints listed in the dma tag.
 * A dmamap for use with dmamap_load is also allocated.
 */
int
bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
    bus_dmamap_t *mapp)
{
        *mapp = malloc(sizeof(**mapp), M_DEVBUF, M_NOWAIT | M_ZERO);
        if (*mapp == NULL) {
                return ENOMEM;
        }

        *vaddr = rtems_heap_allocate_aligned_with_boundary(dmat->maxsize,
            dmat->alignment, dmat->boundary);
        if (*vaddr == NULL) {
                free(*mapp, M_DEVBUF);

                return ENOMEM;
        }

        (*mapp)->buffer_begin = *vaddr;
        (*mapp)->buffer_size = dmat->maxsize;

        if ((flags & BUS_DMA_ZERO) != 0) {
                memset(*vaddr, 0, dmat->maxsize);
        }

        return (0);
}

/*
 * Free a piece of memory and its allocated dmamap, that was allocated
 * via bus_dmamem_alloc.  Make the same choice for free/contigfree.
 */
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
        free(vaddr, M_RTEMS_HEAP);
        free(map, M_DEVBUF);
}
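
/*
 * Hypothetical sketch (not part of the original file): allocate a
 * zeroed DMA buffer of dmat->maxsize bytes together with its map, use
 * it, and free both.  Placeholder names.
 */
#if 0
static int
foo_dma_buffer_example(bus_dma_tag_t dmat)
{
        void *vaddr;
        bus_dmamap_t map;
        int error;

        error = bus_dmamem_alloc(dmat, &vaddr, BUS_DMA_ZERO, &map);
        if (error == 0) {
                /* ... load the map and run the transfer ... */
                bus_dmamem_free(dmat, vaddr, map);
        }

        return (error);
}
#endif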
266
267/*
268 * Utility function to load a linear buffer.  lastaddrp holds state
269 * between invocations (for multiple-buffer loads).  segp contains
270 * the starting segment on entrance, and the ending segment on exit.
271 * first indicates if this is the first invocation of this function.
272 */
273int
274bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dma_segment_t segs[],
275    void *buf, bus_size_t buflen, struct thread *td, int flags,
276    vm_offset_t *lastaddrp, int *segp, int first)
277{
278        bus_size_t sgsize;
279        bus_addr_t curaddr, lastaddr, baddr, bmask;
280        vm_offset_t vaddr = (vm_offset_t)buf;
281        int seg;
282
283        lastaddr = *lastaddrp;
284        bmask = ~(dmat->boundary - 1);
285
286        for (seg = *segp; buflen > 0 ; ) {
287                /*
288                 * Get the physical address for this segment.
289                 */
290                curaddr = vaddr;
291
292                /*
293                 * Compute the segment size, and adjust counts.
294                 */
295                sgsize = PAGE_SIZE - ((u_long)curaddr & PAGE_MASK);
296                if (sgsize > dmat->maxsegsz)
297                        sgsize = dmat->maxsegsz;
298                if (buflen < sgsize)
299                        sgsize = buflen;
300
301                /*
302                 * Make sure we don't cross any boundaries.
303                 */
304                if (dmat->boundary > 0) {
305                        baddr = (curaddr + dmat->boundary) & bmask;
306                        if (sgsize > (baddr - curaddr))
307                                sgsize = (baddr - curaddr);
308                }
309
310                /*
311                 * Insert chunk into a segment, coalescing with
312                 * the previous segment if possible.
313                 */
314                if (first) {
315                        segs[seg].ds_addr = curaddr;
316                        segs[seg].ds_len = sgsize;
317                        first = 0;
318                } else {
319                        if (curaddr == lastaddr &&
320                            (segs[seg].ds_len + sgsize) <= dmat->maxsegsz &&
321                            (dmat->boundary == 0 ||
322                             (segs[seg].ds_addr & bmask) == (curaddr & bmask)))
323                                segs[seg].ds_len += sgsize;
324                        else {
325                                if (++seg >= dmat->nsegments)
326                                        break;
327                                segs[seg].ds_addr = curaddr;
328                                segs[seg].ds_len = sgsize;
329                        }
330                }
331
332                lastaddr = curaddr + sgsize;
333                vaddr += sgsize;
334                buflen -= sgsize;
335        }
336
337        *segp = seg;
338        *lastaddrp = lastaddr;
339
340        /*
341         * Did we fit?
342         */
343        return (buflen != 0 ? EFBIG : 0); /* XXX better return value here? */
344}
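
/*
 * Illustrative arithmetic (not part of the original file) for the
 * boundary clamp in bus_dmamap_load_buffer() above: with a hypothetical
 * 0x1000 boundary, a chunk starting at 0x0f00 is clamped so that it
 * ends exactly at the next boundary.
 */
#if 0
static void
boundary_clamp_example(void)
{
        bus_addr_t boundary = 0x1000;
        bus_addr_t bmask = ~(boundary - 1);
        bus_addr_t curaddr = 0x0f00;
        bus_addr_t baddr = (curaddr + boundary) & bmask; /* 0x1000 */
        bus_size_t sgsize = baddr - curaddr; /* 0x0100: stop at boundary */
}
#endif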

/*
 * Map the buffer buf into bus space using the dmamap map.
 */
int
bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen, bus_dmamap_callback_t *callback,
    void *callback_arg, int flags)
{
        bus_dma_segment_t       dm_segments[dmat->nsegments];
        vm_offset_t             lastaddr;
        int                     error, nsegs;

        map->buffer_begin = buf;
        map->buffer_size = buflen;

        lastaddr = (vm_offset_t)0;
        nsegs = 0;
        error = bus_dmamap_load_buffer(dmat, dm_segments, buf, buflen,
            NULL, flags, &lastaddr, &nsegs, 1);

        if (error == 0)
                (*callback)(callback_arg, dm_segments, nsegs + 1, 0);
        else
                (*callback)(callback_arg, NULL, 0, error);

        return (0);
}
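
/*
 * Hypothetical sketch (not part of the original file): load a buffer
 * and pick up the segment list in the callback.  In this
 * implementation the load is never deferred, so the callback has run
 * by the time bus_dmamap_load() returns.  Placeholder names.
 */
#if 0
static void
foo_load_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
        bus_addr_t *paddr = arg;

        if (error == 0)
                *paddr = segs[0].ds_addr;
}

static int
foo_load_example(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
    bus_size_t buflen)
{
        bus_addr_t paddr = 0;

        return (bus_dmamap_load(dmat, map, buf, buflen, foo_load_cb,
            &paddr, BUS_DMA_NOWAIT));
}
#endif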

/*
 * Release the mapping held by map.  A no-op in this implementation.
 */
void
_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{

        return;
}

void
_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op)
{
#ifdef CPU_DATA_CACHE_ALIGNMENT
        uintptr_t size = map->buffer_size;
        uintptr_t begin = (uintptr_t) map->buffer_begin;
        uintptr_t end = begin + size;

        if ((op & BUS_DMASYNC_PREWRITE) != 0 && (op & BUS_DMASYNC_PREREAD) == 0) {
                rtems_cache_flush_multiple_data_lines((void *) begin, size);
        }
        if ((op & BUS_DMASYNC_PREREAD) != 0) {
                /*
                 * Flush first if the buffer is also written or does not
                 * cover whole cache lines, so that the invalidation
                 * below cannot discard pending writes.
                 */
                if ((op & BUS_DMASYNC_PREWRITE) != 0 || ((begin | size) & CLMASK) != 0) {
                        rtems_cache_flush_multiple_data_lines((void *) begin, size);
                }
                rtems_cache_invalidate_multiple_data_lines((void *) begin, size);
        }
        if ((op & BUS_DMASYNC_POSTREAD) != 0) {
                char first_buf [CLSZ];
                char last_buf [CLSZ];
                bool first_is_aligned = (begin & CLMASK) == 0;
                bool last_is_aligned = (end & CLMASK) == 0;
                void *first_begin = (void *) (begin & ~CLMASK);
                size_t first_size = begin & CLMASK;
                void *last_begin = (void *) end;
                size_t last_size = CLSZ - (end & CLMASK);

                /*
                 * Save the data outside the buffer that shares the
                 * partially covered first and last cache lines, so that
                 * the invalidation below does not destroy it.
                 */
                if (!first_is_aligned) {
                        memcpy(first_buf, first_begin, first_size);
                }
                if (!last_is_aligned) {
                        memcpy(last_buf, last_begin, last_size);
                }

                rtems_cache_invalidate_multiple_data_lines((void *) begin, size);

                /* Restore the saved data of the partial cache lines */
                if (!first_is_aligned) {
                        memcpy(first_begin, first_buf, first_size);
                }
                if (!last_is_aligned) {
                        memcpy(last_begin, last_buf, last_size);
                }
        }
#endif /* CPU_DATA_CACHE_ALIGNMENT */
}
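
/*
 * Hypothetical sketch (not part of the original file): a receive path
 * brackets the device DMA with sync calls, invalidating before and
 * after the device writes into the buffer.  Placeholder names.
 */
#if 0
static void
foo_rx_example(bus_dma_tag_t dmat, bus_dmamap_t map)
{
        _bus_dmamap_sync(dmat, map, BUS_DMASYNC_PREREAD);
        /* ... the device writes into the buffer via DMA ... */
        _bus_dmamap_sync(dmat, map, BUS_DMASYNC_POSTREAD);
}
#endif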