source: rtems/cpukit/score/src/heapallocatealigned.c @ 3a95c679

Last change on this file was 3a95c679, checked in by Joel Sherrill <joel.sherrill@…>, on 08/18/09 at 18:40:14

2009-08-18 Joel Sherrill <joel.sherrill@…>

  • score/src/heapallocatealigned.c: Remove unused label.
/*
 *  Heap Handler
 *
 *  COPYRIGHT (c) 1989-1999.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  $Id$
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/system.h>
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>

#if defined(RTEMS_HEAP_DEBUG)

static void
check_result(
  Heap_Control *the_heap,
  Heap_Block   *the_block,
  _H_uptr_t     user_addr,
  _H_uptr_t     aligned_user_addr,
  intptr_t      size
)
{
  _H_uptr_t const user_area = _H_p2u(_Heap_User_area(the_block));
  _H_uptr_t const block_end = _H_p2u(the_block)
    + _Heap_Block_size(the_block) + HEAP_BLOCK_HEADER_OFFSET;
  _H_uptr_t const user_end = aligned_user_addr + size;
  _H_uptr_t const heap_start = _H_p2u(the_heap->start) + HEAP_OVERHEAD;
  _H_uptr_t const heap_end = _H_p2u(the_heap->final)
    + HEAP_BLOCK_HEADER_OFFSET;
  uint32_t const page_size = the_heap->page_size;

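  /* The returned user pointer must coincide with the block's user area,
     the aligned user pointer must lie within one page of it, and the
     allocated region must fit entirely within both the block and the heap. */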
  _HAssert(user_addr == user_area);
  _HAssert(aligned_user_addr - user_area < page_size);
  _HAssert(aligned_user_addr >= user_area);
  _HAssert(aligned_user_addr < block_end);
  _HAssert(user_end > user_area);
  _HAssert(user_end <= block_end);
  _HAssert(aligned_user_addr >= heap_start);
  _HAssert(aligned_user_addr < heap_end);
  _HAssert(user_end > heap_start);
  _HAssert(user_end <= heap_end);
}

#else /* !defined(RTEMS_HEAP_DEBUG) */

#define check_result(a, b, c, d, e) ((void)0)

#endif /* !defined(RTEMS_HEAP_DEBUG) */

/*
 * Allocate a block of size 'alloc_size' from 'the_block' belonging to
 * 'the_heap'. Split 'the_block' if possible; otherwise allocate it entirely.
 * When splitting, make the upper part used and leave the lower part free.
 * Return the allocated block.
 *
 * NOTE: This is similar to _Heap_Block_allocate(), except that it makes the
 * opposite part of the split block used and returns the address of the block
 * instead of its size. _Heap_Allocate_aligned() needs this variant because
 * the user pointer must not be too far from the beginning of the block, so
 * that the start-of-block address can be recovered from the user pointer
 * without additional information stored in the heap.
 */
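
/*
 * A sketch of that recovery (hypothetical helper, not part of this file):
 * with the distance between the user pointer and the start of the user area
 * kept within [0, page_size), the block can be found without any extra
 * bookkeeping:
 *
 *   static Heap_Block *block_of_user_ptr(Heap_Control *heap, _H_uptr_t ptr)
 *   {
 *     _Heap_Align_down_uptr(&ptr, heap->page_size);
 *     return (Heap_Block *)(ptr - HEAP_BLOCK_USER_OFFSET);
 *   }
 */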
static
Heap_Block *block_allocate(
  Heap_Control  *the_heap,
  Heap_Block    *the_block,
  intptr_t       alloc_size
)
{
  Heap_Statistics *const stats = &the_heap->stats;
  uint32_t const block_size = _Heap_Block_size(the_block);
  uint32_t const the_rest = block_size - alloc_size;

  _HAssert(_Heap_Is_aligned(block_size, the_heap->page_size));
  _HAssert(_Heap_Is_aligned(alloc_size, the_heap->page_size));
  _HAssert(alloc_size <= block_size);
  _HAssert(_Heap_Is_prev_used(the_block));

  if (the_rest >= the_heap->min_block_size) {
    /* Split the block so that the lower part stays free and the upper part
       becomes used. */
    the_block->size = the_rest | HEAP_PREV_USED;
    the_block = _Heap_Block_at(the_block, the_rest);
    the_block->prev_size = the_rest;
    the_block->size = alloc_size;
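    /* HEAP_PREV_USED is intentionally left clear in the size above: the
       lower part of the split block remains free. */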
  } else {
    /* Don't split the block, as the remainder is either zero or too small to
       be used as a separate free block. Change 'alloc_size' to the size of
       the block and remove the block from the list of free blocks. */
    _Heap_Block_remove(the_block);
    alloc_size = block_size;
    stats->free_blocks -= 1;
  }
  /* Mark the block as used (in the next block). */
  _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;
  /* Update statistics */
  stats->free_size -= alloc_size;
  if (stats->min_free_size > stats->free_size)
    stats->min_free_size = stats->free_size;
  stats->used_blocks += 1;
  return the_block;
}


/*PAGE
 *
 *  _Heap_Allocate_aligned
 *
 *  This kernel routine allocates the requested size of memory
 *  from the specified heap so that the returned address is aligned
 *  according to 'alignment'.
 *
 *  Input parameters:
 *    the_heap  - pointer to the heap control block.
 *    size      - size in bytes of the memory block to allocate.
 *    alignment - required user pointer alignment in bytes.
 *
 *  Output parameters:
 *    returns - starting address of the memory block allocated. The address
 *              is aligned on the specified boundary.
 */

void *_Heap_Allocate_aligned(
  Heap_Control *the_heap,
  intptr_t      size,
  uint32_t      alignment
)
{
  uint32_t search_count;
  Heap_Block *the_block;

  void *user_ptr = NULL;
  uint32_t const page_size = the_heap->page_size;
  Heap_Statistics *const stats = &the_heap->stats;
  Heap_Block *const tail = _Heap_Tail(the_heap);

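  /* The minimum distance from the end of a block back to the user pointer
     such that 'size' bytes still fit: the user area may extend
     HEAP_BLOCK_HEADER_OFFSET bytes past the block boundary, into the next
     block's 'prev_size' field, which is unused while this block is used. */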
  uint32_t const end_to_user_offs = size - HEAP_BLOCK_HEADER_OFFSET;

  uint32_t const the_size =
    _Heap_Calc_block_size(size, page_size, the_heap->min_block_size);

  if (the_size == 0)
    return NULL;

  if (alignment == 0)
    alignment = CPU_ALIGNMENT;

  /* Find a free block that is large enough and satisfies the alignment
     requirements. */

  for (the_block = _Heap_First(the_heap), search_count = 0;
      the_block != tail;
      the_block = the_block->next, ++search_count)
  {
    uint32_t const block_size = _Heap_Block_size(the_block);

    /* As we always coalesce free blocks, the previous block must be used. */
    _HAssert(_Heap_Is_prev_used(the_block));

    if (block_size >= the_size) { /* the_block is large enough. */

      _H_uptr_t user_addr;
      _H_uptr_t aligned_user_addr;
      _H_uptr_t const user_area = _H_p2u(_Heap_User_area(the_block));

      /* Calculate 'aligned_user_addr', which will become the user pointer we
         return. It should be at least 'end_to_user_offs' bytes less than
         'block_end' and should be aligned on the 'alignment' boundary.
         Calculations start from 'block_end' because we are going to split the
         free block so that its upper part becomes the used block. */
      _H_uptr_t const block_end = _H_p2u(the_block) + block_size;
      aligned_user_addr = block_end - end_to_user_offs;
      _Heap_Align_down_uptr(&aligned_user_addr, alignment);

      /* 'user_addr' is 'aligned_user_addr' further aligned down to the
         'page_size' boundary. We need it because blocks' user areas may
         begin only at 'page_size' aligned addresses. */
      user_addr = aligned_user_addr;
      _Heap_Align_down_uptr(&user_addr, page_size);

      /* Make sure the calculated 'user_addr' didn't fall outside of
         'the_block'. */
      if (user_addr >= user_area) {

        /* The block seems to be acceptable. Check whether the remainder of
           'the_block' is less than 'min_block_size', in which case
           'the_block' won't actually be split at the address we assume. */
        if (user_addr - user_area < the_heap->min_block_size) {

          /* The block won't be split, so 'user_addr' will be equal to
             'user_area'. */
          user_addr = user_area;

          /* We can't allow the distance between 'user_addr' and
             'aligned_user_addr' to be outside the [0,page_size) range. If it
             is, we would need to store this distance somewhere to be able to
             resurrect the block address from the user pointer. (Having the
             distance within the [0,page_size) range allows resurrection by
             aligning the user pointer down to the nearest 'page_size'
             boundary.) */
          if (aligned_user_addr - user_addr >= page_size) {

            /* The user pointer would be too far from 'user_addr'. See if we
               can bring 'aligned_user_addr' close enough to 'user_addr'. */
            aligned_user_addr = user_addr;
            _Heap_Align_up_uptr(&aligned_user_addr, alignment);
            if (aligned_user_addr - user_addr >= page_size) {
              /* No, we can't use this block. */
              continue;
            }
          }
        }

        /* The block is indeed acceptable: calculate the size of the block
           to be allocated and perform the allocation. */
        uint32_t const alloc_size =
          block_end - user_addr + HEAP_BLOCK_USER_OFFSET;
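        /* The allocated block will start HEAP_BLOCK_USER_OFFSET bytes before
           'user_addr', hence the adjustment above. */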

        _HAssert(_Heap_Is_aligned_ptr((void*)aligned_user_addr, alignment));

        the_block = block_allocate(the_heap, the_block, alloc_size);

        stats->searches += search_count + 1;
        stats->allocs += 1;

        check_result(the_heap, the_block, user_addr,
                     aligned_user_addr, size);

        user_ptr = (void*)aligned_user_addr;

        break;
      }
    }
  }

  if (stats->max_search < search_count)
    stats->max_search = search_count;

  return user_ptr;
}