source: rtems/cpukit/score/src/heapallocatealigned.c @ 6685aa09

Last change on this file since 6685aa09 was 371cea31, checked in by Joel Sherrill <joel.sherrill@…>, on 08/26/09 at 12:00:24

2009-08-24 Sebastian Huber <Sebastian.Huber@…>

  • libmisc/stackchk/check.c, rtems/src/regionreturnsegment.c, rtems/src/regiongetsegmentsize.c, src/heapalignupuptr.c, src/heapallocatealigned.c, src/heapallocate.c, src/heap.c, src/heapextend.c, src/heapfree.c, src/heapgetfreeinfo.c, src/heapgetinfo.c, src/heapresizeblock.c, src/heapsizeofuserarea.c, src/heapwalk.c, src/pheapgetblocksize.c, inline/rtems/score/heap.inl, include/rtems/score/heap.h: Overall cleanup. Changed all types for addresses, sizes, offsets and alignments to uintptr_t. Reformatted. Added variables for clarity. Renamed various objects. Enabled _HAssert() for all instances. More changes follow.
/*
 *  Heap Handler
 *
 *  COPYRIGHT (c) 1989-1999.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  $Id$
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/system.h>
#include <rtems/score/sysstate.h>
#include <rtems/score/heap.h>

#if defined(RTEMS_HEAP_DEBUG)

static void
check_result(
  Heap_Control *the_heap,
  Heap_Block   *the_block,
  uintptr_t     user_addr,
  uintptr_t     aligned_user_addr,
  uintptr_t     size
)
{
  uintptr_t const user_area = _Heap_Alloc_area_of_block(the_block);
  uintptr_t const block_end = (uintptr_t) the_block
    + _Heap_Block_size(the_block) + HEAP_BLOCK_SIZE_OFFSET;
  uintptr_t const user_end = aligned_user_addr + size;
  uintptr_t const heap_start = (uintptr_t) the_heap->start + HEAP_LAST_BLOCK_OVERHEAD;
  uintptr_t const heap_end = (uintptr_t) the_heap->final
    + HEAP_BLOCK_SIZE_OFFSET;
  uintptr_t const page_size = the_heap->page_size;

  _HAssert(user_addr == user_area);
  _HAssert(aligned_user_addr - user_area < page_size);
  _HAssert(aligned_user_addr >= user_area);
  _HAssert(aligned_user_addr < block_end);
  _HAssert(user_end > user_area);
  _HAssert(user_end <= block_end);
  _HAssert(aligned_user_addr >= heap_start);
  _HAssert(aligned_user_addr < heap_end);
  _HAssert(user_end > heap_start);
  _HAssert(user_end <= heap_end);
}

#else /* !defined(RTEMS_HEAP_DEBUG) */

#define check_result(a, b, c, d, e) ((void)0)

#endif /* !defined(RTEMS_HEAP_DEBUG) */

/*
 * Allocate a block of size 'alloc_size' from 'the_block' belonging to
 * 'the_heap'. Split 'the_block' if possible, otherwise allocate it entirely.
 * When split, make the upper part used and leave the lower part free.
 * Return the block allocated.
 *
 * NOTE: This is similar to _Heap_Block_allocate(), except that it makes the
 * other part of the split block used, and returns the address of the block
 * instead of its size. We need such a variant for _Heap_Allocate_aligned()
 * because the user pointer must not be too far from the beginning of the
 * block, so that the start-of-block address can be recovered from the user
 * pointer without additional information stored in the heap.
 */
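/*
 * Illustrative example (numbers assumed, not from this file): given a free
 * block of 'block_size' 1024 and an 'alloc_size' of 256 with a
 * 'min_block_size' of 48, 'the_rest' is 768 >= 48, so the block is split:
 * the lower 768 bytes stay on the free list and the upper 256 bytes become
 * the used block that is returned. Were 'the_rest' smaller than
 * 'min_block_size', the whole 1024-byte block would be allocated instead.
 */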
static
Heap_Block *block_allocate(
  Heap_Control *the_heap,
  Heap_Block   *the_block,
  uintptr_t     alloc_size
)
{
  Heap_Statistics *const stats = &the_heap->stats;
  uintptr_t const block_size = _Heap_Block_size(the_block);
  uintptr_t const the_rest = block_size - alloc_size;

  _HAssert(_Heap_Is_aligned(block_size, the_heap->page_size));
  _HAssert(_Heap_Is_aligned(alloc_size, the_heap->page_size));
  _HAssert(alloc_size <= block_size);
  _HAssert(_Heap_Is_prev_used(the_block));

  if (the_rest >= the_heap->min_block_size) {
    /* Split the block so that the lower part is still free, and the upper
       part becomes used. */
    the_block->size_and_flag = the_rest | HEAP_PREV_BLOCK_USED;
    the_block = _Heap_Block_at(the_block, the_rest);
    the_block->prev_size = the_rest;
    the_block->size_and_flag = alloc_size;
  } else {
    /* Don't split the block as the remainder is either zero or too small to
       be used as a separate free block. Change 'alloc_size' to the size of
       the block and remove the block from the list of free blocks. */
    _Heap_Block_remove_from_free_list(the_block);
    alloc_size = block_size;
    stats->free_blocks -= 1;
  }
  /* Mark the block as used (in the next block). */
  _Heap_Block_at(the_block, alloc_size)->size_and_flag |= HEAP_PREV_BLOCK_USED;
  /* Update statistics. */
  stats->free_size -= alloc_size;
  if (stats->min_free_size > stats->free_size)
    stats->min_free_size = stats->free_size;
  stats->used_blocks += 1;
  return the_block;
}


/*PAGE
 *
 *  _Heap_Allocate_aligned
 *
 *  This kernel routine allocates the requested size of memory
 *  from the specified heap so that the returned address is aligned according
 *  to 'alignment'.
 *
 *  Input parameters:
 *    the_heap  - pointer to the heap control block.
 *    size      - size in bytes of the memory block to allocate.
 *    alignment - required user pointer alignment in bytes.
 *
 *  Output parameters:
 *    returns - starting address of the memory block allocated. The address
 *              is aligned on the specified boundary.
 */

void *_Heap_Allocate_aligned(
  Heap_Control *the_heap,
  uintptr_t     size,
  uintptr_t     alignment
)
{
  uintptr_t search_count;
  Heap_Block *the_block;

  void *user_ptr = NULL;
  uintptr_t const page_size = the_heap->page_size;
  Heap_Statistics *const stats = &the_heap->stats;
  Heap_Block *const tail = _Heap_Free_list_tail(the_heap);

  uintptr_t const end_to_user_offs = size - HEAP_BLOCK_SIZE_OFFSET;

  uintptr_t const the_size =
    _Heap_Calc_block_size(size, page_size, the_heap->min_block_size);

  if (the_size == 0)
    return NULL;

  if (alignment == 0)
    alignment = CPU_ALIGNMENT;

  /* Find a large enough free block that satisfies the alignment requirements. */

  for (the_block = _Heap_First_free_block(the_heap), search_count = 0;
      the_block != tail;
      the_block = the_block->next, ++search_count)
  {
    uintptr_t const block_size = _Heap_Block_size(the_block);

    /* As we always coalesce free blocks, the previous block must be used. */
    _HAssert(_Heap_Is_prev_used(the_block));

    if (block_size >= the_size) { /* the_block is large enough. */

      uintptr_t user_addr;
      uintptr_t aligned_user_addr;
      uintptr_t const user_area = _Heap_Alloc_area_of_block(the_block);

      /* Calculate 'aligned_user_addr' that will become the user pointer we
         return. It should be at least 'end_to_user_offs' bytes less than
         'block_end' and should be aligned on the 'alignment' boundary.
         Calculations are from 'block_end' as we are going to split the free
         block so that the upper part of the block becomes the used block. */
      uintptr_t const block_end = (uintptr_t) the_block + block_size;
      aligned_user_addr =
        _Heap_Align_down(block_end - end_to_user_offs, alignment);

      /* 'user_addr' is 'aligned_user_addr' further aligned down to the
         'page_size' boundary. We need it because blocks' user areas may
         begin only at 'page_size'-aligned addresses. */
      user_addr = _Heap_Align_down(aligned_user_addr, page_size);
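
      /* Illustrative numbers (assumed, not from this file): with 'block_end'
         0x2000, 'end_to_user_offs' 96, 'alignment' 64 and 'page_size' 16,
         the first step gives _Heap_Align_down(0x1FA0, 64) == 0x1F80 for
         'aligned_user_addr', and this step gives
         _Heap_Align_down(0x1F80, 16) == 0x1F80 for 'user_addr'. */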

      /* Make sure the calculated 'user_addr' didn't fall below the user
         area of 'the_block'. */
      if (user_addr >= user_area) {

        /* The block seems to be acceptable. Check if the remainder of
           'the_block' is less than 'min_block_size', in which case
           'the_block' won't actually be split at the address we assumed. */
        if (user_addr - user_area < the_heap->min_block_size) {

          /* The block won't be split, so 'user_addr' will be equal to
             'user_area'. */
          user_addr = user_area;

          /* We can't allow the distance between 'user_addr' and
             'aligned_user_addr' to be outside of the [0,page_size) range. If
             it is, we would need to store this distance somewhere to be able
             to resurrect the block address from the user pointer. (Having
             the distance within the [0,page_size) range allows resurrection
             by aligning the user pointer down to the nearest 'page_size'
             boundary.) */
          if (aligned_user_addr - user_addr >= page_size) {

            /* The user pointer would be too far from 'user_addr'. See if we
               can bring 'aligned_user_addr' close enough to 'user_addr'. */
            aligned_user_addr = _Heap_Align_up(user_addr, alignment);
            if (aligned_user_addr - user_addr >= page_size) {
              /* No, we can't use the block. */
              continue;
            }
          }
        }
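
        /* Illustrative numbers (assumed, not from this file): with
           'page_size' 16, 'user_addr' == 'user_area' == 0x1F10 and
           'alignment' 64, an 'aligned_user_addr' of 0x1F80 is 0x70 bytes
           away, too far to recover the block address by aligning down.
           Retrying with _Heap_Align_up(0x1F10, 64) gives 0x1F40, still 0x30
           bytes away, so such a block would be skipped. */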

        /* The block is indeed acceptable: calculate the size of the block
           to be allocated and perform the allocation. */
        uintptr_t const alloc_size =
          block_end - user_addr + HEAP_BLOCK_ALLOC_AREA_OFFSET;

        _HAssert(_Heap_Is_aligned(aligned_user_addr, alignment));

        the_block = block_allocate(the_heap, the_block, alloc_size);

        stats->searches += search_count + 1;
        stats->allocs += 1;

        check_result(the_heap, the_block, user_addr, aligned_user_addr, size);

        user_ptr = (void*)aligned_user_addr;

        break;
      }
    }
  }

  if (stats->max_search < search_count)
    stats->max_search = search_count;

  return user_ptr;
}
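
For context, a minimal caller sketch (not part of this file; 'example_heap' and 'example' are hypothetical names, and the heap is assumed to have been set up elsewhere with _Heap_Initialize()):

#include <rtems/score/heap.h>

/* Hypothetical heap control block, initialized elsewhere with
   _Heap_Initialize(). */
extern Heap_Control example_heap;

void example(void)
{
  /* Request 128 bytes whose user address is aligned on a 64-byte
     boundary. */
  void *p = _Heap_Allocate_aligned(&example_heap, 128, 64);

  if (p != NULL) {
    /* Here ((uintptr_t) p % 64) == 0 holds; the memory is later
       returned with _Heap_Free(&example_heap, p). */
  }
}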