source: rtems/cpukit/score/src/heap.c @ 518c2aeb

4.104.115
Last change on this file since 518c2aeb was 518c2aeb, checked in by Joel Sherrill <joel.sherrill@…>, on 09/09/09 at 14:58:37

2009-09-09 Sebastian Huber <Sebastian.Huber@…>

  • score/include/rtems/score/heap.h, score/inline/rtems/score/heap.inl, score/src/heapallocate.c, score/src/heap.c, score/src/heapextend.c, score/src/heapresizeblock.c, score/src/heapwalk.c: Documentation. Simplified block resize. Improved heap walk. Changed heap layout to avoid a special case for _Heap_Is_used() and _Heap_Is_free().
  • libmisc/stackchk/check.c: Update for heap API changes.
  • Property mode set to 100644
File size: 12.9 KB
Line 
1/**
2 * @file
3 *
4 * @ingroup ScoreHeap
5 *
6 * @brief Heap Handler implementation.
7 */
8
9/*
10 *  COPYRIGHT (c) 1989-2009.
11 *  On-Line Applications Research Corporation (OAR).
12 *
13 *  Copyright (c) 2009 embedded brains GmbH.
14 *
15 *  The license and distribution terms for this file may be
16 *  found in the file LICENSE in this distribution or at
17 *  http://www.rtems.com/license/LICENSE.
18 *
19 *  $Id$
20 */
21
22#if HAVE_CONFIG_H
23#include "config.h"
24#endif
25
26#include <rtems/system.h>
27#include <rtems/score/heap.h>
28
29#if CPU_ALIGNMENT == 0 || CPU_ALIGNMENT % 4 != 0
30  #error "invalid CPU_ALIGNMENT value"
31#endif
32
/* Monotonically increasing counter; each initialized heap is tagged with a
 * unique instance number in its statistics (see stats->instance below). */
static uint32_t instance = 0;
34
35/*PAGE
36 *
37 *  _Heap_Initialize
38 *
39 *  This kernel routine initializes a heap.
40 *
41 *  Input parameters:
42 *    heap         - pointer to heap header
43 *    area_begin - starting address of heap
44 *    size             - size of heap
45 *    page_size        - allocatable unit of memory
46 *
47 *  Output parameters:
48 *    returns - maximum memory available if RTEMS_SUCCESSFUL
49 *    0       - otherwise
50 *
51 *  This is what a heap looks like in memory immediately after initialization:
52 *
53 *
54 *            +--------------------------------+ <- begin = area_begin
55 *            |  unused space due to alignment |
56 *            |       size < page_size         |
57 *         0  +--------------------------------+ <- first block
58 *            |  prev_size = page_size         |
59 *         4  +--------------------------------+
60 *            |  size = size0              | 1 |
61 *         8  +---------------------+----------+ <- aligned on page_size
62 *            |  next = HEAP_TAIL   |          |
63 *        12  +---------------------+          |
64 *            |  prev = HEAP_HEAD   |  memory  |
65 *            +---------------------+          |
66 *            |                     available  |
67 *            |                                |
68 *            |                for allocation  |
69 *            |                                |
70 *     size0  +--------------------------------+ <- last dummy block
71 *            |  prev_size = size0             |
72 *        +4  +--------------------------------+
73 *            |  size = page_size          | 0 | <- prev block is free
74 *        +8  +--------------------------------+ <- aligned on page_size
75 *            |  unused space due to alignment |
76 *            |       size < page_size         |
77 *            +--------------------------------+ <- end = begin + size
78 *
79 *  Below is what a heap looks like after first allocation of SIZE bytes using
 *  _Heap_Allocate(). BSIZE stands for SIZE + 4 aligned up on 'page_size'
81 *  boundary.
82 *  [NOTE: If allocation were performed by _Heap_Allocate_aligned(), the
83 *  block size BSIZE is defined differently, and previously free block will
84 *  be split so that upper part of it will become used block (see
85 *  'heapallocatealigned.c' for details).]
86 *
87 *            +--------------------------------+ <- begin = area_begin
88 *            |  unused space due to alignment |
89 *            |       size < page_size         |
90 *         0  +--------------------------------+ <- used block
91 *            |  prev_size = page_size         |
92 *         4  +--------------------------------+
93 *            |  size = BSIZE              | 1 | <- prev block is used
94 *         8  +--------------------------------+ <- aligned on page_size
95 *            |              .                 | Pointer returned to the user
96 *            |              .                 | is 8 for _Heap_Allocate()
97 *            |              .                 | and is in range
98 * 8 +        |         user-accessible        | [8,8+page_size) for
99 *  page_size +- - -                      - - -+ _Heap_Allocate_aligned()
100 *            |             area               |
101 *            |              .                 |
102 *     BSIZE  +- - - - -     .        - - - - -+ <- free block
103 *            |              .                 |
104 * BSIZE  +4  +--------------------------------+
105 *            |  size = S = size0 - BSIZE  | 1 | <- prev block is used
106 * BSIZE  +8  +-------------------+------------+ <- aligned on page_size
107 *            |  next = HEAP_TAIL |            |
108 * BSIZE +12  +-------------------+            |
109 *            |  prev = HEAP_HEAD |     memory |
110 *            +-------------------+            |
111 *            |                   .  available |
112 *            |                   .            |
113 *            |                   .        for |
114 *            |                   .            |
115 * BSIZE +S+0 +-------------------+ allocation + <- last dummy block
116 *            |  prev_size = S    |            |
117 *       +S+4 +-------------------+------------+
118 *            |  size = page_size          | 0 | <- prev block is free
119 *       +S+8 +--------------------------------+ <- aligned on page_size
120 *            |  unused space due to alignment |
121 *            |       size < page_size         |
122 *            +--------------------------------+ <- end = begin + size
123 *
124 */
125
/*
 * Initializes the heap control structure for the memory area
 * [heap_area_begin_ptr, heap_area_begin_ptr + heap_area_size).  See the
 * layout diagram above for the resulting first/last block arrangement.
 *
 * Returns the maximum allocatable size on success, or 0 if the area is
 * invalid or too small for the requested page size.
 */
uintptr_t _Heap_Initialize(
  Heap_Control *heap,
  void *heap_area_begin_ptr,
  uintptr_t heap_area_size,
  uintptr_t page_size
)
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t const heap_area_begin = (uintptr_t) heap_area_begin_ptr;
  uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
  uintptr_t alloc_area_begin = heap_area_begin + HEAP_BLOCK_HEADER_SIZE;
  uintptr_t alloc_area_size = 0;
  uintptr_t first_block_begin = 0;
  uintptr_t first_block_size = 0;
  uintptr_t min_block_size = 0;
  uintptr_t overhead = 0;
  Heap_Block *first_block = NULL;
  Heap_Block *last_block = NULL;

  /* A page size of zero selects the architecture's natural alignment;
   * otherwise round the requested page size up to a CPU_ALIGNMENT multiple. */
  if ( page_size == 0 ) {
    page_size = CPU_ALIGNMENT;
  } else {
    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );

    if ( page_size < CPU_ALIGNMENT ) {
      /* Integer overflow */
      return 0;
    }
  }
  min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );

  /* Place the first block so that its allocation area is page aligned.
   * The overhead is the block header plus the alignment gap at the start. */
  alloc_area_begin = _Heap_Align_up( alloc_area_begin, page_size );
  first_block_begin = alloc_area_begin - HEAP_BLOCK_HEADER_SIZE;
  overhead = HEAP_BLOCK_HEADER_SIZE + (first_block_begin - heap_area_begin);
  first_block_size = heap_area_size - overhead;
  first_block_size = _Heap_Align_down ( first_block_size, page_size );
  alloc_area_size = first_block_size - HEAP_BLOCK_HEADER_SIZE;

  /* NOTE: the subtractions above may wrap (unsigned) when the area is too
   * small; the heap_area_size <= overhead test below rejects that case
   * before the wrapped values are ever used. */
  if (
    heap_area_end < heap_area_begin
      || heap_area_size <= overhead
      || first_block_size < min_block_size
  ) {
    /* Invalid area or area too small */
    return 0;
  }

  /* First block */
  first_block = (Heap_Block *) first_block_begin;
  first_block->prev_size = page_size;
  first_block->size_and_flag = first_block_size | HEAP_PREV_BLOCK_USED;
  first_block->next = _Heap_Free_list_tail( heap );
  first_block->prev = _Heap_Free_list_head( heap );

  /*
   * Last block.
   *
   * The next block of the last block is the first block.  Since the first
   * block indicates that the previous block is used, this ensures that the
   * last block appears as used for the _Heap_Is_used() and _Heap_Is_free()
   * functions.
   */
  last_block = _Heap_Block_at( first_block, first_block_size );
  last_block->prev_size = first_block_size;
  last_block->size_and_flag = first_block_begin - (uintptr_t) last_block;

  /* Heap control */
  heap->page_size = page_size;
  heap->min_block_size = min_block_size;
  heap->area_begin = heap_area_begin;
  heap->area_end = heap_area_end;
  heap->first_block = first_block;
  heap->last_block = last_block;
  /* The free list initially contains exactly the first block. */
  _Heap_Free_list_head( heap )->next = first_block;
  _Heap_Free_list_tail( heap )->prev = first_block;

  /* Statistics */
  stats->size = first_block_size;
  stats->free_size = first_block_size;
  stats->min_free_size = first_block_size;
  stats->free_blocks = 1;
  stats->max_free_blocks = 1;
  stats->used_blocks = 0;
  stats->max_search = 0;
  stats->allocs = 0;
  stats->searches = 0;
  stats->frees = 0;
  stats->resizes = 0;
  stats->instance = instance++;

  _HAssert( _Heap_Is_aligned( CPU_ALIGNMENT, 4 ) );
  _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ) );
  _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ) );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
  );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
  );

  return alloc_area_size;
}
228
/*
 * Splits @a block after @a alloc_size bytes of allocation area if the
 * remainder is big enough to form a valid free block; otherwise the whole
 * block is used.  A new free block is either inserted after
 * @a free_list_anchor or, when the next block is already free, merged with
 * it (replacing it on the free list).
 */
void _Heap_Block_split(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;
  uintptr_t const min_alloc_size = min_block_size - HEAP_BLOCK_HEADER_SIZE;

  uintptr_t const block_size = _Heap_Block_size( block );

  /* Bytes actually consumed: requested size (at least the minimum) plus
   * the block header; rounded up to the page size for the block boundary. */
  uintptr_t const used_size =
    _Heap_Max( alloc_size, min_alloc_size ) + HEAP_BLOCK_HEADER_SIZE;
  uintptr_t const used_block_size = _Heap_Align_up( used_size, page_size );

  /* Bytes left over for a potential new free block, and the minimum such
   * remainder that still forms a valid block. */
  uintptr_t const free_size = block_size + HEAP_BLOCK_SIZE_OFFSET - used_size;
  uintptr_t const free_size_limit = min_block_size + HEAP_BLOCK_SIZE_OFFSET;

  Heap_Block *next_block = _Heap_Block_at( block, block_size );

  _HAssert( used_size <= block_size + HEAP_BLOCK_SIZE_OFFSET );
  _HAssert( used_size + free_size == block_size + HEAP_BLOCK_SIZE_OFFSET );

  if ( free_size >= free_size_limit ) {
    /* Remainder is large enough: carve a new free block off the end. */
    Heap_Block *const free_block = _Heap_Block_at( block, used_block_size );
    uintptr_t free_block_size = block_size - used_block_size;

    _HAssert( used_block_size + free_block_size == block_size );

    _Heap_Block_set_size( block, used_block_size );

    /* Statistics */
    stats->free_size += free_block_size;

    if ( _Heap_Is_used( next_block ) ) {
      /* Next block is used: the new free block joins the free list on its
       * own, directly after the anchor. */
      _Heap_Free_list_insert_after( free_list_anchor, free_block );

      /* Statistics */
      ++stats->free_blocks;
    } else {
      /* Next block is free: coalesce it with the new free block and take
       * over its position on the free list (block count is unchanged). */
      uintptr_t const next_block_size = _Heap_Block_size( next_block );

      _Heap_Free_list_replace( next_block, free_block );

      free_block_size += next_block_size;

      next_block = _Heap_Block_at( free_block, free_block_size );
    }

    free_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;

    /* The block after the free block records the free block's size and
     * clears its prev-used flag. */
    next_block->prev_size = free_block_size;
    next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
  } else {
    /* Remainder too small to split: the entire block stays used. */
    next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
  }
}
290
291static Heap_Block *_Heap_Block_allocate_from_begin(
292  Heap_Control *heap,
293  Heap_Block *block,
294  Heap_Block *free_list_anchor,
295  uintptr_t alloc_size
296)
297{
298  _Heap_Block_split( heap, block, free_list_anchor, alloc_size );
299
300  return block;
301}
302
/*
 * Allocates from the end of @a block: a new block starting at the page
 * boundary belonging to @a alloc_begin is carved out, and the lower part
 * of @a block becomes (or extends) a free block.  Returns the new block.
 */
static Heap_Block *_Heap_Block_allocate_from_end(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t block_begin = (uintptr_t) block;
  uintptr_t block_size = _Heap_Block_size( block );
  uintptr_t block_end = block_begin + block_size;

  /* The new (allocated) block starts at the block boundary that
   * corresponds to the requested allocation begin address. */
  Heap_Block *const new_block =
    _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
  uintptr_t const new_block_begin = (uintptr_t) new_block;
  uintptr_t const new_block_size = block_end - new_block_begin;

  /* Shrink the original block to the region below the new block. */
  block_end = new_block_begin;
  block_size = block_end - block_begin;

  _HAssert( block_size >= heap->min_block_size );
  _HAssert( new_block_size >= heap->min_block_size );

  /* Statistics: the lower part becomes free space. */
  stats->free_size += block_size;

  if ( _Heap_Is_prev_used( block ) ) {
    /* Previous block is used: the shrunken block becomes a new free block
     * and serves as anchor for a possible further split below. */
    _Heap_Free_list_insert_after( free_list_anchor, block );

    free_list_anchor = block;

    /* Statistics */
    ++stats->free_blocks;
  } else {
    /* Previous block is free: merge the shrunken block into it (it is
     * already on the free list, so the list is untouched). */
    Heap_Block *const prev_block = _Heap_Prev_block( block );
    uintptr_t const prev_block_size = _Heap_Block_size( prev_block );

    block = prev_block;
    block_begin = (uintptr_t) block;
    block_size += prev_block_size;
  }

  block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;

  /* The new block records the free block below it as not used. */
  new_block->prev_size = block_size;
  new_block->size_and_flag = new_block_size;

  /* Split off any excess at the end of the new block. */
  _Heap_Block_split( heap, new_block, free_list_anchor, alloc_size );

  return new_block;
}
356
357Heap_Block *_Heap_Block_allocate(
358  Heap_Control *heap,
359  Heap_Block *block,
360  uintptr_t alloc_begin,
361  uintptr_t alloc_size
362)
363{
364  Heap_Statistics *const stats = &heap->stats;
365
366  uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
367  uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;
368
369  Heap_Block *free_list_anchor = NULL;
370
371  _HAssert( alloc_area_begin <= alloc_begin );
372
373  if ( _Heap_Is_free( block ) ) {
374    free_list_anchor = block->prev;
375
376    _Heap_Free_list_remove( block );
377
378    /* Statistics */
379    --stats->free_blocks;
380    ++stats->used_blocks;
381    stats->free_size -= _Heap_Block_size( block );
382  } else {
383    free_list_anchor = _Heap_Free_list_head( heap );
384  }
385
386  if ( alloc_area_offset < heap->page_size ) {
387    alloc_size += alloc_area_offset;
388
389    block = _Heap_Block_allocate_from_begin(
390      heap,
391      block,
392      free_list_anchor,
393      alloc_size
394    );
395  } else {
396    block = _Heap_Block_allocate_from_end(
397      heap,
398      block,
399      free_list_anchor,
400      alloc_begin,
401      alloc_size
402    );
403  }
404
405  /* Statistics */
406  if ( stats->min_free_size > stats->free_size ) {
407    stats->min_free_size = stats->free_size;
408  }
409
410  return block;
411}
Note: See TracBrowser for help on using the repository browser.