source: rtems/cpukit/score/src/heap.c @ 749d64a

4.104.115
Last change on this file since 749d64a was e89faf3e, checked in by Joel Sherrill <joel.sherrill@…>, on 09/25/09 at 17:49:32

2009-09-25 Sebastian Huber <Sebastian.Huber@…>

  • score/src/heap.c, score/include/rtems/score/heap.h: Reduced alignment requirement for CPU_ALIGNMENT from four to two.
  • Property mode set to 100644
File size: 12.9 KB
Line 
1/**
2 * @file
3 *
4 * @ingroup ScoreHeap
5 *
6 * @brief Heap Handler implementation.
7 */
8
9/*
10 *  COPYRIGHT (c) 1989-2009.
11 *  On-Line Applications Research Corporation (OAR).
12 *
13 *  Copyright (c) 2009 embedded brains GmbH.
14 *
15 *  The license and distribution terms for this file may be
16 *  found in the file LICENSE in this distribution or at
17 *  http://www.rtems.com/license/LICENSE.
18 *
19 *  $Id$
20 */
21
22#if HAVE_CONFIG_H
23#include "config.h"
24#endif
25
26#include <rtems/system.h>
27#include <rtems/score/heap.h>
28
29#if CPU_ALIGNMENT == 0 || CPU_ALIGNMENT % 2 != 0
30  #error "invalid CPU_ALIGNMENT value"
31#endif
32
/* Monotonically increasing counter: each initialized heap gets a unique
 * identifier recorded in its statistics (see stats->instance below). */
static uint32_t instance = 0;
34
35/*PAGE
36 *
37 *  _Heap_Initialize
38 *
39 *  This kernel routine initializes a heap.
40 *
41 *  Input parameters:
42 *    heap         - pointer to heap header
43 *    area_begin - starting address of heap
44 *    size             - size of heap
45 *    page_size        - allocatable unit of memory
46 *
47 *  Output parameters:
48 *    returns - maximum memory available if RTEMS_SUCCESSFUL
49 *    0       - otherwise
50 *
51 *  This is what a heap looks like in memory immediately after initialization:
52 *
53 *
54 *            +--------------------------------+ <- begin = area_begin
55 *            |  unused space due to alignment |
56 *            |       size < page_size         |
57 *         0  +--------------------------------+ <- first block
58 *            |  prev_size = page_size         |
59 *         4  +--------------------------------+
60 *            |  size = size0              | 1 |
61 *         8  +---------------------+----------+ <- aligned on page_size
62 *            |  next = HEAP_TAIL   |          |
63 *        12  +---------------------+          |
64 *            |  prev = HEAP_HEAD   |  memory  |
65 *            +---------------------+          |
66 *            |                     available  |
67 *            |                                |
68 *            |                for allocation  |
69 *            |                                |
70 *     size0  +--------------------------------+ <- last dummy block
71 *            |  prev_size = size0             |
72 *        +4  +--------------------------------+
73 *            |  size = page_size          | 0 | <- prev block is free
74 *        +8  +--------------------------------+ <- aligned on page_size
75 *            |  unused space due to alignment |
76 *            |       size < page_size         |
77 *            +--------------------------------+ <- end = begin + size
78 *
79 *  Below is what a heap looks like after first allocation of SIZE bytes using
80 *  _Heap_allocate(). BSIZE stands for SIZE + 4 aligned up on 'page_size'
81 *  boundary.
82 *  [NOTE: If allocation were performed by _Heap_Allocate_aligned(), the
83 *  block size BSIZE is defined differently, and previously free block will
84 *  be split so that upper part of it will become used block (see
85 *  'heapallocatealigned.c' for details).]
86 *
87 *            +--------------------------------+ <- begin = area_begin
88 *            |  unused space due to alignment |
89 *            |       size < page_size         |
90 *         0  +--------------------------------+ <- used block
91 *            |  prev_size = page_size         |
92 *         4  +--------------------------------+
93 *            |  size = BSIZE              | 1 | <- prev block is used
94 *         8  +--------------------------------+ <- aligned on page_size
95 *            |              .                 | Pointer returned to the user
96 *            |              .                 | is 8 for _Heap_Allocate()
97 *            |              .                 | and is in range
98 * 8 +        |         user-accessible        | [8,8+page_size) for
99 *  page_size +- - -                      - - -+ _Heap_Allocate_aligned()
100 *            |             area               |
101 *            |              .                 |
102 *     BSIZE  +- - - - -     .        - - - - -+ <- free block
103 *            |              .                 |
104 * BSIZE  +4  +--------------------------------+
105 *            |  size = S = size0 - BSIZE  | 1 | <- prev block is used
106 * BSIZE  +8  +-------------------+------------+ <- aligned on page_size
107 *            |  next = HEAP_TAIL |            |
108 * BSIZE +12  +-------------------+            |
109 *            |  prev = HEAP_HEAD |     memory |
110 *            +-------------------+            |
111 *            |                   .  available |
112 *            |                   .            |
113 *            |                   .        for |
114 *            |                   .            |
115 * BSIZE +S+0 +-------------------+ allocation + <- last dummy block
116 *            |  prev_size = S    |            |
117 *       +S+4 +-------------------+------------+
118 *            |  size = page_size          | 0 | <- prev block is free
119 *       +S+8 +--------------------------------+ <- aligned on page_size
120 *            |  unused space due to alignment |
121 *            |       size < page_size         |
122 *            +--------------------------------+ <- end = begin + size
123 *
124 */
125
/**
 * Initializes @a heap to manage the memory area starting at
 * @a heap_area_begin_ptr of @a heap_area_size bytes.
 *
 * @a page_size is the allocation granularity; a value of zero selects
 * CPU_ALIGNMENT, otherwise it is rounded up to a multiple of CPU_ALIGNMENT.
 *
 * Returns the maximum allocatable size in bytes on success, or 0 if the
 * area is invalid or too small (see the layout diagram above).
 */
uintptr_t _Heap_Initialize(
  Heap_Control *heap,
  void *heap_area_begin_ptr,
  uintptr_t heap_area_size,
  uintptr_t page_size
)
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t const heap_area_begin = (uintptr_t) heap_area_begin_ptr;
  uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
  uintptr_t alloc_area_begin = heap_area_begin + HEAP_BLOCK_HEADER_SIZE;
  uintptr_t alloc_area_size = 0;
  uintptr_t first_block_begin = 0;
  uintptr_t first_block_size = 0;
  uintptr_t min_block_size = 0;
  uintptr_t overhead = 0;
  Heap_Block *first_block = NULL;
  Heap_Block *last_block = NULL;

  /* Normalize the page size: default to CPU_ALIGNMENT, otherwise round up */
  if ( page_size == 0 ) {
    page_size = CPU_ALIGNMENT;
  } else {
    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );

    if ( page_size < CPU_ALIGNMENT ) {
      /* Integer overflow */
      return 0;
    }
  }
  /* A block must be able to hold a full Heap_Block (header + free list links) */
  min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );

  /*
   * Place the first block so that its allocation area is aligned on the
   * page size; the bytes skipped for alignment plus one block header are
   * pure overhead.  Note: these values wrap harmlessly for a bogus area;
   * the validity check below rejects such input before they are used.
   */
  alloc_area_begin = _Heap_Align_up( alloc_area_begin, page_size );
  first_block_begin = alloc_area_begin - HEAP_BLOCK_HEADER_SIZE;
  overhead = HEAP_BLOCK_HEADER_SIZE + (first_block_begin - heap_area_begin);
  first_block_size = heap_area_size - overhead;
  first_block_size = _Heap_Align_down ( first_block_size, page_size );
  alloc_area_size = first_block_size - HEAP_BLOCK_HEADER_SIZE;

  if (
    heap_area_end < heap_area_begin
      || heap_area_size <= overhead
      || first_block_size < min_block_size
  ) {
    /* Invalid area or area too small */
    return 0;
  }

  /* First block: one big free block covering the whole usable area */
  first_block = (Heap_Block *) first_block_begin;
  first_block->prev_size = page_size;
  first_block->size_and_flag = first_block_size | HEAP_PREV_BLOCK_USED;
  first_block->next = _Heap_Free_list_tail( heap );
  first_block->prev = _Heap_Free_list_head( heap );

  /*
   * Last block.
   *
   * The next block of the last block is the first block.  Since the first
   * block indicates that the previous block is used, this ensures that the
   * last block appears as used for the _Heap_Is_used() and _Heap_Is_free()
   * functions.
   */
  last_block = _Heap_Block_at( first_block, first_block_size );
  last_block->prev_size = first_block_size;
  last_block->size_and_flag = first_block_begin - (uintptr_t) last_block;

  /* Heap control: record geometry and seed the free list with first_block */
  heap->page_size = page_size;
  heap->min_block_size = min_block_size;
  heap->area_begin = heap_area_begin;
  heap->area_end = heap_area_end;
  heap->first_block = first_block;
  heap->last_block = last_block;
  _Heap_Free_list_head( heap )->next = first_block;
  _Heap_Free_list_tail( heap )->prev = first_block;

  /* Statistics */
  stats->size = first_block_size;
  stats->free_size = first_block_size;
  stats->min_free_size = first_block_size;
  stats->free_blocks = 1;
  stats->max_free_blocks = 1;
  stats->used_blocks = 0;
  stats->max_search = 0;
  stats->allocs = 0;
  stats->searches = 0;
  stats->frees = 0;
  stats->resizes = 0;
  stats->instance = instance++;

  /* Sanity: page size and both block allocation areas must be aligned */
  _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ) );
  _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ) );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
  );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
  );

  return alloc_area_size;
}
227
/**
 * Splits @a block so that its first part satisfies an allocation of
 * @a alloc_size bytes.
 *
 * If the remainder after the (page-aligned, at least min_block_size) used
 * part is large enough to form a valid free block, it is split off and
 * either inserted into the free list after @a free_list_anchor (when the
 * following block is used) or merged with the following free block.
 * Otherwise the whole block is kept used and the next block's
 * HEAP_PREV_BLOCK_USED flag is set.
 */
void _Heap_Block_split(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;
  uintptr_t const min_alloc_size = min_block_size - HEAP_BLOCK_HEADER_SIZE;

  uintptr_t const block_size = _Heap_Block_size( block );

  /* Used part: requested size (at least the minimum) plus block header,
   * rounded up to the page size */
  uintptr_t const used_size =
    _Heap_Max( alloc_size, min_alloc_size ) + HEAP_BLOCK_HEADER_SIZE;
  uintptr_t const used_block_size = _Heap_Align_up( used_size, page_size );

  uintptr_t const free_size = block_size + HEAP_BLOCK_SIZE_OFFSET - used_size;
  uintptr_t const free_size_limit = min_block_size + HEAP_BLOCK_SIZE_OFFSET;

  Heap_Block *next_block = _Heap_Block_at( block, block_size );

  _HAssert( used_size <= block_size + HEAP_BLOCK_SIZE_OFFSET );
  _HAssert( used_size + free_size == block_size + HEAP_BLOCK_SIZE_OFFSET );

  if ( free_size >= free_size_limit ) {
    /* Keep used_block_size for block; the rest becomes a free block */
    Heap_Block *const free_block = _Heap_Block_at( block, used_block_size );
    uintptr_t free_block_size = block_size - used_block_size;

    _HAssert( used_block_size + free_block_size == block_size );

    _Heap_Block_set_size( block, used_block_size );

    /* Statistics */
    stats->free_size += free_block_size;

    if ( _Heap_Is_used( next_block ) ) {
      /* Next block used: the new free block joins the free list on its own */
      _Heap_Free_list_insert_after( free_list_anchor, free_block );

      /* Statistics */
      ++stats->free_blocks;
    } else {
      /* Next block free: coalesce it into the new free block, taking over
       * its free-list position */
      uintptr_t const next_block_size = _Heap_Block_size( next_block );

      _Heap_Free_list_replace( next_block, free_block );

      free_block_size += next_block_size;

      next_block = _Heap_Block_at( free_block, free_block_size );
    }

    /* free_block follows the used block, so its prev is marked used */
    free_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;

    next_block->prev_size = free_block_size;
    next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
  } else {
    /* Remainder too small to split off: give it all to the used block */
    next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
  }
}
289
290static Heap_Block *_Heap_Block_allocate_from_begin(
291  Heap_Control *heap,
292  Heap_Block *block,
293  Heap_Block *free_list_anchor,
294  uintptr_t alloc_size
295)
296{
297  _Heap_Block_split( heap, block, free_list_anchor, alloc_size );
298
299  return block;
300}
301
302static Heap_Block *_Heap_Block_allocate_from_end(
303  Heap_Control *heap,
304  Heap_Block *block,
305  Heap_Block *free_list_anchor,
306  uintptr_t alloc_begin,
307  uintptr_t alloc_size
308)
309{
310  Heap_Statistics *const stats = &heap->stats;
311
312  uintptr_t block_begin = (uintptr_t) block;
313  uintptr_t block_size = _Heap_Block_size( block );
314  uintptr_t block_end = block_begin + block_size;
315
316  Heap_Block *const new_block =
317    _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
318  uintptr_t const new_block_begin = (uintptr_t) new_block;
319  uintptr_t const new_block_size = block_end - new_block_begin;
320
321  block_end = new_block_begin;
322  block_size = block_end - block_begin;
323
324  _HAssert( block_size >= heap->min_block_size );
325  _HAssert( new_block_size >= heap->min_block_size );
326
327  /* Statistics */
328  stats->free_size += block_size;
329
330  if ( _Heap_Is_prev_used( block ) ) {
331    _Heap_Free_list_insert_after( free_list_anchor, block );
332
333    free_list_anchor = block;
334
335    /* Statistics */
336    ++stats->free_blocks;
337  } else {
338    Heap_Block *const prev_block = _Heap_Prev_block( block );
339    uintptr_t const prev_block_size = _Heap_Block_size( prev_block );
340
341    block = prev_block;
342    block_begin = (uintptr_t) block;
343    block_size += prev_block_size;
344  }
345
346  block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
347
348  new_block->prev_size = block_size;
349  new_block->size_and_flag = new_block_size;
350
351  _Heap_Block_split( heap, new_block, free_list_anchor, alloc_size );
352
353  return new_block;
354}
355
/**
 * Allocates @a alloc_size bytes starting at @a alloc_begin from @a block.
 *
 * @a alloc_begin must be at or above the block's own allocation area.  If
 * it lies within the first page of the block, the allocation is taken from
 * the block's beginning; otherwise the block is split and the allocation is
 * taken from its end.  Updates the free list and statistics accordingly and
 * returns the resulting used block.
 */
Heap_Block *_Heap_Block_allocate(
  Heap_Control *heap,
  Heap_Block *block,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
  uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;

  Heap_Block *free_list_anchor = NULL;

  _HAssert( alloc_area_begin <= alloc_begin );

  if ( _Heap_Is_free( block ) ) {
    /* Detach the block; remember its predecessor so split-off remainders
     * can be inserted back at the same free-list position */
    free_list_anchor = block->prev;

    _Heap_Free_list_remove( block );

    /* Statistics */
    --stats->free_blocks;
    ++stats->used_blocks;
    stats->free_size -= _Heap_Block_size( block );
  } else {
    free_list_anchor = _Heap_Free_list_head( heap );
  }

  if ( alloc_area_offset < heap->page_size ) {
    /* alloc_begin is within the first page: absorb the offset into the
     * requested size and use the block from its beginning */
    alloc_size += alloc_area_offset;

    block = _Heap_Block_allocate_from_begin(
      heap,
      block,
      free_list_anchor,
      alloc_size
    );
  } else {
    block = _Heap_Block_allocate_from_end(
      heap,
      block,
      free_list_anchor,
      alloc_begin,
      alloc_size
    );
  }

  /* Statistics */
  if ( stats->min_free_size > stats->free_size ) {
    stats->min_free_size = stats->free_size;
  }

  return block;
}
Note: See TracBrowser for help on using the repository browser.