source: rtems/cpukit/score/src/heap.c @ c499856

/**
 * @file
 *
 * @ingroup ScoreHeap
 *
 * @brief Heap Handler implementation.
 */

/*
 *  COPYRIGHT (c) 1989-2009.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2009, 2010 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/heapimpl.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/interr.h>

#include <string.h>

#if CPU_ALIGNMENT == 0 || CPU_ALIGNMENT % 2 != 0
  #error "invalid CPU_ALIGNMENT value"
#endif

static uint32_t instance = 0;

/*
 *  _Heap_Initialize
 *
 *  This kernel routine initializes a heap.
 *
 *  Input parameters:
 *    heap       - pointer to heap header
 *    area_begin - starting address of heap
 *    size       - size of heap
 *    page_size  - allocatable unit of memory
 *
 *  Output parameters:
 *    returns - maximum memory available (the size of the first free block)
 *    0       - otherwise (invalid area or area too small)
 *
 *  This is what a heap looks like in memory immediately after initialization:
 *
 *
 *            +--------------------------------+ <- begin = area_begin
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *         0  +--------------------------------+ <- first block
 *            |  prev_size = page_size         |
 *         4  +--------------------------------+
 *            |  size = size0              | 1 |
 *         8  +---------------------+----------+ <- aligned on page_size
 *            |  next = HEAP_TAIL   |          |
 *        12  +---------------------+          |
 *            |  prev = HEAP_HEAD   |  memory  |
 *            +---------------------+          |
 *            |                     available  |
 *            |                                |
 *            |                for allocation  |
 *            |                                |
 *     size0  +--------------------------------+ <- last dummy block
 *            |  prev_size = size0             |
 *        +4  +--------------------------------+
 *            |  size = page_size          | 0 | <- prev block is free
 *        +8  +--------------------------------+ <- aligned on page_size
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *            +--------------------------------+ <- end = begin + size
 *
 *  Below is what a heap looks like after the first allocation of SIZE bytes
 *  using _Heap_Allocate().  BSIZE stands for SIZE + 4 aligned up to a
 *  'page_size' boundary.
 *  [NOTE: If the allocation were performed by _Heap_Allocate_aligned(), the
 *  block size BSIZE would be defined differently, and the previously free
 *  block would be split so that the upper part of it becomes the used block
 *  (see 'heapallocatealigned.c' for details).]
 *
 *            +--------------------------------+ <- begin = area_begin
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *         0  +--------------------------------+ <- used block
 *            |  prev_size = page_size         |
 *         4  +--------------------------------+
 *            |  size = BSIZE              | 1 | <- prev block is used
 *         8  +--------------------------------+ <- aligned on page_size
 *            |              .                 | Pointer returned to the user
 *            |              .                 | is 8 for _Heap_Allocate()
 *            |              .                 | and is in range
 * 8 +        |         user-accessible        | [8,8+page_size) for
 *  page_size +- - -                      - - -+ _Heap_Allocate_aligned()
 *            |             area               |
 *            |              .                 |
 *     BSIZE  +- - - - -     .        - - - - -+ <- free block
 *            |              .                 |
 * BSIZE  +4  +--------------------------------+
 *            |  size = S = size0 - BSIZE  | 1 | <- prev block is used
 * BSIZE  +8  +-------------------+------------+ <- aligned on page_size
 *            |  next = HEAP_TAIL |            |
 * BSIZE +12  +-------------------+            |
 *            |  prev = HEAP_HEAD |     memory |
 *            +-------------------+            |
 *            |                   .  available |
 *            |                   .            |
 *            |                   .        for |
 *            |                   .            |
 * BSIZE +S+0 +-------------------+ allocation + <- last dummy block
 *            |  prev_size = S    |            |
 *       +S+4 +-------------------+------------+
 *            |  size = page_size          | 0 | <- prev block is free
 *       +S+8 +--------------------------------+ <- aligned on page_size
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *            +--------------------------------+ <- end = begin + size
 *
 */
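
/*
 *  Usage sketch (illustration only, not part of this file): initializing a
 *  heap in a static memory area and allocating from it.  The area name and
 *  the sizes below are hypothetical; passing page_size == 0 selects
 *  CPU_ALIGNMENT as the page size.
 *
 *    static char area[ 4096 ];
 *    Heap_Control heap;
 *
 *    uintptr_t avail = _Heap_Initialize( &heap, area, sizeof( area ), 0 );
 *    if ( avail > 0 ) {
 *      void *p = _Heap_Allocate( &heap, 128 );
 *      if ( p != NULL ) {
 *        _Heap_Free( &heap, p );
 *      }
 *    }
 */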

#ifdef HEAP_PROTECTION
  static void _Heap_Protection_block_initialize_default(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    block->Protection_begin.protector [0] = HEAP_BEGIN_PROTECTOR_0;
    block->Protection_begin.protector [1] = HEAP_BEGIN_PROTECTOR_1;
    block->Protection_begin.next_delayed_free_block = NULL;
    block->Protection_begin.task = _Thread_Get_executing();
    block->Protection_begin.tag = NULL;
    block->Protection_end.protector [0] = HEAP_END_PROTECTOR_0;
    block->Protection_end.protector [1] = HEAP_END_PROTECTOR_1;
  }

  static void _Heap_Protection_block_check_default(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    if (
      block->Protection_begin.protector [0] != HEAP_BEGIN_PROTECTOR_0
        || block->Protection_begin.protector [1] != HEAP_BEGIN_PROTECTOR_1
        || block->Protection_end.protector [0] != HEAP_END_PROTECTOR_0
        || block->Protection_end.protector [1] != HEAP_END_PROTECTOR_1
    ) {
      _Heap_Protection_block_error( heap, block );
    }
  }

  static void _Heap_Protection_block_error_default(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    /* FIXME */
    _Terminate( INTERNAL_ERROR_CORE, false, 0xdeadbeef );
  }
#endif

bool _Heap_Get_first_and_last_block(
  uintptr_t heap_area_begin,
  uintptr_t heap_area_size,
  uintptr_t page_size,
  uintptr_t min_block_size,
  Heap_Block **first_block_ptr,
  Heap_Block **last_block_ptr
)
{
  uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
  uintptr_t const alloc_area_begin =
    _Heap_Align_up( heap_area_begin + HEAP_BLOCK_HEADER_SIZE, page_size );
  uintptr_t const first_block_begin =
    alloc_area_begin - HEAP_BLOCK_HEADER_SIZE;
  uintptr_t const overhead =
    HEAP_BLOCK_HEADER_SIZE + (first_block_begin - heap_area_begin);
  uintptr_t const first_block_size =
    _Heap_Align_down( heap_area_size - overhead, page_size );
  Heap_Block *const first_block = (Heap_Block *) first_block_begin;
  Heap_Block *const last_block =
    _Heap_Block_at( first_block, first_block_size );

  if (
    heap_area_end < heap_area_begin
      || heap_area_size <= overhead
      || first_block_size < min_block_size
  ) {
    /* Invalid area or area too small */
    return false;
  }

  *first_block_ptr = first_block;
  *last_block_ptr = last_block;

  return true;
}
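
/*
 *  Worked example for the computation above (numbers are illustrative and
 *  assume a 32-bit target with HEAP_BLOCK_HEADER_SIZE == 8):
 *
 *    heap_area_begin = 0x1004, heap_area_size = 256, page_size = 16
 *
 *    alloc_area_begin  = align_up( 0x1004 + 8, 16 )  = 0x1010
 *    first_block_begin = 0x1010 - 8                  = 0x1008
 *    overhead          = 8 + ( 0x1008 - 0x1004 )     = 12
 *    first_block_size  = align_down( 256 - 12, 16 )  = 240
 *    last_block        = first_block + 240           = 0x10F8
 *
 *  The last block header still fits in the area:
 *  0x10F8 + 8 <= heap_area_end (0x1104).
 */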

uintptr_t _Heap_Initialize(
  Heap_Control *heap,
  void *heap_area_begin_ptr,
  uintptr_t heap_area_size,
  uintptr_t page_size
)
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t const heap_area_begin = (uintptr_t) heap_area_begin_ptr;
  uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
  uintptr_t first_block_begin = 0;
  uintptr_t first_block_size = 0;
  uintptr_t last_block_begin = 0;
  uintptr_t min_block_size = 0;
  bool area_ok = false;
  Heap_Block *first_block = NULL;
  Heap_Block *last_block = NULL;

  if ( page_size == 0 ) {
    page_size = CPU_ALIGNMENT;
  } else {
    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );

    if ( page_size < CPU_ALIGNMENT ) {
      /* Integer overflow */
      return 0;
    }
  }
  min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );

  area_ok = _Heap_Get_first_and_last_block(
    heap_area_begin,
    heap_area_size,
    page_size,
    min_block_size,
    &first_block,
    &last_block
  );
  if ( !area_ok ) {
    return 0;
  }

  memset(heap, 0, sizeof(*heap));

  #ifdef HEAP_PROTECTION
    heap->Protection.block_initialize = _Heap_Protection_block_initialize_default;
    heap->Protection.block_check = _Heap_Protection_block_check_default;
    heap->Protection.block_error = _Heap_Protection_block_error_default;
  #endif

  first_block_begin = (uintptr_t) first_block;
  last_block_begin = (uintptr_t) last_block;
  first_block_size = last_block_begin - first_block_begin;

  /* First block */
  first_block->prev_size = heap_area_end;
  first_block->size_and_flag = first_block_size | HEAP_PREV_BLOCK_USED;
  first_block->next = _Heap_Free_list_tail( heap );
  first_block->prev = _Heap_Free_list_head( heap );
  _Heap_Protection_block_initialize( heap, first_block );

  /* Heap control */
  heap->page_size = page_size;
  heap->min_block_size = min_block_size;
  heap->area_begin = heap_area_begin;
  heap->area_end = heap_area_end;
  heap->first_block = first_block;
  heap->last_block = last_block;
  _Heap_Free_list_head( heap )->next = first_block;
  _Heap_Free_list_tail( heap )->prev = first_block;

  /* Last block */
  last_block->prev_size = first_block_size;
  last_block->size_and_flag = 0;
  _Heap_Set_last_block_size( heap );
  _Heap_Protection_block_initialize( heap, last_block );

  /* Statistics */
  stats->size = first_block_size;
  stats->free_size = first_block_size;
  stats->min_free_size = first_block_size;
  stats->free_blocks = 1;
  stats->max_free_blocks = 1;
  stats->instance = instance++;

  _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ) );
  _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ) );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
  );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
  );

  return first_block_size;
}
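
/*
 *  Page size normalization sketch for the function above (illustrative
 *  values, CPU_ALIGNMENT == 8 assumed):
 *
 *    page_size == 0   -> page_size becomes CPU_ALIGNMENT (8)
 *    page_size == 10  -> rounded up to 16, the next multiple of 8
 *    page_size near UINTPTR_MAX -> _Heap_Align_up() wraps around to a value
 *                                  below CPU_ALIGNMENT; this is detected as
 *                                  integer overflow and 0 is returned
 */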

static void _Heap_Block_split(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;
  uintptr_t const min_alloc_size = min_block_size - HEAP_BLOCK_HEADER_SIZE;

  uintptr_t const block_size = _Heap_Block_size( block );

  uintptr_t const used_size =
    _Heap_Max( alloc_size, min_alloc_size ) + HEAP_BLOCK_HEADER_SIZE;
  uintptr_t const used_block_size = _Heap_Align_up( used_size, page_size );

  uintptr_t const free_size = block_size + HEAP_ALLOC_BONUS - used_size;
  uintptr_t const free_size_limit = min_block_size + HEAP_ALLOC_BONUS;

  Heap_Block *next_block = _Heap_Block_at( block, block_size );

  _HAssert( used_size <= block_size + HEAP_ALLOC_BONUS );
  _HAssert( used_size + free_size == block_size + HEAP_ALLOC_BONUS );

  if ( free_size >= free_size_limit ) {
    Heap_Block *const free_block = _Heap_Block_at( block, used_block_size );
    uintptr_t free_block_size = block_size - used_block_size;

    _HAssert( used_block_size + free_block_size == block_size );

    _Heap_Block_set_size( block, used_block_size );

    /* Statistics */
    stats->free_size += free_block_size;

    if ( _Heap_Is_used( next_block ) ) {
      _Heap_Free_list_insert_after( free_list_anchor, free_block );

      /* Statistics */
      ++stats->free_blocks;
    } else {
      uintptr_t const next_block_size = _Heap_Block_size( next_block );

      _Heap_Free_list_replace( next_block, free_block );

      free_block_size += next_block_size;

      next_block = _Heap_Block_at( free_block, free_block_size );
    }

    free_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;

    next_block->prev_size = free_block_size;
    next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;

    _Heap_Protection_block_initialize( heap, free_block );
  } else {
    next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
  }
}
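
/*
 *  Split sketch for the function above (illustrative values, assuming
 *  page_size == 16, min_block_size == 16, HEAP_BLOCK_HEADER_SIZE == 8 and
 *  HEAP_ALLOC_BONUS == 0):
 *
 *    block_size = 240, alloc_size = 100
 *
 *    used_size       = max( 100, 16 - 8 ) + 8  = 108
 *    used_block_size = align_up( 108, 16 )     = 112
 *    free_size       = 240 + 0 - 108           = 132
 *    free_size_limit = 16 + 0                  = 16
 *
 *  Since free_size >= free_size_limit, the block is split: the lower 112
 *  bytes remain the used block and a new free block of 240 - 112 = 128
 *  bytes is created (and coalesced with next_block if that block is free).
 *  Had free_size been below the limit, the whole block would stay used and
 *  only the HEAP_PREV_BLOCK_USED flag of next_block would be set.
 */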

static Heap_Block *_Heap_Block_allocate_from_begin(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_size
)
{
  _Heap_Block_split( heap, block, free_list_anchor, alloc_size );

  return block;
}

static Heap_Block *_Heap_Block_allocate_from_end(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t block_begin = (uintptr_t) block;
  uintptr_t block_size = _Heap_Block_size( block );
  uintptr_t block_end = block_begin + block_size;

  Heap_Block *const new_block =
    _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
  uintptr_t const new_block_begin = (uintptr_t) new_block;
  uintptr_t const new_block_size = block_end - new_block_begin;

  block_end = new_block_begin;
  block_size = block_end - block_begin;

  _HAssert( block_size >= heap->min_block_size );
  _HAssert( new_block_size >= heap->min_block_size );

  /* Statistics */
  stats->free_size += block_size;

  if ( _Heap_Is_prev_used( block ) ) {
    _Heap_Free_list_insert_after( free_list_anchor, block );

    free_list_anchor = block;

    /* Statistics */
    ++stats->free_blocks;
  } else {
    Heap_Block *const prev_block = _Heap_Prev_block( block );
    uintptr_t const prev_block_size = _Heap_Block_size( prev_block );

    block = prev_block;
    block_size += prev_block_size;
  }

  block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;

  new_block->prev_size = block_size;
  new_block->size_and_flag = new_block_size;

  _Heap_Block_split( heap, new_block, free_list_anchor, alloc_size );

  return new_block;
}
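
/*
 *  From-end allocation sketch (illustration of the function above): the
 *  block is cut in two, the lower part stays (or becomes) free, and the
 *  upper part, beginning at the block header just before alloc_begin,
 *  becomes the allocated block:
 *
 *    before:  |            original block                 |
 *    after:   | lower free remainder | new_block (upper)  |
 *                                    ^- new_block_begin, derived from
 *                                       alloc_begin via
 *                                       _Heap_Block_of_alloc_area()
 *
 *  If the block in front of the original block is itself free, the lower
 *  remainder is merged into it instead of creating a new free list entry.
 */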

Heap_Block *_Heap_Block_allocate(
  Heap_Control *heap,
  Heap_Block *block,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
  uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;

  Heap_Block *free_list_anchor = NULL;

  _HAssert( alloc_area_begin <= alloc_begin );

  if ( _Heap_Is_free( block ) ) {
    free_list_anchor = block->prev;

    _Heap_Free_list_remove( block );

    /* Statistics */
    --stats->free_blocks;
    ++stats->used_blocks;
    stats->free_size -= _Heap_Block_size( block );
  } else {
    free_list_anchor = _Heap_Free_list_head( heap );
  }

  if ( alloc_area_offset < heap->page_size ) {
    alloc_size += alloc_area_offset;

    block = _Heap_Block_allocate_from_begin(
      heap,
      block,
      free_list_anchor,
      alloc_size
    );
  } else {
    block = _Heap_Block_allocate_from_end(
      heap,
      block,
      free_list_anchor,
      alloc_begin,
      alloc_size
    );
  }

  /* Statistics */
  if ( stats->min_free_size > stats->free_size ) {
    stats->min_free_size = stats->free_size;
  }

  _Heap_Protection_block_initialize( heap, block );

  return block;
}
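
/*
 *  Decision sketch for _Heap_Block_allocate() (illustrative values,
 *  page_size == 16 assumed), with alloc_area_begin == 0x1010:
 *
 *    alloc_begin == 0x1018  ->  offset 8, below page_size: allocate from
 *                               the begin of the block; the offset is
 *                               absorbed into alloc_size
 *    alloc_begin == 0x1040  ->  offset 48, at least page_size: allocate
 *                               from the end; the lower part of the block
 *                               remains free
 */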