source: rtems/cpukit/score/src/heap.c @ 1c2d178

Last change on this file since 1c2d178 was b6606e8, checked in by Sebastian Huber <sebastian.huber@…>, on 12/08/16 at 15:41:30

score: Remove fatal is internal indicator

The fatal "is internal" indicator is redundant, since the fatal source
and error code uniquely identify a fatal error. Keep the fatal user
extension's "is internal" parameter for backward compatibility and
always set it to false.

Update #2825.

/**
 * @file
 *
 * @ingroup ScoreHeap
 *
 * @brief Heap Handler implementation.
 */

/*
 *  COPYRIGHT (c) 1989-2009.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2009, 2010 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/heapimpl.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/interr.h>

#include <string.h>

#if CPU_ALIGNMENT == 0 || CPU_ALIGNMENT % 2 != 0
  #error "invalid CPU_ALIGNMENT value"
#endif

/*
 *  _Heap_Initialize
 *
 *  This kernel routine initializes a heap.
 *
 *  Input parameters:
 *    heap       - pointer to the heap control block
 *    area_begin - starting address of the heap area
 *    size       - size of the heap area
 *    page_size  - allocatable unit of memory
 *
 *  Output parameters:
 *    returns the maximum memory available (the size of the first block)
 *    on success, and 0 otherwise
 *
 *  This is what a heap looks like in memory immediately after initialization:
 *
 *            +--------------------------------+ <- begin = area_begin
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *         0  +--------------------------------+ <- first block
 *            |  prev_size = page_size         |
 *         4  +--------------------------------+
 *            |  size = size0              | 1 |
 *         8  +---------------------+----------+ <- aligned on page_size
 *            |  next = HEAP_TAIL   |          |
 *        12  +---------------------+          |
 *            |  prev = HEAP_HEAD   |  memory  |
 *            +---------------------+          |
 *            |                     available  |
 *            |                                |
 *            |                for allocation  |
 *            |                                |
 *     size0  +--------------------------------+ <- last dummy block
 *            |  prev_size = size0             |
 *        +4  +--------------------------------+
 *            |  size = page_size          | 0 | <- prev block is free
 *        +8  +--------------------------------+ <- aligned on page_size
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *            +--------------------------------+ <- end = begin + size
 *
 *  Below is what a heap looks like after the first allocation of SIZE bytes
 *  using _Heap_Allocate().  BSIZE stands for SIZE + 4 aligned up to the
 *  'page_size' boundary.
 *  [NOTE: If the allocation were performed by _Heap_Allocate_aligned(), the
 *  block size BSIZE would be defined differently, and the previously free
 *  block would be split so that its upper part becomes the used block (see
 *  'heapallocatealigned.c' for details).]
 *
 *            +--------------------------------+ <- begin = area_begin
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *         0  +--------------------------------+ <- used block
 *            |  prev_size = page_size         |
 *         4  +--------------------------------+
 *            |  size = BSIZE              | 1 | <- prev block is used
 *         8  +--------------------------------+ <- aligned on page_size
 *            |              .                 | Pointer returned to the user
 *            |              .                 | is 8 for _Heap_Allocate()
 *            |              .                 | and is in range
 * 8 +        |         user-accessible        | [8,8+page_size) for
 *  page_size +- - -                      - - -+ _Heap_Allocate_aligned()
 *            |             area               |
 *            |              .                 |
 *     BSIZE  +- - - - -     .        - - - - -+ <- free block
 *            |              .                 |
 * BSIZE  +4  +--------------------------------+
 *            |  size = S = size0 - BSIZE  | 1 | <- prev block is used
 * BSIZE  +8  +-------------------+------------+ <- aligned on page_size
 *            |  next = HEAP_TAIL |            |
 * BSIZE +12  +-------------------+            |
 *            |  prev = HEAP_HEAD |     memory |
 *            +-------------------+            |
 *            |                   .  available |
 *            |                   .            |
 *            |                   .        for |
 *            |                   .            |
 * BSIZE +S+0 +-------------------+ allocation + <- last dummy block
 *            |  prev_size = S    |            |
 *       +S+4 +-------------------+------------+
 *            |  size = page_size          | 0 | <- prev block is free
 *       +S+8 +--------------------------------+ <- aligned on page_size
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *            +--------------------------------+ <- end = begin + size
 *
 */
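
/*
 *  A worked instance of the diagrams above (illustrative values, assumed
 *  rather than taken from this file): on a 32-bit target without
 *  HEAP_PROTECTION, HEAP_BLOCK_HEADER_SIZE is 8 and the allocation bonus
 *  is 4.  With page_size = 8, a first allocation of SIZE = 20 bytes gives
 *
 *    BSIZE = align_up( 20 + 4, 8 ) = 24
 *
 *  so the used block occupies 24 bytes and the remaining
 *  S = size0 - 24 bytes form the free block of the second diagram.
 */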
123
124#ifdef HEAP_PROTECTION
125  static void _Heap_Protection_block_initialize_default(
126    Heap_Control *heap,
127    Heap_Block *block
128  )
129  {
130    block->Protection_begin.protector [0] = HEAP_BEGIN_PROTECTOR_0;
131    block->Protection_begin.protector [1] = HEAP_BEGIN_PROTECTOR_1;
132    block->Protection_begin.next_delayed_free_block = NULL;
133    block->Protection_begin.task = _Thread_Get_executing();
134    block->Protection_begin.tag = NULL;
135    block->Protection_end.protector [0] = HEAP_END_PROTECTOR_0;
136    block->Protection_end.protector [1] = HEAP_END_PROTECTOR_1;
137  }
138
139  static void _Heap_Protection_block_check_default(
140    Heap_Control *heap,
141    Heap_Block *block
142  )
143  {
144    if (
145      block->Protection_begin.protector [0] != HEAP_BEGIN_PROTECTOR_0
146        || block->Protection_begin.protector [1] != HEAP_BEGIN_PROTECTOR_1
147        || block->Protection_end.protector [0] != HEAP_END_PROTECTOR_0
148        || block->Protection_end.protector [1] != HEAP_END_PROTECTOR_1
149    ) {
150      _Heap_Protection_block_error( heap, block );
151    }
152  }
153
154  static void _Heap_Protection_block_error_default(
155    Heap_Control *heap,
156    Heap_Block *block
157  )
158  {
159    /* FIXME */
160    _Terminate( INTERNAL_ERROR_CORE, 0xdeadbeef );
161  }
162#endif
163
164bool _Heap_Get_first_and_last_block(
165  uintptr_t heap_area_begin,
166  uintptr_t heap_area_size,
167  uintptr_t page_size,
168  uintptr_t min_block_size,
169  Heap_Block **first_block_ptr,
170  Heap_Block **last_block_ptr
171)
172{
173  uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
174  uintptr_t const alloc_area_begin =
175    _Heap_Align_up( heap_area_begin + HEAP_BLOCK_HEADER_SIZE, page_size );
176  uintptr_t const first_block_begin =
177    alloc_area_begin - HEAP_BLOCK_HEADER_SIZE;
178  uintptr_t const overhead =
179    HEAP_BLOCK_HEADER_SIZE + (first_block_begin - heap_area_begin);
180  uintptr_t const first_block_size =
181    _Heap_Align_down( heap_area_size - overhead, page_size );
182  Heap_Block *const first_block = (Heap_Block *) first_block_begin;
183  Heap_Block *const last_block =
184    _Heap_Block_at( first_block, first_block_size );
185
186  if (
187    heap_area_end < heap_area_begin
188      || heap_area_size <= overhead
189      || first_block_size < min_block_size
190  ) {
191    /* Invalid area or area too small */
192    return false;
193  }
194
195  *first_block_ptr = first_block;
196  *last_block_ptr = last_block;
197
198  return true;
199}
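
/*
 *  Worked example for the computation above (illustrative values, assumed
 *  rather than taken from this file): on a 32-bit target without
 *  HEAP_PROTECTION, HEAP_BLOCK_HEADER_SIZE is 8.  For
 *  heap_area_begin = 0x1004, heap_area_size = 0x1000 and page_size = 8:
 *
 *    alloc_area_begin  = align_up( 0x1004 + 8, 8 )    = 0x1010
 *    first_block_begin = 0x1010 - 8                   = 0x1008
 *    overhead          = 8 + ( 0x1008 - 0x1004 )      = 12
 *    first_block_size  = align_down( 0x1000 - 12, 8 ) = 0xff0
 *    last_block        = 0x1008 + 0xff0               = 0x1ff8
 *
 *  The last block header ends at 0x2000 and thus fits into the area,
 *  which ends at 0x2004.
 */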

uintptr_t _Heap_Initialize(
  Heap_Control *heap,
  void *heap_area_begin_ptr,
  uintptr_t heap_area_size,
  uintptr_t page_size
)
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t const heap_area_begin = (uintptr_t) heap_area_begin_ptr;
  uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
  uintptr_t first_block_begin = 0;
  uintptr_t first_block_size = 0;
  uintptr_t last_block_begin = 0;
  uintptr_t min_block_size = 0;
  bool area_ok = false;
  Heap_Block *first_block = NULL;
  Heap_Block *last_block = NULL;

  if ( page_size == 0 ) {
    page_size = CPU_ALIGNMENT;
  } else {
    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );

    if ( page_size < CPU_ALIGNMENT ) {
      /* Integer overflow */
      return 0;
    }
  }

  min_block_size = _Heap_Min_block_size( page_size );

  area_ok = _Heap_Get_first_and_last_block(
    heap_area_begin,
    heap_area_size,
    page_size,
    min_block_size,
    &first_block,
    &last_block
  );
  if ( !area_ok ) {
    return 0;
  }

  memset(heap, 0, sizeof(*heap));

  #ifdef HEAP_PROTECTION
    heap->Protection.block_initialize = _Heap_Protection_block_initialize_default;
    heap->Protection.block_check = _Heap_Protection_block_check_default;
    heap->Protection.block_error = _Heap_Protection_block_error_default;
  #endif

  first_block_begin = (uintptr_t) first_block;
  last_block_begin = (uintptr_t) last_block;
  first_block_size = last_block_begin - first_block_begin;

  /* First block */
  first_block->prev_size = heap_area_end;
  first_block->size_and_flag = first_block_size | HEAP_PREV_BLOCK_USED;
  first_block->next = _Heap_Free_list_tail( heap );
  first_block->prev = _Heap_Free_list_head( heap );
  _Heap_Protection_block_initialize( heap, first_block );

  /* Heap control */
  heap->page_size = page_size;
  heap->min_block_size = min_block_size;
  heap->area_begin = heap_area_begin;
  heap->area_end = heap_area_end;
  heap->first_block = first_block;
  heap->last_block = last_block;
  _Heap_Free_list_head( heap )->next = first_block;
  _Heap_Free_list_tail( heap )->prev = first_block;

  /* Last block */
  last_block->prev_size = first_block_size;
  last_block->size_and_flag = 0;
  _Heap_Set_last_block_size( heap );
  _Heap_Protection_block_initialize( heap, last_block );

  /* Statistics */
  stats->size = first_block_size;
  stats->free_size = first_block_size;
  stats->min_free_size = first_block_size;
  stats->free_blocks = 1;
  stats->max_free_blocks = 1;

  _Heap_Protection_set_delayed_free_fraction( heap, 2 );

  _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ) );
  _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ) );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
  );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
  );

  return first_block_size;
}
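
/*
 *  Minimal usage sketch (not part of this file; the area and sizes are
 *  assumptions for illustration):
 *
 *    static char area[ 4096 ];
 *    static Heap_Control heap;
 *
 *    uintptr_t avail = _Heap_Initialize( &heap, area, sizeof( area ), 0 );
 *
 *    if ( avail > 0 ) {
 *      void *p = _Heap_Allocate( &heap, 128 );
 *
 *      if ( p != NULL ) {
 *        _Heap_Free( &heap, p );
 *      }
 *    }
 *
 *  A page_size of zero selects CPU_ALIGNMENT, see above.
 */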

static void _Heap_Block_split(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;
  uintptr_t const min_alloc_size = min_block_size - HEAP_BLOCK_HEADER_SIZE;

  uintptr_t const block_size = _Heap_Block_size( block );

  uintptr_t const used_size =
    _Heap_Max( alloc_size, min_alloc_size ) + HEAP_BLOCK_HEADER_SIZE;
  uintptr_t const used_block_size = _Heap_Align_up( used_size, page_size );

  uintptr_t const free_size = block_size + HEAP_ALLOC_BONUS - used_size;
  uintptr_t const free_size_limit = min_block_size + HEAP_ALLOC_BONUS;

  Heap_Block *next_block = _Heap_Block_at( block, block_size );

  _HAssert( used_size <= block_size + HEAP_ALLOC_BONUS );
  _HAssert( used_size + free_size == block_size + HEAP_ALLOC_BONUS );

  if ( free_size >= free_size_limit ) {
    Heap_Block *const free_block = _Heap_Block_at( block, used_block_size );
    uintptr_t free_block_size = block_size - used_block_size;

    _HAssert( used_block_size + free_block_size == block_size );

    _Heap_Block_set_size( block, used_block_size );

    /* Statistics */
    stats->free_size += free_block_size;

    if ( _Heap_Is_used( next_block ) ) {
      _Heap_Free_list_insert_after( free_list_anchor, free_block );

      /* Statistics */
      ++stats->free_blocks;
    } else {
      uintptr_t const next_block_size = _Heap_Block_size( next_block );

      _Heap_Free_list_replace( next_block, free_block );

      free_block_size += next_block_size;

      next_block = _Heap_Block_at( free_block, free_block_size );
    }

    free_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;

    next_block->prev_size = free_block_size;
    next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;

    _Heap_Protection_block_initialize( heap, free_block );
  } else {
    next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
  }
}
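
/*
 *  Worked example of the split decision (illustrative values, assumed
 *  rather than taken from this file): with page_size = 8,
 *  min_block_size = 16, HEAP_BLOCK_HEADER_SIZE = 8 and
 *  HEAP_ALLOC_BONUS = 4, splitting a block of block_size = 64 for
 *  alloc_size = 20 yields
 *
 *    used_size       = max( 20, 16 - 8 ) + 8 = 28
 *    used_block_size = align_up( 28, 8 )     = 32
 *    free_size       = 64 + 4 - 28           = 40
 *    free_size_limit = 16 + 4                = 20
 *
 *  Since free_size >= free_size_limit, the block is split and a free
 *  block of 64 - 32 = 32 bytes remains.
 */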

static Heap_Block *_Heap_Block_allocate_from_begin(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_size
)
{
  _Heap_Block_split( heap, block, free_list_anchor, alloc_size );

  return block;
}

static Heap_Block *_Heap_Block_allocate_from_end(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t block_begin = (uintptr_t) block;
  uintptr_t block_size = _Heap_Block_size( block );
  uintptr_t block_end = block_begin + block_size;

  Heap_Block *const new_block =
    _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
  uintptr_t const new_block_begin = (uintptr_t) new_block;
  uintptr_t const new_block_size = block_end - new_block_begin;

  block_end = new_block_begin;
  block_size = block_end - block_begin;

  _HAssert( block_size >= heap->min_block_size );
  _HAssert( new_block_size >= heap->min_block_size );

  /* Statistics */
  stats->free_size += block_size;

  if ( _Heap_Is_prev_used( block ) ) {
    _Heap_Free_list_insert_after( free_list_anchor, block );

    free_list_anchor = block;

    /* Statistics */
    ++stats->free_blocks;
  } else {
    Heap_Block *const prev_block = _Heap_Prev_block( block );
    uintptr_t const prev_block_size = _Heap_Block_size( prev_block );

    block = prev_block;
    block_size += prev_block_size;
  }

  block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;

  new_block->prev_size = block_size;
  new_block->size_and_flag = new_block_size;

  _Heap_Block_split( heap, new_block, free_list_anchor, alloc_size );

  return new_block;
}
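
/*
 *  Illustrative boundary arithmetic (assumed values): for a free block at
 *  0x1000 with block_size = 0x100 and an alloc_begin whose block begins at
 *  new_block = 0x1080, the lower 0x80 bytes remain free (or merge with a
 *  free predecessor), and the upper 0x80 bytes become the allocated block,
 *  which _Heap_Block_split() may trim further.
 */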

Heap_Block *_Heap_Block_allocate(
  Heap_Control *heap,
  Heap_Block *block,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
  uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;

  Heap_Block *free_list_anchor = NULL;

  _HAssert( alloc_area_begin <= alloc_begin );

  if ( _Heap_Is_free( block ) ) {
    free_list_anchor = block->prev;

    _Heap_Free_list_remove( block );

    /* Statistics */
    --stats->free_blocks;
    ++stats->used_blocks;
    stats->free_size -= _Heap_Block_size( block );
  } else {
    free_list_anchor = _Heap_Free_list_head( heap );
  }

  if ( alloc_area_offset < heap->page_size ) {
    alloc_size += alloc_area_offset;

    block = _Heap_Block_allocate_from_begin(
      heap,
      block,
      free_list_anchor,
      alloc_size
    );
  } else {
    block = _Heap_Block_allocate_from_end(
      heap,
      block,
      free_list_anchor,
      alloc_begin,
      alloc_size
    );
  }

  /* Statistics */
  if ( stats->min_free_size > stats->free_size ) {
    stats->min_free_size = stats->free_size;
  }

  _Heap_Protection_block_initialize( heap, block );

  return block;
}
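
/*
 *  Dispatch example (assumed values): with page_size = 8 and an allocation
 *  area beginning at 0x1008, alloc_begin = 0x1008 gives
 *  alloc_area_offset = 0 < page_size, so the block is allocated from its
 *  begin, while alloc_begin = 0x1100 gives alloc_area_offset = 0xf8 >=
 *  page_size, so the block is allocated from its end and the lower part
 *  may remain free.
 */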