source: rtems/cpukit/score/src/heap.c @ 3859cd63

Last change: 3859cd63, checked in by sebastian.huber <sebastian.huber@…> on 10/25/19 at 13:19:01

rtems-5: Improve heap fatal error information

Update #3806.
/**
 * @file
 *
 * @ingroup RTEMSScoreHeap
 *
 * @brief Heap Handler implementation.
 */

/*
 *  COPYRIGHT (c) 1989-2009.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2009, 2010 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/heapimpl.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/interr.h>

#include <string.h>

#if CPU_ALIGNMENT == 0 || CPU_ALIGNMENT % 2 != 0
  #error "invalid CPU_ALIGNMENT value"
#endif
/*
 *  _Heap_Initialize
 *
 *  This kernel routine initializes a heap.
 *
 *  Input parameters:
 *    heap       - pointer to heap header
 *    area_begin - starting address of heap
 *    size       - size of heap
 *    page_size  - allocatable unit of memory
 *
 *  Output parameters:
 *    returns - maximum memory available if successfully initialized
 *    0       - otherwise
 *
 *  This is what a heap looks like in memory immediately after initialization:
 *
 *
 *            +--------------------------------+ <- begin = area_begin
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *         0  +--------------------------------+ <- first block
 *            |  prev_size = page_size         |
 *         4  +--------------------------------+
 *            |  size = size0              | 1 |
 *         8  +---------------------+----------+ <- aligned on page_size
 *            |  next = HEAP_TAIL   |          |
 *        12  +---------------------+          |
 *            |  prev = HEAP_HEAD   |  memory  |
 *            +---------------------+          |
 *            |                     available  |
 *            |                                |
 *            |                for allocation  |
 *            |                                |
 *     size0  +--------------------------------+ <- last dummy block
 *            |  prev_size = size0             |
 *        +4  +--------------------------------+
 *            |  size = page_size          | 0 | <- prev block is free
 *        +8  +--------------------------------+ <- aligned on page_size
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *            +--------------------------------+ <- end = begin + size
 *
 *  Below is what a heap looks like after the first allocation of SIZE bytes
 *  using _Heap_Allocate().  BSIZE stands for SIZE + 4 aligned up on the
 *  'page_size' boundary.
 *  [NOTE: If the allocation were performed by _Heap_Allocate_aligned(), the
 *  block size BSIZE is defined differently, and the previously free block
 *  will be split so that the upper part of it becomes the used block (see
 *  'heapallocatealigned.c' for details).]
 *
 *            +--------------------------------+ <- begin = area_begin
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *         0  +--------------------------------+ <- used block
 *            |  prev_size = page_size         |
 *         4  +--------------------------------+
 *            |  size = BSIZE              | 1 | <- prev block is used
 *         8  +--------------------------------+ <- aligned on page_size
 *            |              .                 | Pointer returned to the user
 *            |              .                 | is 8 for _Heap_Allocate()
 *            |              .                 | and is in range
 * 8 +        |         user-accessible        | [8,8+page_size) for
 *  page_size +- - -                      - - -+ _Heap_Allocate_aligned()
 *            |             area               |
 *            |              .                 |
 *     BSIZE  +- - - - -     .        - - - - -+ <- free block
 *            |              .                 |
 * BSIZE  +4  +--------------------------------+
 *            |  size = S = size0 - BSIZE  | 1 | <- prev block is used
 * BSIZE  +8  +-------------------+------------+ <- aligned on page_size
 *            |  next = HEAP_TAIL |            |
 * BSIZE +12  +-------------------+            |
 *            |  prev = HEAP_HEAD |     memory |
 *            +-------------------+            |
 *            |                   .  available |
 *            |                   .            |
 *            |                   .        for |
 *            |                   .            |
 * BSIZE +S+0 +-------------------+ allocation + <- last dummy block
 *            |  prev_size = S    |            |
 *       +S+4 +-------------------+------------+
 *            |  size = page_size          | 0 | <- prev block is free
 *       +S+8 +--------------------------------+ <- aligned on page_size
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *            +--------------------------------+ <- end = begin + size
 *
 */
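
/*
 * The following usage sketch is not part of the Heap Handler itself.  It
 * shows how a client could set up and use a heap through the routines
 * implemented below and declared in <rtems/score/heapimpl.h>; the buffer
 * name and the sizes are illustrative assumptions, not values taken from
 * this file.
 */
#if 0
static uint8_t example_heap_area[ 4096 ];

static void example_heap_usage( void )
{
  Heap_Control heap;
  uintptr_t max_size;
  void *p;

  /* Returns the maximum allocatable size, or 0 if the area is unusable */
  max_size = _Heap_Initialize(
    &heap,
    example_heap_area,
    sizeof( example_heap_area ),
    0
  );

  if ( max_size != 0 ) {
    /* After this allocation the layout matches the second figure above */
    p = _Heap_Allocate( &heap, 128 );

    if ( p != NULL ) {
      _Heap_Free( &heap, p );
    }
  }
}
#endif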

#ifdef HEAP_PROTECTION
  static void _Heap_Protection_block_initialize_default(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    block->Protection_begin.protector [0] = HEAP_BEGIN_PROTECTOR_0;
    block->Protection_begin.protector [1] = HEAP_BEGIN_PROTECTOR_1;
    block->Protection_begin.next_delayed_free_block = NULL;
    block->Protection_begin.task = _Thread_Get_executing();
    block->Protection_begin.tag = NULL;
    block->Protection_end.protector [0] = HEAP_END_PROTECTOR_0;
    block->Protection_end.protector [1] = HEAP_END_PROTECTOR_1;
  }

  static void _Heap_Protection_block_check_default(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    if (
      block->Protection_begin.protector [0] != HEAP_BEGIN_PROTECTOR_0
        || block->Protection_begin.protector [1] != HEAP_BEGIN_PROTECTOR_1
        || block->Protection_end.protector [0] != HEAP_END_PROTECTOR_0
        || block->Protection_end.protector [1] != HEAP_END_PROTECTOR_1
    ) {
      _Heap_Protection_block_error( heap, block, HEAP_ERROR_BROKEN_PROTECTOR );
    }
  }

  static void _Heap_Protection_block_error_default(
    Heap_Control *heap,
    Heap_Block *block,
    Heap_Error_reason reason
  )
  {
    Heap_Error_context error_context = {
      .heap = heap,
      .block = block,
      .reason = reason
    };

    _Terminate( RTEMS_FATAL_SOURCE_HEAP, (uintptr_t) &error_context );
  }
#endif

bool _Heap_Get_first_and_last_block(
  uintptr_t heap_area_begin,
  uintptr_t heap_area_size,
  uintptr_t page_size,
  uintptr_t min_block_size,
  Heap_Block **first_block_ptr,
  Heap_Block **last_block_ptr
)
{
  uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
  uintptr_t const alloc_area_begin =
    _Heap_Align_up( heap_area_begin + HEAP_BLOCK_HEADER_SIZE, page_size );
  uintptr_t const first_block_begin =
    alloc_area_begin - HEAP_BLOCK_HEADER_SIZE;
  uintptr_t const overhead =
    HEAP_BLOCK_HEADER_SIZE + (first_block_begin - heap_area_begin);
  uintptr_t const first_block_size =
    _Heap_Align_down( heap_area_size - overhead, page_size );
  Heap_Block *const first_block = (Heap_Block *) first_block_begin;
  Heap_Block *const last_block =
    _Heap_Block_at( first_block, first_block_size );

  if (
    heap_area_end < heap_area_begin
      || heap_area_size <= overhead
      || first_block_size < min_block_size
  ) {
    /* Invalid area or area too small */
    return false;
  }

  *first_block_ptr = first_block;
  *last_block_ptr = last_block;

  return true;
}
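
/*
 * A worked instance of the computation in _Heap_Get_first_and_last_block(),
 * assuming a 32-bit target where HEAP_BLOCK_HEADER_SIZE is 8 and the minimum
 * block size is 16; all addresses below are illustrative.
 */
#if 0
static void example_first_and_last_block( void )
{
  Heap_Block *first_block;
  Heap_Block *last_block;
  bool ok;

  /*
   * With heap_area_begin = 0x1004, heap_area_size = 0x100 and page_size = 8:
   *   alloc_area_begin  = align_up( 0x1004 + 8, 8 )    = 0x1010
   *   first_block_begin = 0x1010 - 8                   = 0x1008
   *   overhead          = 8 + ( 0x1008 - 0x1004 )      = 12
   *   first_block_size  = align_down( 0x100 - 12, 8 )  = 0xf0
   * so last_block ends up at 0x1008 + 0xf0 = 0x10f8.
   */
  ok = _Heap_Get_first_and_last_block(
    0x1004,
    0x100,
    8,
    16,
    &first_block,
    &last_block
  );

  _HAssert( ok );
  _HAssert( (uintptr_t) first_block == 0x1008 );
  _HAssert( (uintptr_t) last_block == 0x10f8 );
}
#endif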

uintptr_t _Heap_Initialize(
  Heap_Control *heap,
  void *heap_area_begin_ptr,
  uintptr_t heap_area_size,
  uintptr_t page_size
)
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t const heap_area_begin = (uintptr_t) heap_area_begin_ptr;
  uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
  uintptr_t first_block_begin = 0;
  uintptr_t first_block_size = 0;
  uintptr_t last_block_begin = 0;
  uintptr_t min_block_size = 0;
  bool area_ok = false;
  Heap_Block *first_block = NULL;
  Heap_Block *last_block = NULL;

  if ( page_size == 0 ) {
    page_size = CPU_ALIGNMENT;
  } else {
    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );

    if ( page_size < CPU_ALIGNMENT ) {
      /* Integer overflow */
      return 0;
    }
  }

  min_block_size = _Heap_Min_block_size( page_size );

  area_ok = _Heap_Get_first_and_last_block(
    heap_area_begin,
    heap_area_size,
    page_size,
    min_block_size,
    &first_block,
    &last_block
  );
  if ( !area_ok ) {
    return 0;
  }

  memset(heap, 0, sizeof(*heap));

  #ifdef HEAP_PROTECTION
    heap->Protection.block_initialize = _Heap_Protection_block_initialize_default;
    heap->Protection.block_check = _Heap_Protection_block_check_default;
    heap->Protection.block_error = _Heap_Protection_block_error_default;
  #endif

  first_block_begin = (uintptr_t) first_block;
  last_block_begin = (uintptr_t) last_block;
  first_block_size = last_block_begin - first_block_begin;

  /* First block */
  first_block->prev_size = heap_area_end;
  first_block->size_and_flag = first_block_size | HEAP_PREV_BLOCK_USED;
  first_block->next = _Heap_Free_list_tail( heap );
  first_block->prev = _Heap_Free_list_head( heap );
  _Heap_Protection_block_initialize( heap, first_block );

  /* Heap control */
  heap->page_size = page_size;
  heap->min_block_size = min_block_size;
  heap->area_begin = heap_area_begin;
  heap->area_end = heap_area_end;
  heap->first_block = first_block;
  heap->last_block = last_block;
  _Heap_Free_list_head( heap )->next = first_block;
  _Heap_Free_list_tail( heap )->prev = first_block;

  /* Last block */
  last_block->prev_size = first_block_size;
  last_block->size_and_flag = 0;
  _Heap_Set_last_block_size( heap );
  _Heap_Protection_block_initialize( heap, last_block );

  /* Statistics */
  stats->size = first_block_size;
  stats->free_size = first_block_size;
  stats->min_free_size = first_block_size;
  stats->free_blocks = 1;
  stats->max_free_blocks = 1;

  _Heap_Protection_set_delayed_free_fraction( heap, 2 );

  _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ) );
  _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ) );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
  );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
  );

  return first_block_size;
}
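
/*
 * A standalone model of the page size normalization above.  The helper
 * mirrors the round-up arithmetic used by _Heap_Align_up(); the concrete
 * CPU_ALIGNMENT value of 8 is an assumption for this sketch.
 */
#if 0
static uintptr_t example_align_up( uintptr_t value, uintptr_t alignment )
{
  uintptr_t remainder = value % alignment;

  return remainder != 0 ? value - remainder + alignment : value;
}

/*
 * With CPU_ALIGNMENT == 8:
 *   example_align_up( 10, 8 ) == 16, so a requested page size of 10 is
 *   rounded up to 16;
 *   example_align_up( UINTPTR_MAX - 1, 8 ) wraps around to 0, which is
 *   below CPU_ALIGNMENT and therefore rejected by the overflow check in
 *   _Heap_Initialize().
 */
#endif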

static void _Heap_Block_split(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;
  uintptr_t const min_alloc_size = min_block_size - HEAP_BLOCK_HEADER_SIZE;

  uintptr_t const block_size = _Heap_Block_size( block );

  uintptr_t const used_size =
    _Heap_Max( alloc_size, min_alloc_size ) + HEAP_BLOCK_HEADER_SIZE;
  uintptr_t const used_block_size = _Heap_Align_up( used_size, page_size );

  uintptr_t const free_size = block_size + HEAP_ALLOC_BONUS - used_size;
  uintptr_t const free_size_limit = min_block_size + HEAP_ALLOC_BONUS;

  Heap_Block *next_block = _Heap_Block_at( block, block_size );

  _HAssert( used_size <= block_size + HEAP_ALLOC_BONUS );
  _HAssert( used_size + free_size == block_size + HEAP_ALLOC_BONUS );

  if ( free_size >= free_size_limit ) {
    Heap_Block *const free_block = _Heap_Block_at( block, used_block_size );
    uintptr_t free_block_size = block_size - used_block_size;

    _HAssert( used_block_size + free_block_size == block_size );

    _Heap_Block_set_size( block, used_block_size );

    /* Statistics */
    stats->free_size += free_block_size;

    if ( _Heap_Is_used( next_block ) ) {
      _Heap_Free_list_insert_after( free_list_anchor, free_block );

      /* Statistics */
      ++stats->free_blocks;
    } else {
      uintptr_t const next_block_size = _Heap_Block_size( next_block );

      _Heap_Free_list_replace( next_block, free_block );

      free_block_size += next_block_size;

      next_block = _Heap_Block_at( free_block, free_block_size );
    }

    free_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;

    next_block->prev_size = free_block_size;
    next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;

    _Heap_Protection_block_initialize( heap, free_block );
  } else {
    next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
  }
}
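
/*
 * A numeric illustration of the split decision in _Heap_Block_split(),
 * assuming page_size = 8, min_block_size = 16, HEAP_BLOCK_HEADER_SIZE = 8
 * and HEAP_ALLOC_BONUS = 4 (plausible 32-bit values, not taken from this
 * file).
 *
 * block_size = 64, alloc_size = 20:
 *   used_size       = max( 20, 8 ) + 8   = 28
 *   used_block_size = align_up( 28, 8 )  = 32
 *   free_size       = 64 + 4 - 28        = 40
 *   free_size_limit = 16 + 4             = 20
 * Since free_size >= free_size_limit, the block is split and a free block
 * of 64 - 32 = 32 bytes remains.
 *
 * block_size = 32, alloc_size = 20:
 *   free_size = 32 + 4 - 28 = 8 < 20,
 * so the whole block is handed out and only the HEAP_PREV_BLOCK_USED flag
 * of the next block is set.
 */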

static Heap_Block *_Heap_Block_allocate_from_begin(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_size
)
{
  _Heap_Block_split( heap, block, free_list_anchor, alloc_size );

  return block;
}

static Heap_Block *_Heap_Block_allocate_from_end(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t block_begin = (uintptr_t) block;
  uintptr_t block_size = _Heap_Block_size( block );
  uintptr_t block_end = block_begin + block_size;

  Heap_Block *const new_block =
    _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
  uintptr_t const new_block_begin = (uintptr_t) new_block;
  uintptr_t const new_block_size = block_end - new_block_begin;

  block_end = new_block_begin;
  block_size = block_end - block_begin;

  _HAssert( block_size >= heap->min_block_size );
  _HAssert( new_block_size >= heap->min_block_size );

  /* Statistics */
  stats->free_size += block_size;

  if ( _Heap_Is_prev_used( block ) ) {
    _Heap_Free_list_insert_after( free_list_anchor, block );

    free_list_anchor = block;

    /* Statistics */
    ++stats->free_blocks;
  } else {
    Heap_Block *const prev_block = _Heap_Prev_block( block );
    uintptr_t const prev_block_size = _Heap_Block_size( prev_block );

    block = prev_block;
    block_size += prev_block_size;
  }

  block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;

  new_block->prev_size = block_size;
  new_block->size_and_flag = new_block_size;

  _Heap_Block_split( heap, new_block, free_list_anchor, alloc_size );

  return new_block;
}

Heap_Block *_Heap_Block_allocate(
  Heap_Control *heap,
  Heap_Block *block,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
  uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;

  Heap_Block *free_list_anchor = NULL;

  _HAssert( alloc_area_begin <= alloc_begin );

  if ( _Heap_Is_free( block ) ) {
    free_list_anchor = block->prev;

    _Heap_Free_list_remove( block );

    /* Statistics */
    --stats->free_blocks;
    ++stats->used_blocks;
    stats->free_size -= _Heap_Block_size( block );
  } else {
    free_list_anchor = _Heap_Free_list_head( heap );
  }

  if ( alloc_area_offset < heap->page_size ) {
    alloc_size += alloc_area_offset;

    block = _Heap_Block_allocate_from_begin(
      heap,
      block,
      free_list_anchor,
      alloc_size
    );
  } else {
    block = _Heap_Block_allocate_from_end(
      heap,
      block,
      free_list_anchor,
      alloc_begin,
      alloc_size
    );
  }

  /* Statistics */
  if ( stats->min_free_size > stats->free_size ) {
    stats->min_free_size = stats->free_size;
  }

  _Heap_Protection_block_initialize( heap, block );

  return block;
}
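
/*
 * Sketch of how the two allocation paths above are selected; the block and
 * the sizes are placeholders.  _Heap_Allocate() passes an alloc_begin equal
 * to the block's allocation area, so the offset is zero and the block is
 * used from its begin; an aligned allocation may pass an alloc_begin one or
 * more pages into the block, in which case the lower part stays (or
 * becomes) free and the used block is carved out from the end.
 */
#if 0
static void example_allocation_paths( Heap_Control *heap, Heap_Block *block )
{
  uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );

  /* Offset 0 < page_size: _Heap_Block_allocate_from_begin() is used */
  _Heap_Block_allocate( heap, block, alloc_area_begin, 32 );

  /*
   * An offset of, e.g., 4 * heap->page_size would instead take the
   * _Heap_Block_allocate_from_end() path.
   */
}
#endif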