source: rtems/cpukit/score/src/heap.c @ 2a713e3

4.115
Last change on this file since 2a713e3 was 2a713e3, checked in by Sebastian Huber <sebastian.huber@…>, on 03/24/14 at 14:57:29

score: _Heap_Protection_set_delayed_free_fraction

Add and use _Heap_Protection_set_delayed_free_fraction(). This makes it
possible to avoid a dependency on _Thread_Dispatch_is_enabled().

  • Property mode set to 100644
File size: 14.9 KB
Line 
1/**
2 * @file
3 *
4 * @ingroup ScoreHeap
5 *
6 * @brief Heap Handler implementation.
7 */
8
9/*
10 *  COPYRIGHT (c) 1989-2009.
11 *  On-Line Applications Research Corporation (OAR).
12 *
13 *  Copyright (c) 2009, 2010 embedded brains GmbH.
14 *
15 *  The license and distribution terms for this file may be
16 *  found in the file LICENSE in this distribution or at
17 *  http://www.rtems.org/license/LICENSE.
18 */
19
20#if HAVE_CONFIG_H
21  #include "config.h"
22#endif
23
24#include <rtems/score/heapimpl.h>
25#include <rtems/score/threadimpl.h>
26#include <rtems/score/interr.h>
27
28#include <string.h>
29
30#if CPU_ALIGNMENT == 0 || CPU_ALIGNMENT % 2 != 0
31  #error "invalid CPU_ALIGNMENT value"
32#endif
33
/* Monotonically increasing count of initialized heaps; copied into each
   heap's statistics (stats->instance) so individual heaps can be told
   apart in diagnostics. */
static uint32_t instance = 0;
35
36/*
37 *  _Heap_Initialize
38 *
39 *  This kernel routine initializes a heap.
40 *
41 *  Input parameters:
42 *    heap         - pointer to heap header
43 *    area_begin - starting address of heap
44 *    size             - size of heap
45 *    page_size        - allocatable unit of memory
46 *
47 *  Output parameters:
 *    returns - maximum memory available (i.e. the size of the first
 *              free block) on success, 0 otherwise
50 *
51 *  This is what a heap looks like in memory immediately after initialization:
52 *
53 *
54 *            +--------------------------------+ <- begin = area_begin
55 *            |  unused space due to alignment |
56 *            |       size < page_size         |
57 *         0  +--------------------------------+ <- first block
58 *            |  prev_size = page_size         |
59 *         4  +--------------------------------+
60 *            |  size = size0              | 1 |
61 *         8  +---------------------+----------+ <- aligned on page_size
62 *            |  next = HEAP_TAIL   |          |
63 *        12  +---------------------+          |
64 *            |  prev = HEAP_HEAD   |  memory  |
65 *            +---------------------+          |
66 *            |                     available  |
67 *            |                                |
68 *            |                for allocation  |
69 *            |                                |
70 *     size0  +--------------------------------+ <- last dummy block
71 *            |  prev_size = size0             |
72 *        +4  +--------------------------------+
73 *            |  size = page_size          | 0 | <- prev block is free
74 *        +8  +--------------------------------+ <- aligned on page_size
75 *            |  unused space due to alignment |
76 *            |       size < page_size         |
77 *            +--------------------------------+ <- end = begin + size
78 *
79 *  Below is what a heap looks like after first allocation of SIZE bytes using
 *  _Heap_Allocate(). BSIZE stands for SIZE + 4 aligned up on 'page_size'
81 *  boundary.
82 *  [NOTE: If allocation were performed by _Heap_Allocate_aligned(), the
83 *  block size BSIZE is defined differently, and previously free block will
84 *  be split so that upper part of it will become used block (see
85 *  'heapallocatealigned.c' for details).]
86 *
87 *            +--------------------------------+ <- begin = area_begin
88 *            |  unused space due to alignment |
89 *            |       size < page_size         |
90 *         0  +--------------------------------+ <- used block
91 *            |  prev_size = page_size         |
92 *         4  +--------------------------------+
93 *            |  size = BSIZE              | 1 | <- prev block is used
94 *         8  +--------------------------------+ <- aligned on page_size
95 *            |              .                 | Pointer returned to the user
96 *            |              .                 | is 8 for _Heap_Allocate()
97 *            |              .                 | and is in range
98 * 8 +        |         user-accessible        | [8,8+page_size) for
99 *  page_size +- - -                      - - -+ _Heap_Allocate_aligned()
100 *            |             area               |
101 *            |              .                 |
102 *     BSIZE  +- - - - -     .        - - - - -+ <- free block
103 *            |              .                 |
104 * BSIZE  +4  +--------------------------------+
105 *            |  size = S = size0 - BSIZE  | 1 | <- prev block is used
106 * BSIZE  +8  +-------------------+------------+ <- aligned on page_size
107 *            |  next = HEAP_TAIL |            |
108 * BSIZE +12  +-------------------+            |
109 *            |  prev = HEAP_HEAD |     memory |
110 *            +-------------------+            |
111 *            |                   .  available |
112 *            |                   .            |
113 *            |                   .        for |
114 *            |                   .            |
115 * BSIZE +S+0 +-------------------+ allocation + <- last dummy block
116 *            |  prev_size = S    |            |
117 *       +S+4 +-------------------+------------+
118 *            |  size = page_size          | 0 | <- prev block is free
119 *       +S+8 +--------------------------------+ <- aligned on page_size
120 *            |  unused space due to alignment |
121 *            |       size < page_size         |
122 *            +--------------------------------+ <- end = begin + size
123 *
124 */
125
126#ifdef HEAP_PROTECTION
  /*
   * Default block initializer used when HEAP_PROTECTION is enabled.
   *
   * Stamps the known protector patterns at the begin and end of the block
   * (later verified by _Heap_Protection_block_check_default), clears the
   * delayed-free link and tag, and records the executing thread for
   * diagnostic purposes.
   */
  static void _Heap_Protection_block_initialize_default(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    /* Guard words framing the block; any overwrite is detected on check */
    block->Protection_begin.protector [0] = HEAP_BEGIN_PROTECTOR_0;
    block->Protection_begin.protector [1] = HEAP_BEGIN_PROTECTOR_1;
    block->Protection_begin.next_delayed_free_block = NULL;
    /* Remember which thread touched the block last (helps debugging) */
    block->Protection_begin.task = _Thread_Get_executing();
    block->Protection_begin.tag = NULL;
    block->Protection_end.protector [0] = HEAP_END_PROTECTOR_0;
    block->Protection_end.protector [1] = HEAP_END_PROTECTOR_1;
  }
140
141  static void _Heap_Protection_block_check_default(
142    Heap_Control *heap,
143    Heap_Block *block
144  )
145  {
146    if (
147      block->Protection_begin.protector [0] != HEAP_BEGIN_PROTECTOR_0
148        || block->Protection_begin.protector [1] != HEAP_BEGIN_PROTECTOR_1
149        || block->Protection_end.protector [0] != HEAP_END_PROTECTOR_0
150        || block->Protection_end.protector [1] != HEAP_END_PROTECTOR_1
151    ) {
152      _Heap_Protection_block_error( heap, block );
153    }
154  }
155
  /*
   * Default handler for a corrupted heap block: terminates the system with
   * a fatal error.  Both parameters are currently unused; the 0xdeadbeef
   * code carries no information about the affected heap or block.
   */
  static void _Heap_Protection_block_error_default(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    /* FIXME */
    _Terminate( INTERNAL_ERROR_CORE, false, 0xdeadbeef );
  }
164#endif
165
166bool _Heap_Get_first_and_last_block(
167  uintptr_t heap_area_begin,
168  uintptr_t heap_area_size,
169  uintptr_t page_size,
170  uintptr_t min_block_size,
171  Heap_Block **first_block_ptr,
172  Heap_Block **last_block_ptr
173)
174{
175  uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
176  uintptr_t const alloc_area_begin =
177    _Heap_Align_up( heap_area_begin + HEAP_BLOCK_HEADER_SIZE, page_size );
178  uintptr_t const first_block_begin =
179    alloc_area_begin - HEAP_BLOCK_HEADER_SIZE;
180  uintptr_t const overhead =
181    HEAP_BLOCK_HEADER_SIZE + (first_block_begin - heap_area_begin);
182  uintptr_t const first_block_size =
183    _Heap_Align_down( heap_area_size - overhead, page_size );
184  Heap_Block *const first_block = (Heap_Block *) first_block_begin;
185  Heap_Block *const last_block =
186    _Heap_Block_at( first_block, first_block_size );
187
188  if (
189    heap_area_end < heap_area_begin
190      || heap_area_size <= overhead
191      || first_block_size < min_block_size
192  ) {
193    /* Invalid area or area too small */
194    return false;
195  }
196
197  *first_block_ptr = first_block;
198  *last_block_ptr = last_block;
199
200  return true;
201}
202
/*
 * Initializes @a heap to manage the memory area starting at
 * @a heap_area_begin_ptr of @a heap_area_size bytes with allocation
 * granularity @a page_size (0 selects CPU_ALIGNMENT).  See the layout
 * diagram above.  Returns the maximum allocatable size (the first block's
 * size) on success, or 0 if the area is invalid or too small.
 */
uintptr_t _Heap_Initialize(
  Heap_Control *heap,
  void *heap_area_begin_ptr,
  uintptr_t heap_area_size,
  uintptr_t page_size
)
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t const heap_area_begin = (uintptr_t) heap_area_begin_ptr;
  uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
  uintptr_t first_block_begin = 0;
  uintptr_t first_block_size = 0;
  uintptr_t last_block_begin = 0;
  uintptr_t min_block_size = 0;
  bool area_ok = false;
  Heap_Block *first_block = NULL;
  Heap_Block *last_block = NULL;

  /* Normalize the page size: zero means "use the CPU alignment", any
     other value is rounded up to a multiple of it. */
  if ( page_size == 0 ) {
    page_size = CPU_ALIGNMENT;
  } else {
    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );

    if ( page_size < CPU_ALIGNMENT ) {
      /* Integer overflow */
      return 0;
    }
  }
  /* Every block must be able to hold a full Heap_Block header */
  min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );

  area_ok = _Heap_Get_first_and_last_block(
    heap_area_begin,
    heap_area_size,
    page_size,
    min_block_size,
    &first_block,
    &last_block
  );
  if ( !area_ok ) {
    return 0;
  }

  /* Start from a clean control structure; this also zeroes the statistics
     and the protection hooks filled in below */
  memset(heap, 0, sizeof(*heap));

  #ifdef HEAP_PROTECTION
    heap->Protection.block_initialize = _Heap_Protection_block_initialize_default;
    heap->Protection.block_check = _Heap_Protection_block_check_default;
    heap->Protection.block_error = _Heap_Protection_block_error_default;
  #endif

  first_block_begin = (uintptr_t) first_block;
  last_block_begin = (uintptr_t) last_block;
  first_block_size = last_block_begin - first_block_begin;

  /* First block: spans the whole allocatable area and is the only member
     of the free list.  NOTE(review): prev_size of the first block is set
     to the heap area end — it has no real predecessor, so the field
     appears to serve as a marker; confirm against heapimpl.h. */
  first_block->prev_size = heap_area_end;
  /* HEAP_PREV_BLOCK_USED prevents coalescing before the first block */
  first_block->size_and_flag = first_block_size | HEAP_PREV_BLOCK_USED;
  first_block->next = _Heap_Free_list_tail( heap );
  first_block->prev = _Heap_Free_list_head( heap );
  _Heap_Protection_block_initialize( heap, first_block );

  /* Heap control */
  heap->page_size = page_size;
  heap->min_block_size = min_block_size;
  heap->area_begin = heap_area_begin;
  heap->area_end = heap_area_end;
  heap->first_block = first_block;
  heap->last_block = last_block;
  /* Free list initially contains exactly the first block */
  _Heap_Free_list_head( heap )->next = first_block;
  _Heap_Free_list_tail( heap )->prev = first_block;

  /* Last block: permanently "used" dummy block terminating the area; its
     size is established by _Heap_Set_last_block_size() */
  last_block->prev_size = first_block_size;
  last_block->size_and_flag = 0;
  _Heap_Set_last_block_size( heap );
  _Heap_Protection_block_initialize( heap, last_block );

  /* Statistics */
  stats->size = first_block_size;
  stats->free_size = first_block_size;
  stats->min_free_size = first_block_size;
  stats->free_blocks = 1;
  stats->max_free_blocks = 1;
  stats->instance = instance++;

  /* No-op unless HEAP_PROTECTION is enabled; presumably selects the
     fraction of free blocks whose reuse is delayed — TODO confirm */
  _Heap_Protection_set_delayed_free_fraction( heap, 2 );

  _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ) );
  _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ) );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
  );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
  );

  return first_block_size;
}
301
/*
 * Splits @a block so that its leading part (large enough for
 * @a alloc_size allocatable bytes) remains used; the trailing remainder
 * becomes a new free block if it is big enough, otherwise the whole block
 * stays used.  A new free block is either inserted after
 * @a free_list_anchor or merged with the directly following free block.
 *
 * NOTE(review): HEAP_ALLOC_BONUS appears to account for the next block's
 * prev_size field, which is usable by the allocation while this block is
 * used — confirm against heapimpl.h.
 */
static void _Heap_Block_split(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;
  uintptr_t const min_alloc_size = min_block_size - HEAP_BLOCK_HEADER_SIZE;

  uintptr_t const block_size = _Heap_Block_size( block );

  /* The used part must respect the minimum block size and page alignment */
  uintptr_t const used_size =
    _Heap_Max( alloc_size, min_alloc_size ) + HEAP_BLOCK_HEADER_SIZE;
  uintptr_t const used_block_size = _Heap_Align_up( used_size, page_size );

  uintptr_t const free_size = block_size + HEAP_ALLOC_BONUS - used_size;
  uintptr_t const free_size_limit = min_block_size + HEAP_ALLOC_BONUS;

  Heap_Block *next_block = _Heap_Block_at( block, block_size );

  _HAssert( used_size <= block_size + HEAP_ALLOC_BONUS );
  _HAssert( used_size + free_size == block_size + HEAP_ALLOC_BONUS );

  if ( free_size >= free_size_limit ) {
    /* Remainder is large enough to form a free block of its own */
    Heap_Block *const free_block = _Heap_Block_at( block, used_block_size );
    uintptr_t free_block_size = block_size - used_block_size;

    _HAssert( used_block_size + free_block_size == block_size );

    _Heap_Block_set_size( block, used_block_size );

    /* Statistics */
    stats->free_size += free_block_size;

    if ( _Heap_Is_used( next_block ) ) {
      /* Next block is used: link the new free block into the free list */
      _Heap_Free_list_insert_after( free_list_anchor, free_block );

      /* Statistics */
      ++stats->free_blocks;
    } else {
      /* Next block is free: coalesce it with the new free block, taking
         over its position in the free list */
      uintptr_t const next_block_size = _Heap_Block_size( next_block );

      _Heap_Free_list_replace( next_block, free_block );

      free_block_size += next_block_size;

      next_block = _Heap_Block_at( free_block, free_block_size );
    }

    free_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;

    /* Record the free block's size in its successor and clear the
       successor's "previous block used" flag */
    next_block->prev_size = free_block_size;
    next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;

    _Heap_Protection_block_initialize( heap, free_block );
  } else {
    /* Remainder too small: keep it in the used block */
    next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
  }
}
365
366static Heap_Block *_Heap_Block_allocate_from_begin(
367  Heap_Control *heap,
368  Heap_Block *block,
369  Heap_Block *free_list_anchor,
370  uintptr_t alloc_size
371)
372{
373  _Heap_Block_split( heap, block, free_list_anchor, alloc_size );
374
375  return block;
376}
377
/*
 * Allocates from the upper end of @a block: a new block is carved out so
 * that its allocation area starts at @a alloc_begin.  The lower remainder
 * of @a block stays free — either linked into the free list after
 * @a free_list_anchor or coalesced with the preceding free block — and
 * the new block is finally split to shed any trailing excess.
 */
static Heap_Block *_Heap_Block_allocate_from_end(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t block_begin = (uintptr_t) block;
  uintptr_t block_size = _Heap_Block_size( block );
  uintptr_t block_end = block_begin + block_size;

  /* Block whose allocation area contains alloc_begin */
  Heap_Block *const new_block =
    _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
  uintptr_t const new_block_begin = (uintptr_t) new_block;
  uintptr_t const new_block_size = block_end - new_block_begin;

  /* Shrink the original block to the part below the new block */
  block_end = new_block_begin;
  block_size = block_end - block_begin;

  _HAssert( block_size >= heap->min_block_size );
  _HAssert( new_block_size >= heap->min_block_size );

  /* Statistics: the lower part returns to the free pool (the caller in
     _Heap_Block_allocate() already subtracted the whole block size) */
  stats->free_size += block_size;

  if ( _Heap_Is_prev_used( block ) ) {
    /* No free predecessor: the lower part becomes a new free list entry */
    _Heap_Free_list_insert_after( free_list_anchor, block );

    free_list_anchor = block;

    /* Statistics */
    ++stats->free_blocks;
  } else {
    /* Coalesce the lower part with the preceding free block */
    Heap_Block *const prev_block = _Heap_Prev_block( block );
    uintptr_t const prev_block_size = _Heap_Block_size( prev_block );

    block = prev_block;
    block_size += prev_block_size;
  }

  block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;

  new_block->prev_size = block_size;
  /* No HEAP_PREV_BLOCK_USED flag: the block below is free */
  new_block->size_and_flag = new_block_size;

  /* Shed any excess above the requested size from the new block */
  _Heap_Block_split( heap, new_block, free_list_anchor, alloc_size );

  return new_block;
}
430
/*
 * Allocates @a alloc_size bytes starting at @a alloc_begin out of
 * @a block, which may be free or used (the used case supports allocation
 * of currently delayed-free areas under HEAP_PROTECTION — TODO confirm).
 * Depending on how far @a alloc_begin lies inside the block's allocation
 * area, the allocation is taken from the block's begin or end.  Updates
 * the free list and the statistics and returns the resulting used block.
 */
Heap_Block *_Heap_Block_allocate(
  Heap_Control *heap,
  Heap_Block *block,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
  uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;

  Heap_Block *free_list_anchor = NULL;

  _HAssert( alloc_area_begin <= alloc_begin );

  if ( _Heap_Is_free( block ) ) {
    /* Remember the predecessor as insertion anchor, then unlink the block;
       the whole block size leaves the free pool for now (parts may be
       returned by the helpers below) */
    free_list_anchor = block->prev;

    _Heap_Free_list_remove( block );

    /* Statistics */
    --stats->free_blocks;
    ++stats->used_blocks;
    stats->free_size -= _Heap_Block_size( block );
  } else {
    free_list_anchor = _Heap_Free_list_head( heap );
  }

  if ( alloc_area_offset < heap->page_size ) {
    /* Requested area starts within the first page: allocate from the
       block's begin and absorb the offset into the allocation size */
    alloc_size += alloc_area_offset;

    block = _Heap_Block_allocate_from_begin(
      heap,
      block,
      free_list_anchor,
      alloc_size
    );
  } else {
    block = _Heap_Block_allocate_from_end(
      heap,
      block,
      free_list_anchor,
      alloc_begin,
      alloc_size
    );
  }

  /* Statistics: track the low-water mark of free memory */
  if ( stats->min_free_size > stats->free_size ) {
    stats->min_free_size = stats->free_size;
  }

  _Heap_Protection_block_initialize( heap, block );

  return block;
}
Note: See TracBrowser for help on using the repository browser.