source: rtems/cpukit/score/src/heap.c @ c86da31c

Last change on this file since c86da31c was b2a0214, checked in by Sebastian Huber <sebastian.huber@…>, on 06/07/10 at 09:35:01

2010-06-07 Sebastian Huber <sebastian.huber@…>

  • score/include/rtems/score/heap.h: Declare _Heap_Get_first_and_last_block(). Removed Heap_Extend_status. Changed return type of _Heap_Extend() to bool.
  • score/inline/rtems/score/heap.inl: Define _Heap_Set_last_block_size().
  • score/src/heap.c: Define and use _Heap_Get_first_and_last_block().
  • score/src/heapgetinfo.c: Removed assert statements. Do not count the last block. This ensures that all size values are an integral multiple of the page size which is consistent with the other statistics.
  • score/src/heapextend.c: Implemented support for scattered heap areas.
  • score/src/heapwalk.c: Dump also last block. Changes for new first and last block values.
  • score/src/pheapextend.c, rtems/src/regionextend.c: Update for _Heap_Extend() changes.
/**
 * @file
 *
 * @ingroup ScoreHeap
 *
 * @brief Heap Handler implementation.
 */

/*
 *  COPYRIGHT (c) 1989-2009.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2009, 2010 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  $Id$
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/system.h>
#include <rtems/score/heap.h>

#if CPU_ALIGNMENT == 0 || CPU_ALIGNMENT % 2 != 0
  #error "invalid CPU_ALIGNMENT value"
#endif

static uint32_t instance = 0;
/*
 *  _Heap_Initialize
 *
 *  This kernel routine initializes a heap.
 *
 *  Input parameters:
 *    heap       - pointer to heap header
 *    area_begin - starting address of heap
 *    size       - size of heap
 *    page_size  - allocatable unit of memory
 *
 *  Output parameters:
 *    returns - maximum memory available on success
 *    0       - otherwise
 *
 *  This is what a heap looks like in memory immediately after initialization:
 *
 *
 *            +--------------------------------+ <- begin = area_begin
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *         0  +--------------------------------+ <- first block
 *            |  prev_size = heap_area_end     |
 *         4  +--------------------------------+
 *            |  size = size0              | 1 |
 *         8  +---------------------+----------+ <- aligned on page_size
 *            |  next = HEAP_TAIL   |          |
 *        12  +---------------------+          |
 *            |  prev = HEAP_HEAD   |  memory  |
 *            +---------------------+          |
 *            |                     available  |
 *            |                                |
 *            |                for allocation  |
 *            |                                |
 *     size0  +--------------------------------+ <- last dummy block
 *            |  prev_size = size0             |
 *        +4  +--------------------------------+
 *            |  size = page_size          | 0 | <- prev block is free
 *        +8  +--------------------------------+ <- aligned on page_size
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *            +--------------------------------+ <- end = begin + size
 *
 *  [NOTE: The last block size shown as page_size is a simplification.  The
 *  code below sets it via _Heap_Set_last_block_size() so that
 *  _Heap_Block_at( last_block, size ) reaches the first block again.]
 *
 *  Below is what a heap looks like after the first allocation of SIZE bytes
 *  using _Heap_Allocate().  BSIZE stands for SIZE + 4 aligned up on a
 *  'page_size' boundary.
 *  [NOTE: If the allocation is performed by _Heap_Allocate_aligned(), the
 *  block size BSIZE is defined differently, and the previously free block
 *  may be split so that its upper part becomes the used block (see
 *  'heapallocatealigned.c' for details).]
 *
 *            +--------------------------------+ <- begin = area_begin
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *         0  +--------------------------------+ <- used block
 *            |  prev_size = heap_area_end     |
 *         4  +--------------------------------+
 *            |  size = BSIZE              | 1 | <- prev block is used
 *         8  +--------------------------------+ <- aligned on page_size
 *            |              .                 | The pointer returned to the
 *            |              .                 | user is at offset 8 for
 *            |              .                 | _Heap_Allocate() and at an
 * 8 +        |         user-accessible        | offset in [8, 8 + page_size)
 *  page_size +- - -                      - - -+ for _Heap_Allocate_aligned()
 *            |             area               |
 *            |              .                 |
 *     BSIZE  +- - - - -     .        - - - - -+ <- free block
 *            |              .                 |
 * BSIZE  +4  +--------------------------------+
 *            |  size = S = size0 - BSIZE  | 1 | <- prev block is used
 * BSIZE  +8  +-------------------+------------+ <- aligned on page_size
 *            |  next = HEAP_TAIL |            |
 * BSIZE +12  +-------------------+            |
 *            |  prev = HEAP_HEAD |     memory |
 *            +-------------------+            |
 *            |                   .  available |
 *            |                   .            |
 *            |                   .        for |
 *            |                   .            |
 * BSIZE +S+0 +-------------------+ allocation + <- last dummy block
 *            |  prev_size = S    |            |
 *       +S+4 +-------------------+------------+
 *            |  size = page_size          | 0 | <- prev block is free
 *       +S+8 +--------------------------------+ <- aligned on page_size
 *            |  unused space due to alignment |
 *            |       size < page_size         |
 *            +--------------------------------+ <- end = begin + size
 *
 */
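
/*
 *  Illustration (editor's sketch, not part of the original file): the
 *  diagrams above rely on the block header encoding.  The least significant
 *  bit of size_and_flag carries HEAP_PREV_BLOCK_USED, while the remaining
 *  bits carry the block size, which is always a multiple of page_size and
 *  therefore at least two-aligned.  Decoding a header by hand would look
 *  roughly like this:
 *
 *    uintptr_t size = block->size_and_flag & ~(uintptr_t) HEAP_PREV_BLOCK_USED;
 *    bool prev_used = ( block->size_and_flag & HEAP_PREV_BLOCK_USED ) != 0;
 *
 *  which is what the _Heap_Block_size() and _Heap_Is_prev_used() inline
 *  routines compute.
 */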

bool _Heap_Get_first_and_last_block(
  uintptr_t heap_area_begin,
  uintptr_t heap_area_size,
  uintptr_t page_size,
  uintptr_t min_block_size,
  Heap_Block **first_block_ptr,
  Heap_Block **last_block_ptr
)
{
  uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
  uintptr_t const alloc_area_begin =
    _Heap_Align_up( heap_area_begin + HEAP_BLOCK_HEADER_SIZE, page_size );
  uintptr_t const first_block_begin =
    alloc_area_begin - HEAP_BLOCK_HEADER_SIZE;
  uintptr_t const overhead =
    HEAP_BLOCK_HEADER_SIZE + (first_block_begin - heap_area_begin);
  uintptr_t const first_block_size =
    _Heap_Align_down( heap_area_size - overhead, page_size );
  Heap_Block *const first_block = (Heap_Block *) first_block_begin;
  Heap_Block *const last_block =
    _Heap_Block_at( first_block, first_block_size );

  if (
    heap_area_end < heap_area_begin
      || heap_area_size <= overhead
      || first_block_size < min_block_size
  ) {
    /* Invalid area or area too small */
    return false;
  }

  *first_block_ptr = first_block;
  *last_block_ptr = last_block;

  return true;
}
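
/*
 *  Worked example (editor's sketch, not part of the original file),
 *  assuming a 32-bit target with HEAP_BLOCK_HEADER_SIZE == 8 and
 *  page_size == 16:
 *
 *    heap_area_begin  = 0x1004, heap_area_size = 0x100
 *    alloc_area_begin = align_up( 0x1004 + 8, 16 )   = 0x1010
 *    first_block      = 0x1010 - 8                   = 0x1008
 *    overhead         = 8 + ( 0x1008 - 0x1004 )      = 12
 *    first_block_size = align_down( 0x100 - 12, 16 ) = 0xF0
 *    last_block       = 0x1008 + 0xF0                = 0x10F8
 *
 *  The last block header ends at 0x1100, inside the area end 0x1104, and
 *  0xF0 >= min_block_size, so the area is accepted.
 */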

uintptr_t _Heap_Initialize(
  Heap_Control *heap,
  void *heap_area_begin_ptr,
  uintptr_t heap_area_size,
  uintptr_t page_size
)
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t const heap_area_begin = (uintptr_t) heap_area_begin_ptr;
  uintptr_t const heap_area_end = heap_area_begin + heap_area_size;
  uintptr_t first_block_begin = 0;
  uintptr_t first_block_size = 0;
  uintptr_t last_block_begin = 0;
  uintptr_t min_block_size = 0;
  bool area_ok = false;
  Heap_Block *first_block = NULL;
  Heap_Block *last_block = NULL;

  if ( page_size == 0 ) {
    page_size = CPU_ALIGNMENT;
  } else {
    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );

    if ( page_size < CPU_ALIGNMENT ) {
      /* Integer overflow */
      return 0;
    }
  }
  min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );

  area_ok = _Heap_Get_first_and_last_block(
    heap_area_begin,
    heap_area_size,
    page_size,
    min_block_size,
    &first_block,
    &last_block
  );
  if ( !area_ok ) {
    return 0;
  }

  first_block_begin = (uintptr_t) first_block;
  last_block_begin = (uintptr_t) last_block;
  first_block_size = last_block_begin - first_block_begin;

  /* First block */
  first_block->prev_size = heap_area_end;
  first_block->size_and_flag = first_block_size | HEAP_PREV_BLOCK_USED;
  first_block->next = _Heap_Free_list_tail( heap );
  first_block->prev = _Heap_Free_list_head( heap );

  /* Heap control */
  heap->page_size = page_size;
  heap->min_block_size = min_block_size;
  heap->area_begin = heap_area_begin;
  heap->area_end = heap_area_end;
  heap->first_block = first_block;
  heap->last_block = last_block;
  _Heap_Free_list_head( heap )->next = first_block;
  _Heap_Free_list_tail( heap )->prev = first_block;

  /* Last block */
  last_block->prev_size = first_block_size;
  last_block->size_and_flag = 0;
  _Heap_Set_last_block_size( heap );

  /* Statistics */
  stats->size = first_block_size;
  stats->free_size = first_block_size;
  stats->min_free_size = first_block_size;
  stats->free_blocks = 1;
  stats->max_free_blocks = 1;
  stats->used_blocks = 0;
  stats->max_search = 0;
  stats->allocs = 0;
  stats->searches = 0;
  stats->frees = 0;
  stats->resizes = 0;
  stats->instance = instance++;

  _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ) );
  _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ) );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
  );
  _HAssert(
    _Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
  );

  return first_block_size;
}
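
/*
 *  Usage sketch (editor's addition, not part of the original file): a
 *  minimal initialization over a static area.  The names example_heap and
 *  example_area and the 4 KiB size are assumptions for illustration.
 *
 *    #include <rtems/score/heap.h>
 *
 *    static Heap_Control example_heap;
 *    static uint8_t example_area[ 4096 ];
 *
 *    uintptr_t example_init( void )
 *    {
 *      return _Heap_Initialize(
 *        &example_heap,
 *        example_area,
 *        sizeof( example_area ),
 *        0
 *      );
 *    }
 *
 *  A page_size argument of 0 selects CPU_ALIGNMENT.  The return value is
 *  the usable size (first_block_size), or 0 if the area was rejected.
 */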

void _Heap_Block_split(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;
  uintptr_t const min_alloc_size = min_block_size - HEAP_BLOCK_HEADER_SIZE;

  uintptr_t const block_size = _Heap_Block_size( block );

  uintptr_t const used_size =
    _Heap_Max( alloc_size, min_alloc_size ) + HEAP_BLOCK_HEADER_SIZE;
  uintptr_t const used_block_size = _Heap_Align_up( used_size, page_size );

  uintptr_t const free_size = block_size + HEAP_BLOCK_SIZE_OFFSET - used_size;
  uintptr_t const free_size_limit = min_block_size + HEAP_BLOCK_SIZE_OFFSET;

  Heap_Block *next_block = _Heap_Block_at( block, block_size );

  _HAssert( used_size <= block_size + HEAP_BLOCK_SIZE_OFFSET );
  _HAssert( used_size + free_size == block_size + HEAP_BLOCK_SIZE_OFFSET );

  if ( free_size >= free_size_limit ) {
    Heap_Block *const free_block = _Heap_Block_at( block, used_block_size );
    uintptr_t free_block_size = block_size - used_block_size;

    _HAssert( used_block_size + free_block_size == block_size );

    _Heap_Block_set_size( block, used_block_size );

    /* Statistics */
    stats->free_size += free_block_size;

    if ( _Heap_Is_used( next_block ) ) {
      _Heap_Free_list_insert_after( free_list_anchor, free_block );

      /* Statistics */
      ++stats->free_blocks;
    } else {
      uintptr_t const next_block_size = _Heap_Block_size( next_block );

      _Heap_Free_list_replace( next_block, free_block );

      free_block_size += next_block_size;

      next_block = _Heap_Block_at( free_block, free_block_size );
    }

    free_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;

    next_block->prev_size = free_block_size;
    next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
  } else {
    next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
  }
}
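
/*
 *  Worked example (editor's sketch, not part of the original file),
 *  assuming a 32-bit target with HEAP_BLOCK_HEADER_SIZE == 8,
 *  HEAP_BLOCK_SIZE_OFFSET == 4, page_size == 16 and thus
 *  min_block_size == 16 and min_alloc_size == 8:
 *
 *    block_size = 64, alloc_size = 20
 *    used_size       = max( 20, 8 ) + 8   = 28
 *    used_block_size = align_up( 28, 16 ) = 32
 *    free_size       = 64 + 4 - 28        = 40
 *    free_size_limit = 16 + 4             = 20
 *
 *  Since 40 >= 20 the block is split: the used part keeps 32 bytes and a
 *  free block of 32 bytes is created at offset 32.  With alloc_size = 52
 *  instead, free_size would be 64 + 4 - 60 = 8 < 20, so the block would not
 *  be split and the spare bytes would stay inside the used block.
 */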

static Heap_Block *_Heap_Block_allocate_from_begin(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_size
)
{
  _Heap_Block_split( heap, block, free_list_anchor, alloc_size );

  return block;
}

static Heap_Block *_Heap_Block_allocate_from_end(
  Heap_Control *heap,
  Heap_Block *block,
  Heap_Block *free_list_anchor,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t block_begin = (uintptr_t) block;
  uintptr_t block_size = _Heap_Block_size( block );
  uintptr_t block_end = block_begin + block_size;

  Heap_Block *const new_block =
    _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );
  uintptr_t const new_block_begin = (uintptr_t) new_block;
  uintptr_t const new_block_size = block_end - new_block_begin;

  block_end = new_block_begin;
  block_size = block_end - block_begin;

  _HAssert( block_size >= heap->min_block_size );
  _HAssert( new_block_size >= heap->min_block_size );

  /* Statistics */
  stats->free_size += block_size;

  if ( _Heap_Is_prev_used( block ) ) {
    _Heap_Free_list_insert_after( free_list_anchor, block );

    free_list_anchor = block;

    /* Statistics */
    ++stats->free_blocks;
  } else {
    Heap_Block *const prev_block = _Heap_Prev_block( block );
    uintptr_t const prev_block_size = _Heap_Block_size( prev_block );

    block = prev_block;
    block_begin = (uintptr_t) block;
    block_size += prev_block_size;
  }

  block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;

  new_block->prev_size = block_size;
  new_block->size_and_flag = new_block_size;

  _Heap_Block_split( heap, new_block, free_list_anchor, alloc_size );

  return new_block;
}
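
/*
 *  Worked example (editor's sketch, not part of the original file),
 *  assuming a 32-bit target with HEAP_BLOCK_HEADER_SIZE == 8 and
 *  page_size == 16.  For a block at 0x2000 with block_size 0x100 and an
 *  aligned alloc_begin of 0x20C0:
 *
 *    new_block      = align_down( 0x20C0, 16 ) - 8 = 0x20B8
 *    new_block_size = 0x2100 - 0x20B8              = 0x48
 *    block_size     = 0x20B8 - 0x2000              = 0xB8
 *
 *  The front part remains a free block of 0xB8 bytes (merged with a free
 *  predecessor if there is one), and new_block is handed to
 *  _Heap_Block_split() to trim it to the requested allocation size.
 */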

Heap_Block *_Heap_Block_allocate(
  Heap_Control *heap,
  Heap_Block *block,
  uintptr_t alloc_begin,
  uintptr_t alloc_size
)
{
  Heap_Statistics *const stats = &heap->stats;

  uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
  uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;

  Heap_Block *free_list_anchor = NULL;

  _HAssert( alloc_area_begin <= alloc_begin );

  if ( _Heap_Is_free( block ) ) {
    free_list_anchor = block->prev;

    _Heap_Free_list_remove( block );

    /* Statistics */
    --stats->free_blocks;
    ++stats->used_blocks;
    stats->free_size -= _Heap_Block_size( block );
  } else {
    free_list_anchor = _Heap_Free_list_head( heap );
  }

  if ( alloc_area_offset < heap->page_size ) {
    alloc_size += alloc_area_offset;

    block = _Heap_Block_allocate_from_begin(
      heap,
      block,
      free_list_anchor,
      alloc_size
    );
  } else {
    block = _Heap_Block_allocate_from_end(
      heap,
      block,
      free_list_anchor,
      alloc_begin,
      alloc_size
    );
  }

  /* Statistics */
  if ( stats->min_free_size > stats->free_size ) {
    stats->min_free_size = stats->free_size;
  }

  return block;
}
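
/*
 *  Dispatch example (editor's sketch, not part of the original file),
 *  assuming page_size == 16 and a block whose allocation area begins at
 *  0x1010:
 *
 *    alloc_begin = 0x1018  ->  alloc_area_offset = 8, less than 16:
 *      allocate from the begin of the block; the 8 padding bytes are added
 *      to alloc_size so they stay inside the used block.
 *
 *    alloc_begin = 0x1050  ->  alloc_area_offset = 0x40, at least 16:
 *      allocate from the end of the block; the lower part remains a free
 *      block of at least min_block_size bytes.
 */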