source: rtems/cpukit/score/include/rtems/score/heap.h @ abc963d

/**
 * @file
 *
 * @ingroup ScoreHeap
 *
 * @brief Heap Handler API
 */

/*
 *  COPYRIGHT (c) 1989-2006.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_HEAP_H
#define _RTEMS_SCORE_HEAP_H

#include <rtems/score/cpu.h>
#include <rtems/score/thread.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef RTEMS_DEBUG
  #define HEAP_PROTECTION
#endif

/**
 * @defgroup ScoreHeap Heap Handler
 *
 * @ingroup Score
 *
 * @brief The Heap Handler provides a heap.
 *
 * A heap is a doubly linked list of variable size blocks which are allocated
 * using the first fit method.  Garbage collection is performed each time a
 * block is returned to the heap by coalescing neighbor blocks.  Control
 * information for both allocated and free blocks is contained in the heap
 * area.  A heap control structure contains control information for the heap.
 *
 * The alignment routines could be made faster should we require only powers of
 * two to be supported for page size, alignment and boundary arguments.  The
 * minimum alignment requirement for pages is currently CPU_ALIGNMENT and this
 * value is only required to be a multiple of two and explicitly not required
 * to be a power of two.
 *
 * There are two kinds of blocks.  One kind describes a free block from which
 * we can allocate memory.  The other kind is a used block which provides an
 * allocated memory area.  The free blocks are accessible via a list of free
 * blocks.
 *
 * Blocks or areas cover a contiguous set of memory addresses.  They have a
 * begin and an end address.  The end address is not part of the set.  The
 * size of a block or area equals the distance between the begin and end
 * address in units of bytes.
 *
 * Free blocks look like:
 * <table>
 *   <tr>
 *     <td rowspan=4>@ref Heap_Block</td><td>previous block size in case the
 *       previous block is free, <br> otherwise it may contain data used by
 *       the previous block</td>
 *   </tr>
 *   <tr>
 *     <td>block size and a flag which indicates if the previous block is free
 *       or used, <br> this field always contains valid data regardless of the
 *       block usage</td>
 *   </tr>
 *   <tr><td>pointer to next block (this field is page size aligned)</td></tr>
 *   <tr><td>pointer to previous block</td></tr>
 *   <tr><td colspan=2>free space</td></tr>
 * </table>
 *
 * Used blocks look like:
 * <table>
 *   <tr>
 *     <td rowspan=4>@ref Heap_Block</td><td>previous block size in case the
 *       previous block is free,<br>otherwise it may contain data used by
 *       the previous block</td>
 *   </tr>
 *   <tr>
 *     <td>block size and a flag which indicates if the previous block is free
 *       or used, <br> this field always contains valid data regardless of the
 *       block usage</td>
 *   </tr>
 *   <tr><td>begin of allocated area (this field is page size aligned)</td></tr>
 *   <tr><td>allocated space</td></tr>
 *   <tr><td colspan=2>allocated space</td></tr>
 * </table>
 *
 * The heap area after initialization contains two blocks and looks like:
 * <table>
 *   <tr><th>Label</th><th colspan=2>Content</th></tr>
 *   <tr><td>heap->area_begin</td><td colspan=2>heap area begin address</td></tr>
 *   <tr>
 *     <td>first_block->prev_size</td>
 *     <td colspan=2>
 *       subordinate heap area end address (this will be used to maintain a
 *       linked list of scattered heap areas)
 *     </td>
 *   </tr>
 *   <tr>
 *     <td>first_block->size</td>
 *     <td colspan=2>size available for allocation
 *       | @c HEAP_PREV_BLOCK_USED</td>
 *   </tr>
 *   <tr>
 *     <td>first_block->next</td><td>_Heap_Free_list_tail(heap)</td>
 *     <td rowspan=3>memory area available for allocation</td>
 *   </tr>
 *   <tr><td>first_block->prev</td><td>_Heap_Free_list_head(heap)</td></tr>
 *   <tr><td>...</td></tr>
 *   <tr>
 *     <td>last_block->prev_size</td><td colspan=2>size of first block</td>
 *   </tr>
 *   <tr>
 *     <td>last_block->size</td>
 *     <td colspan=2>first block begin address - last block begin address</td>
 *   </tr>
 *   <tr><td>heap->area_end</td><td colspan=2>heap area end address</td></tr>
 * </table>
 * The next block of the last block is the first block.  Since the first
 * block indicates that the previous block is used, this ensures that the
 * last block appears as used for the _Heap_Is_used() and _Heap_Is_free()
 * functions.
 */
/**@{**/

typedef struct Heap_Control Heap_Control;

typedef struct Heap_Block Heap_Block;

#ifndef HEAP_PROTECTION
  #define HEAP_PROTECTION_HEADER_SIZE 0
#else
  #define HEAP_PROTECTOR_COUNT 2

  #define HEAP_BEGIN_PROTECTOR_0 ((uintptr_t) 0xfd75a98f)
  #define HEAP_BEGIN_PROTECTOR_1 ((uintptr_t) 0xbfa1f177)
  #define HEAP_END_PROTECTOR_0 ((uintptr_t) 0xd6b8855e)
  #define HEAP_END_PROTECTOR_1 ((uintptr_t) 0x13a44a5b)

  #define HEAP_FREE_PATTERN ((uintptr_t) 0xe7093cdf)

  #define HEAP_PROTECTION_OBOLUS ((Heap_Block *) 1)

  typedef void (*_Heap_Protection_handler)(
     Heap_Control *heap,
     Heap_Block *block
  );

  typedef struct {
    _Heap_Protection_handler block_initialize;
    _Heap_Protection_handler block_check;
    _Heap_Protection_handler block_error;
    void *handler_data;
    Heap_Block *first_delayed_free_block;
    Heap_Block *last_delayed_free_block;
    uintptr_t delayed_free_block_count;
    uintptr_t delayed_free_fraction;
  } Heap_Protection;

  typedef struct {
    uintptr_t protector [HEAP_PROTECTOR_COUNT];
    Heap_Block *next_delayed_free_block;
    Thread_Control *task;
    void *tag;
  } Heap_Protection_block_begin;

  typedef struct {
    uintptr_t protector [HEAP_PROTECTOR_COUNT];
  } Heap_Protection_block_end;

  #define HEAP_PROTECTION_HEADER_SIZE \
    (sizeof(Heap_Protection_block_begin) + sizeof(Heap_Protection_block_end))
#endif

/**
 * @brief The block header consists of the two size fields
 * (@ref Heap_Block.prev_size and @ref Heap_Block.size_and_flag).
 */
#define HEAP_BLOCK_HEADER_SIZE \
  (2 * sizeof(uintptr_t) + HEAP_PROTECTION_HEADER_SIZE)

/**
 * @brief Description for free or used blocks.
 */
struct Heap_Block {
  /**
   * @brief Size of the previous block or part of the allocated area of the
   * previous block.
   *
   * This field is only valid if the previous block is free.  This case is
   * indicated by a cleared @c HEAP_PREV_BLOCK_USED flag in the
   * @a size_and_flag field of the current block.
   *
   * In a used block only the @a size_and_flag field needs to be valid.  The
   * @a prev_size field of the current block is maintained by the previous
   * block.  The current block can use the @a prev_size field in the next block
   * for allocation.
   */
  uintptr_t prev_size;

  #ifdef HEAP_PROTECTION
    Heap_Protection_block_begin Protection_begin;
  #endif

  /**
   * @brief Contains the size of the current block and a flag which indicates
   * if the previous block is free or used.
   *
   * If the flag @c HEAP_PREV_BLOCK_USED is set, then the previous block is
   * used, otherwise the previous block is free.  A used previous block may
   * claim the @a prev_size field for allocation.  This trick decreases the
   * overhead in used blocks by the size of the @a prev_size field.  Since
   * sizes are required to be multiples of two, the least significant bit is
   * always zero and is used to store the flag.  See the illustrative sketch
   * after this structure definition.
   *
   * This field is always valid.
   */
  uintptr_t size_and_flag;

  #ifdef HEAP_PROTECTION
    Heap_Protection_block_end Protection_end;
  #endif

  /**
   * @brief Pointer to the next free block or part of the allocated area.
   *
   * This field is page size aligned and is the begin of the allocated area in
   * case the block is used.
   *
   * This field is only valid if the block is free and thus part of the free
   * block list.
   */
  Heap_Block *next;

  /**
   * @brief Pointer to the previous free block or part of the allocated area.
   *
   * This field is only valid if the block is free and thus part of the free
   * block list.
   */
  Heap_Block *prev;
};
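
/*
 * A minimal sketch of how the size and the "previous block is used" flag
 * described above share the size_and_flag field.  The flag value and helper
 * names are illustrative only and are not part of this header; the heap
 * implementation provides its own accessors.
 *
 * @code
 * #define EXAMPLE_PREV_BLOCK_USED ((uintptr_t) 1)
 *
 * static inline uintptr_t _Example_Block_size( const Heap_Block *block )
 * {
 *   // Mask off the flag bit to obtain the block size
 *   return block->size_and_flag & ~EXAMPLE_PREV_BLOCK_USED;
 * }
 *
 * static inline bool _Example_Prev_block_is_used( const Heap_Block *block )
 * {
 *   // The least significant bit carries the flag
 *   return ( block->size_and_flag & EXAMPLE_PREV_BLOCK_USED ) != 0;
 * }
 * @endcode
 */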

/**
 * @brief Run-time heap statistics.
 *
 * The value @a searches / @a allocs gives the mean number of searches per
 * allocation, while @a max_search gives the maximum number of searches ever
 * performed on a single allocation call.  See the example after this
 * structure definition.
 */
typedef struct {
  /**
   * @brief Lifetime number of bytes allocated from this heap.
   *
   * This value is an integral multiple of the page size.
   */
  uint64_t lifetime_allocated;

  /**
   * @brief Lifetime number of bytes freed to this heap.
   *
   * This value is an integral multiple of the page size.
   */
  uint64_t lifetime_freed;

  /**
   * @brief Size of the allocatable area in bytes.
   *
   * This value is an integral multiple of the page size.
   */
  uintptr_t size;

  /**
   * @brief Current free size in bytes.
   *
   * This value is an integral multiple of the page size.
   */
  uintptr_t free_size;

  /**
   * @brief Minimum free size ever in bytes.
   *
   * This value is an integral multiple of the page size.
   */
  uintptr_t min_free_size;

  /**
   * @brief Current number of free blocks.
   */
  uint32_t free_blocks;

  /**
   * @brief Maximum number of free blocks ever.
   */
  uint32_t max_free_blocks;

  /**
   * @brief Current number of used blocks.
   */
  uint32_t used_blocks;

  /**
   * @brief Maximum number of blocks searched ever.
   */
  uint32_t max_search;

  /**
   * @brief Total number of searches.
   */
  uint32_t searches;

  /**
   * @brief Total number of successful allocations.
   */
  uint32_t allocs;

  /**
   * @brief Total number of failed allocations.
   */
  uint32_t failed_allocs;

  /**
   * @brief Total number of successful frees.
   */
  uint32_t frees;

  /**
   * @brief Total number of successful resizes.
   */
  uint32_t resizes;
} Heap_Statistics;
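
/*
 * For example, the mean number of searches per successful allocation
 * mentioned above can be derived from the statistics as sketched below.  The
 * helper name is hypothetical and not part of this API.
 *
 * @code
 * static inline double _Example_Mean_searches( const Heap_Statistics *stats )
 * {
 *   // Guard against division by zero before any successful allocation
 *   return stats->allocs != 0 ?
 *     (double) stats->searches / (double) stats->allocs : 0.0;
 * }
 * @endcode
 */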

/**
 * @brief Control block used to manage a heap.
 */
struct Heap_Control {
  Heap_Block free_list;
  uintptr_t page_size;
  uintptr_t min_block_size;
  uintptr_t area_begin;
  uintptr_t area_end;
  Heap_Block *first_block;
  Heap_Block *last_block;
  Heap_Statistics stats;
  #ifdef HEAP_PROTECTION
    Heap_Protection Protection;
  #endif
};

/**
 * @brief Information about blocks.
 */
typedef struct {
  /**
   * @brief Number of blocks of this type.
   */
  uint32_t number;

  /**
   * @brief Largest block of this type.
   */
  uint32_t largest;

  /**
   * @brief Total size of the blocks of this type.
   */
  uint32_t total;
} Heap_Information;

/**
 * @brief Information block returned by _Heap_Get_information().
 */
typedef struct {
  Heap_Information Free;
  Heap_Information Used;
  Heap_Statistics Stats;
} Heap_Information_block;
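
/*
 * A usage sketch for the information block above.  _Heap_Get_information()
 * is referenced by the comment above but is not declared in this header
 * excerpt; the signature used here is an assumption.
 *
 * @code
 * #include <stdio.h>
 *
 * void example_print_free_blocks( Heap_Control *heap )
 * {
 *   Heap_Information_block info;
 *
 *   // Assumed signature:
 *   //   void _Heap_Get_information( Heap_Control *, Heap_Information_block * )
 *   _Heap_Get_information( heap, &info );
 *   printf( "free blocks: %lu\n", (unsigned long) info.Free.number );
 * }
 * @endcode
 */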

/**
 * @brief Heap area structure for table based heap initialization and
 * extension.
 *
 * @see Heap_Initialization_or_extend_handler.
 */
typedef struct {
  void *begin;
  uintptr_t size;
} Heap_Area;

/**
 * @brief Heap initialization and extend handler type.
 *
 * This type makes a table based heap initialization and extension possible.
 * Create a table of Heap_Area elements and iterate through it.  Set the
 * handler to _Heap_Initialize() in the first iteration and then to
 * _Heap_Extend().  A sketch of this pattern follows the type definition
 * below.
 *
 * @see Heap_Area, _Heap_Initialize(), _Heap_Extend(), or _Heap_No_extend().
 */
typedef uintptr_t (*Heap_Initialization_or_extend_handler)(
  Heap_Control *heap,
  void *area_begin,
  uintptr_t area_size,
  uintptr_t page_size_or_unused
);
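
/*
 * A minimal sketch of the table based pattern described above.  The helper
 * function and area table are hypothetical; _Heap_Initialize() is not
 * declared in this header excerpt and is assumed to match the handler
 * signature, as the comment above states.
 *
 * @code
 * static uintptr_t example_init_or_extend(
 *   Heap_Control *heap,
 *   const Heap_Area *areas,
 *   size_t area_count,
 *   uintptr_t page_size
 * )
 * {
 *   Heap_Initialization_or_extend_handler handler = _Heap_Initialize;
 *   uintptr_t space_available = 0;
 *   size_t i;
 *
 *   for ( i = 0; i < area_count; ++i ) {
 *     // The first iteration initializes the heap, later ones extend it
 *     space_available += ( *handler )(
 *       heap,
 *       areas[ i ].begin,
 *       areas[ i ].size,
 *       page_size
 *     );
 *     handler = _Heap_Extend;
 *   }
 *
 *   return space_available;
 * }
 * @endcode
 */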

/**
 * @brief Extends the memory available for the heap @a heap using the memory
 * area starting at @a area_begin of size @a area_size bytes.
 *
 * There are no alignment requirements.  The memory area must be big enough to
 * contain some maintenance blocks.  It must not overlap parts of the current
 * heap areas.  Disconnected subordinate heap areas will lead to used blocks
 * which cover the gaps.  Extending with an inappropriate memory area will
 * corrupt the heap.
 *
 * The unused fourth parameter is provided to have the same signature as
 * _Heap_Initialize().
 *
 * Returns the extended space available for allocation, or zero in case of
 * failure.
 *
 * @see Heap_Initialization_or_extend_handler.
 */
uintptr_t _Heap_Extend(
  Heap_Control *heap,
  void *area_begin,
  uintptr_t area_size,
  uintptr_t unused
);

/**
 * @brief This function always returns zero.
 *
 * This function does nothing except return zero.
 *
 * @see Heap_Initialization_or_extend_handler.
 */
uintptr_t _Heap_No_extend(
  Heap_Control *unused_0,
  void *unused_1,
  uintptr_t unused_2,
  uintptr_t unused_3
);

/**
 * @brief Returns the smallest multiple of @a alignment that is greater than
 * or equal to @a value.
 */
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_up(
  uintptr_t value,
  uintptr_t alignment
)
{
  uintptr_t remainder = value % alignment;

  if ( remainder != 0 ) {
    return value - remainder + alignment;
  } else {
    return value;
  }
}
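
/*
 * For example (values chosen purely for illustration):
 *
 * @code
 * _Heap_Align_up( 13, 8 ) == 16
 * _Heap_Align_up( 16, 8 ) == 16
 * _Heap_Align_up( 10, 6 ) == 12   // alignments need not be powers of two
 * @endcode
 */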

/**
 * @brief Returns the minimum block size for the specified page size.
 */
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Min_block_size( uintptr_t page_size )
{
  return _Heap_Align_up( sizeof( Heap_Block ), page_size );
}

/**
 * @brief Returns the worst case overhead to manage a memory area.
 */
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Area_overhead(
  uintptr_t page_size
)
{
  if ( page_size != 0 ) {
    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
  } else {
    page_size = CPU_ALIGNMENT;
  }

  return 2 * (page_size - 1) + HEAP_BLOCK_HEADER_SIZE;
}
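
/*
 * For example, on a hypothetical 32-bit target with CPU_ALIGNMENT == 8 and
 * HEAP_PROTECTION disabled (so HEAP_BLOCK_HEADER_SIZE == 2 * 4 == 8):
 *
 * @code
 * _Heap_Area_overhead( 0 ) == 2 * (8 - 1) + 8 == 22
 * _Heap_Area_overhead( 16 ) == 2 * (16 - 1) + 8 == 38
 * @endcode
 */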

/**
 * @brief Returns the size with administration and alignment overhead for one
 * allocation.
 */
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Size_with_overhead(
  uintptr_t page_size,
  uintptr_t size,
  uintptr_t alignment
)
{
  if ( page_size != 0 ) {
    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
  } else {
    page_size = CPU_ALIGNMENT;
  }

  if ( page_size < alignment ) {
    page_size = alignment;
  }

  return HEAP_BLOCK_HEADER_SIZE + page_size - 1 + size;
}
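
/*
 * Continuing the hypothetical 32-bit example (CPU_ALIGNMENT == 8,
 * HEAP_BLOCK_HEADER_SIZE == 8), a 100 byte allocation with the default page
 * size and a 32 byte alignment requirement yields:
 *
 * @code
 * _Heap_Size_with_overhead( 0, 100, 32 ) == 8 + 32 - 1 + 100 == 139
 * @endcode
 */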

/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */