source: rtems/cpukit/include/rtems/score/heap.h @ 21275b58

Last change on this file since 21275b58 was eaa5ea84, checked in by Sebastian Huber <sebastian.huber@…>, on Nov 20, 2018 at 3:22:56 PM

score: Introduce <rtems/score/heapinfo.h>

Move Heap_Information_block to separate header file to hide heap
implementation details from <rtems.h>.

Update #3598.

  • Property mode set to 100644
File size: 11.5 KB
Line 
1/**
2 * @file
3 *
4 * @ingroup ScoreHeap
5 *
6 * @brief Heap Handler API
7 */
8
9/*
10 *  COPYRIGHT (c) 1989-2006.
11 *  On-Line Applications Research Corporation (OAR).
12 *
13 *  The license and distribution terms for this file may be
14 *  found in the file LICENSE in this distribution or at
15 *  http://www.rtems.org/license/LICENSE.
16 */
17
18#ifndef _RTEMS_SCORE_HEAP_H
19#define _RTEMS_SCORE_HEAP_H
20
21#include <rtems/score/cpu.h>
22#include <rtems/score/heapinfo.h>
23
24#ifdef __cplusplus
25extern "C" {
26#endif
27
28#ifdef RTEMS_DEBUG
29  #define HEAP_PROTECTION
30#endif
31
32/**
33 * @defgroup ScoreHeap Heap Handler
34 *
35 * @ingroup Score
36 *
37 * @brief The Heap Handler provides a heap.
38 *
39 * A heap is a doubly linked list of variable size blocks which are allocated
40 * using the first fit method.  Garbage collection is performed each time a
41 * block is returned to the heap by coalescing neighbor blocks.  Control
42 * information for both allocated and free blocks is contained in the heap
43 * area.  A heap control structure contains control information for the heap.
44 *
45 * The alignment routines could be made faster should we require only powers of
46 * two to be supported for page size, alignment and boundary arguments.  The
47 * minimum alignment requirement for pages is currently CPU_ALIGNMENT and this
48 * value is only required to be multiple of two and explicitly not required to
49 * be a power of two.
50 *
51 * There are two kinds of blocks.  One sort describes a free block from which
52 * we can allocate memory.  The other blocks are used and provide an allocated
53 * memory area.  The free blocks are accessible via a list of free blocks.
54 *
55 * Blocks or areas cover a continuous set of memory addresses. They have a
56 * begin and end address.  The end address is not part of the set.  The size of
57 * a block or area equals the distance between the begin and end address in
58 * units of bytes.
59 *
60 * Free blocks look like:
61 * <table>
62 *   <tr>
63 *     <td rowspan=4>@ref Heap_Block</td><td>previous block size in case the
64 *       previous block is free, <br> otherwise it may contain data used by
65 *       the previous block</td>
66 *   </tr>
67 *   <tr>
68 *     <td>block size and a flag which indicates if the previous block is free
69 *       or used, <br> this field contains always valid data regardless of the
70 *       block usage</td>
71 *   </tr>
72 *   <tr><td>pointer to next block (this field is page size aligned)</td></tr>
73 *   <tr><td>pointer to previous block</td></tr>
74 *   <tr><td colspan=2>free space</td></tr>
75 * </table>
76 *
77 * Used blocks look like:
78 * <table>
79 *   <tr>
80 *     <td rowspan=4>@ref Heap_Block</td><td>previous block size in case the
81 *       previous block is free,<br>otherwise it may contain data used by
82 *       the previous block</td>
83 *   </tr>
84 *   <tr>
85 *     <td>block size and a flag which indicates if the previous block is free
86 *       or used, <br> this field contains always valid data regardless of the
87 *       block usage</td>
88 *   </tr>
89 *   <tr><td>begin of allocated area (this field is page size aligned)</td></tr>
90 *   <tr><td>allocated space</td></tr>
91 *   <tr><td colspan=2>allocated space</td></tr>
92 * </table>
93 *
94 * The heap area after initialization contains two blocks and looks like:
95 * <table>
96 *   <tr><th>Label</th><th colspan=2>Content</th></tr>
97 *   <tr><td>heap->area_begin</td><td colspan=2>heap area begin address</td></tr>
98 *   <tr>
99 *     <td>first_block->prev_size</td>
100 *     <td colspan=2>
101 *       subordinate heap area end address (this will be used to maintain a
102 *       linked list of scattered heap areas)
103 *     </td>
104 *   </tr>
105 *   <tr>
106 *     <td>first_block->size</td>
107 *     <td colspan=2>size available for allocation
108 *       | @c HEAP_PREV_BLOCK_USED</td>
109 *   </tr>
110 *   <tr>
111 *     <td>first_block->next</td><td>_Heap_Free_list_tail(heap)</td>
112 *     <td rowspan=3>memory area available for allocation</td>
113 *   </tr>
114 *   <tr><td>first_block->prev</td><td>_Heap_Free_list_head(heap)</td></tr>
115 *   <tr><td>...</td></tr>
116 *   <tr>
117 *     <td>last_block->prev_size</td><td colspan=2>size of first block</td>
118 *   </tr>
119 *   <tr>
120 *     <td>last_block->size</td>
121 *     <td colspan=2>first block begin address - last block begin address</td>
122 *   </tr>
123 *   <tr><td>heap->area_end</td><td colspan=2>heap area end address</td></tr>
124 * </table>
125 * The next block of the last block is the first block.  Since the first
126 * block indicates that the previous block is used, this ensures that the
127 * last block appears as used for the _Heap_Is_used() and _Heap_Is_free()
128 * functions.
129 */
130/**@{**/
131
/** @brief Forward declaration of the heap control structure, see below. */
typedef struct Heap_Control Heap_Control;

/** @brief Forward declaration of the heap block structure, see below. */
typedef struct Heap_Block Heap_Block;

#ifndef HEAP_PROTECTION
  /* Without heap protection the blocks carry no extra header bytes. */
  #define HEAP_PROTECTION_HEADER_SIZE 0
#else
  /** @brief Number of protector words per protection area. */
  #define HEAP_PROTECTOR_COUNT 2

  /*
   * Magic values stored in the protector fields of each block.
   * NOTE(review): presumably canary values checked by block_check to detect
   * overwrites of the block header — confirm against heapimpl.h.
   */
  #define HEAP_BEGIN_PROTECTOR_0 ((uintptr_t) 0xfd75a98f)
  #define HEAP_BEGIN_PROTECTOR_1 ((uintptr_t) 0xbfa1f177)
  #define HEAP_END_PROTECTOR_0 ((uintptr_t) 0xd6b8855e)
  #define HEAP_END_PROTECTOR_1 ((uintptr_t) 0x13a44a5b)

  /* Pattern presumably written into freed memory — confirm in heapimpl.h. */
  #define HEAP_FREE_PATTERN ((uintptr_t) 0xe7093cdf)

  /* Sentinel block pointer; exact use defined by the heap implementation. */
  #define HEAP_PROTECTION_OBOLUS ((Heap_Block *) 1)

  /** @brief Handler invoked on a single block of the heap @a heap. */
  typedef void (*_Heap_Protection_handler)(
     Heap_Control *heap,
     Heap_Block *block
  );

  /** @brief Heap protection state embedded in the heap control block. */
  typedef struct {
    _Heap_Protection_handler block_initialize;
    _Heap_Protection_handler block_check;
    _Heap_Protection_handler block_error;
    void *handler_data;
    Heap_Block *first_delayed_free_block;
    Heap_Block *last_delayed_free_block;
    uintptr_t delayed_free_block_count;
    uintptr_t delayed_free_fraction;
  } Heap_Protection;

  struct _Thread_Control;

  /** @brief Protection area placed at the begin of each block. */
  typedef struct {
    uintptr_t protector [HEAP_PROTECTOR_COUNT];
    Heap_Block *next_delayed_free_block;
    struct _Thread_Control *task;
    void *tag;
  } Heap_Protection_block_begin;

  /** @brief Protection area placed at the end of each block. */
  typedef struct {
    uintptr_t protector [HEAP_PROTECTOR_COUNT];
  } Heap_Protection_block_end;

  /* With protection enabled each block header grows by both areas. */
  #define HEAP_PROTECTION_HEADER_SIZE \
    (sizeof(Heap_Protection_block_begin) + sizeof(Heap_Protection_block_end))
#endif

182
/**
 * @brief The block header consists of the two size fields
 * (@ref Heap_Block.prev_size and @ref Heap_Block.size_and_flag).
 *
 * If HEAP_PROTECTION is enabled, the header additionally contains the
 * begin and end protection areas (HEAP_PROTECTION_HEADER_SIZE is zero
 * otherwise).
 */
#define HEAP_BLOCK_HEADER_SIZE \
  (2 * sizeof(uintptr_t) + HEAP_PROTECTION_HEADER_SIZE)

189
/**
 * @brief Description for free or used blocks.
 */
struct Heap_Block {
  /**
   * @brief Size of the previous block or part of the allocated area of the
   * previous block.
   *
   * This field is only valid if the previous block is free.  This case is
   * indicated by a cleared @c HEAP_PREV_BLOCK_USED flag in the
   * @a size_and_flag field of the current block.
   *
   * In a used block only the @a size_and_flag field needs to be valid.  The
   * @a prev_size field of the current block is maintained by the previous
   * block.  The current block can use the @a prev_size field in the next block
   * for allocation.
   */
  uintptr_t prev_size;

  #ifdef HEAP_PROTECTION
    Heap_Protection_block_begin Protection_begin;
  #endif

  /**
   * @brief Contains the size of the current block and a flag which indicates
   * if the previous block is free or used.
   *
   * If the flag @c HEAP_PREV_BLOCK_USED is set, then the previous block is
   * used, otherwise the previous block is free.  A used previous block may
   * claim the @a prev_size field for allocation.  This trick allows us to
   * decrease the overhead in the used blocks by the size of the @a prev_size
   * field.  As sizes are required to be multiples of two, the least
   * significant bits would be always zero. We use this bit to store the flag.
   *
   * This field is always valid.
   */
  uintptr_t size_and_flag;

  #ifdef HEAP_PROTECTION
    Heap_Protection_block_end Protection_end;
  #endif

  /**
   * @brief Pointer to the next free block or part of the allocated area.
   *
   * This field is page size aligned and is the begin of the allocated area in
   * case the block is used.
   *
   * This field is only valid if the block is free and thus part of the free
   * block list.
   */
  Heap_Block *next;

  /**
   * @brief Pointer to the previous free block or part of the allocated area.
   *
   * This field is only valid if the block is free and thus part of the free
   * block list.
   */
  Heap_Block *prev;
};

251
/**
 * @brief Control block used to manage a heap.
 */
struct Heap_Control {
  /** @brief Anchor of the doubly linked list of free blocks. */
  Heap_Block free_list;
  /** @brief Page size used for block alignment, see _Heap_Align_up(). */
  uintptr_t page_size;
  /** @brief Minimum size of a block, see _Heap_Min_block_size(). */
  uintptr_t min_block_size;
  /** @brief Heap area begin address. */
  uintptr_t area_begin;
  /** @brief Heap area end address (not part of the heap area). */
  uintptr_t area_end;
  /** @brief First block of the heap area, see the layout table above. */
  Heap_Block *first_block;
  /** @brief Last block of the heap area, see the layout table above. */
  Heap_Block *last_block;
  /** @brief Heap usage statistics, see <rtems/score/heapinfo.h>. */
  Heap_Statistics stats;
  #ifdef HEAP_PROTECTION
    /** @brief Heap protection state, only present with HEAP_PROTECTION. */
    Heap_Protection Protection;
  #endif
};

268
/**
 * @brief Heap area structure for table based heap initialization and
 * extension.
 *
 * @see Heap_Initialization_or_extend_handler.
 */
typedef struct {
  /** @brief Begin address of the memory area. */
  void *begin;
  /** @brief Size of the memory area in bytes. */
  uintptr_t size;
} Heap_Area;

279
/**
 * @brief Heap initialization and extend handler type.
 *
 * This helps to do a table based heap initialization and extension.  Create a
 * table of Heap_Area elements and iterate through it.  Set the handler to
 * _Heap_Initialize() in the first iteration and then to _Heap_Extend().
 *
 * @param heap The heap control block.
 * @param area_begin Begin address of the memory area.
 * @param area_size Size of the memory area in bytes.
 * @param page_size_or_unused The page size for _Heap_Initialize(); unused by
 *   _Heap_Extend() and _Heap_No_extend().
 *
 * @see Heap_Area, _Heap_Initialize(), _Heap_Extend(), or _Heap_No_extend().
 */
typedef uintptr_t (*Heap_Initialization_or_extend_handler)(
  Heap_Control *heap,
  void *area_begin,
  uintptr_t area_size,
  uintptr_t page_size_or_unused
);

295
/**
 * @brief Extends the memory available for the heap @a heap using the memory
 * area starting at @a area_begin of size @a area_size bytes.
 *
 * There are no alignment requirements for the memory area.  The memory area
 * must be big enough to contain some maintenance blocks.  It must not overlap
 * parts of the current heap memory areas.  Disconnected memory areas added to
 * the heap will lead to used blocks which cover the gaps.  Extending with an
 * inappropriate memory area will corrupt the heap resulting in undefined
 * behaviour.
 *
 * @param heap The heap control block.
 * @param area_begin Begin address of the memory area to add.
 * @param area_size Size of the memory area in bytes.
 * @param unused Not used.  This parameter is provided to have the same
 *   signature as _Heap_Initialize().
 *
 * @return The extended space available for allocation, or zero in case of
 *   failure.
 *
 * @see Heap_Initialization_or_extend_handler.
 */
uintptr_t _Heap_Extend(
  Heap_Control *heap,
  void *area_begin,
  uintptr_t area_size,
  uintptr_t unused
);

320
/**
 * @brief This function returns always zero.
 *
 * This function only returns zero and does nothing else.  It can be used as
 * the extend handler in a table based initialization when no extension shall
 * take place.
 *
 * @param unused_0 Not used.
 * @param unused_1 Not used.
 * @param unused_2 Not used.
 * @param unused_3 Not used.
 *
 * @return Always zero.
 *
 * @see Heap_Initialization_or_extend_handler.
 */
uintptr_t _Heap_No_extend(
  Heap_Control *unused_0,
  void *unused_1,
  uintptr_t unused_2,
  uintptr_t unused_3
);

336
337RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_up(
338  uintptr_t value,
339  uintptr_t alignment
340)
341{
342  uintptr_t remainder = value % alignment;
343
344  if ( remainder != 0 ) {
345    return value - remainder + alignment;
346  } else {
347    return value;
348  }
349}
350
351RTEMS_INLINE_ROUTINE uintptr_t _Heap_Min_block_size( uintptr_t page_size )
352{
353  return _Heap_Align_up( sizeof( Heap_Block ), page_size );
354}
355
356/**
357 * @brief Returns the worst case overhead to manage a memory area.
358 */
359RTEMS_INLINE_ROUTINE uintptr_t _Heap_Area_overhead(
360  uintptr_t page_size
361)
362{
363  if ( page_size != 0 ) {
364    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
365  } else {
366    page_size = CPU_ALIGNMENT;
367  }
368
369  return 2 * (page_size - 1) + HEAP_BLOCK_HEADER_SIZE;
370}
371
372/**
373 * @brief Returns the size with administration and alignment overhead for one
374 * allocation.
375 */
376RTEMS_INLINE_ROUTINE uintptr_t _Heap_Size_with_overhead(
377  uintptr_t page_size,
378  uintptr_t size,
379  uintptr_t alignment
380)
381{
382  if ( page_size != 0 ) {
383    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
384  } else {
385    page_size = CPU_ALIGNMENT;
386  }
387
388  if ( page_size < alignment ) {
389    page_size = alignment;
390  }
391
392  return HEAP_BLOCK_HEADER_SIZE + page_size - 1 + size;
393}
394
395/** @} */
396
397#ifdef __cplusplus
398}
399#endif
400
401#endif
402/* end of include file */
Note: See TracBrowser for help on using the repository browser.