source: rtems/cpukit/score/include/rtems/score/heap.h @ 7e119990

Last change on this file since 7e119990 was 6cf45cb, checked in by Sebastian Huber <sebastian.huber@…>, on 04/07/14 at 14:50:13

score: Fix workspace size estimate for TLS

/**
 * @file
 *
 * @ingroup ScoreHeap
 *
 * @brief Heap Handler API
 */

/*
 *  COPYRIGHT (c) 1989-2006.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_SCORE_HEAP_H
#define _RTEMS_SCORE_HEAP_H

#include <rtems/score/cpu.h>

#ifdef __cplusplus
extern "C" {
#endif

#ifdef RTEMS_DEBUG
  #define HEAP_PROTECTION
#endif

/**
 * @defgroup ScoreHeap Heap Handler
 *
 * @ingroup Score
 *
 * @brief The Heap Handler provides a heap.
 *
 * A heap is a doubly linked list of variable-size blocks which are allocated
 * using the first fit method.  Garbage collection is performed each time a
 * block is returned to the heap by coalescing neighboring blocks.  Control
 * information for both allocated and free blocks is contained in the heap
 * area.  A heap control structure contains control information for the heap.
 *
 * The alignment routines could be made faster should we require only powers of
 * two to be supported for page size, alignment and boundary arguments.  The
 * minimum alignment requirement for pages is currently CPU_ALIGNMENT and this
 * value is only required to be a multiple of two and explicitly not required
 * to be a power of two.
 *
 * There are two kinds of blocks.  One sort describes a free block from which
 * we can allocate memory.  The other blocks are used and provide an allocated
 * memory area.  The free blocks are accessible via a list of free blocks.
 *
 * Blocks or areas cover a contiguous set of memory addresses.  They have a
 * begin and end address.  The end address is not part of the set.  The size of
 * a block or area equals the distance between the begin and end address in
 * units of bytes.
 *
 * Free blocks look like:
 * <table>
 *   <tr>
 *     <td rowspan=4>@ref Heap_Block</td><td>previous block size in case the
 *       previous block is free, <br> otherwise it may contain data used by
 *       the previous block</td>
 *   </tr>
 *   <tr>
 *     <td>block size and a flag which indicates if the previous block is free
 *       or used, <br> this field always contains valid data regardless of the
 *       block usage</td>
 *   </tr>
 *   <tr><td>pointer to next block (this field is page size aligned)</td></tr>
 *   <tr><td>pointer to previous block</td></tr>
 *   <tr><td colspan=2>free space</td></tr>
 * </table>
 *
 * Used blocks look like:
 * <table>
 *   <tr>
 *     <td rowspan=4>@ref Heap_Block</td><td>previous block size in case the
 *       previous block is free,<br>otherwise it may contain data used by
 *       the previous block</td>
 *   </tr>
 *   <tr>
 *     <td>block size and a flag which indicates if the previous block is free
 *       or used, <br> this field always contains valid data regardless of the
 *       block usage</td>
 *   </tr>
 *   <tr><td>begin of allocated area (this field is page size aligned)</td></tr>
 *   <tr><td>allocated space</td></tr>
 *   <tr><td colspan=2>allocated space</td></tr>
 * </table>
 *
 * The heap area after initialization contains two blocks and looks like:
 * <table>
 *   <tr><th>Label</th><th colspan=2>Content</th></tr>
 *   <tr><td>heap->area_begin</td><td colspan=2>heap area begin address</td></tr>
 *   <tr>
 *     <td>first_block->prev_size</td>
 *     <td colspan=2>
 *       subordinate heap area end address (this will be used to maintain a
 *       linked list of scattered heap areas)
 *     </td>
 *   </tr>
 *   <tr>
 *     <td>first_block->size</td>
 *     <td colspan=2>size available for allocation
 *       | @c HEAP_PREV_BLOCK_USED</td>
 *   </tr>
 *   <tr>
 *     <td>first_block->next</td><td>_Heap_Free_list_tail(heap)</td>
 *     <td rowspan=3>memory area available for allocation</td>
 *   </tr>
 *   <tr><td>first_block->prev</td><td>_Heap_Free_list_head(heap)</td></tr>
 *   <tr><td>...</td></tr>
 *   <tr>
 *     <td>last_block->prev_size</td><td colspan=2>size of first block</td>
 *   </tr>
 *   <tr>
 *     <td>last_block->size</td>
 *     <td colspan=2>first block begin address - last block begin address</td>
 *   </tr>
 *   <tr><td>heap->area_end</td><td colspan=2>heap area end address</td></tr>
 * </table>
 * The next block of the last block is the first block.  Since the first
 * block indicates that the previous block is used, this ensures that the
 * last block appears as used for the _Heap_Is_used() and _Heap_Is_free()
 * functions.
 */
/**@{**/
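
/*
 * Illustrative sketch (not part of the original header) of the first fit
 * search over the free list described above.  Using &heap->free_list as the
 * free list anchor and masking the least significant bit of size_and_flag
 * are assumptions made for this example; the real allocator additionally
 * handles alignment, boundary and block splitting.
 *
 *   static Heap_Block *example_first_fit( Heap_Control *heap, uintptr_t size )
 *   {
 *     Heap_Block *tail = &heap->free_list;       // free list anchor
 *     Heap_Block *block = heap->free_list.next;  // first free block
 *
 *     while ( block != tail ) {
 *       // Block size without the HEAP_PREV_BLOCK_USED flag bit
 *       uintptr_t block_size = block->size_and_flag & ~(uintptr_t) 1;
 *
 *       if ( block_size >= size ) {
 *         return block;  // first block that is large enough: first fit
 *       }
 *
 *       block = block->next;
 *     }
 *
 *     return NULL;  // no free block fits the request
 *   }
 */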

typedef struct Heap_Control Heap_Control;

typedef struct Heap_Block Heap_Block;

#ifndef HEAP_PROTECTION
  #define HEAP_PROTECTION_HEADER_SIZE 0
#else
  #include <rtems/score/thread.h>

  #define HEAP_PROTECTOR_COUNT 2

  #define HEAP_BEGIN_PROTECTOR_0 ((uintptr_t) 0xfd75a98f)
  #define HEAP_BEGIN_PROTECTOR_1 ((uintptr_t) 0xbfa1f177)
  #define HEAP_END_PROTECTOR_0 ((uintptr_t) 0xd6b8855e)
  #define HEAP_END_PROTECTOR_1 ((uintptr_t) 0x13a44a5b)

  #define HEAP_FREE_PATTERN ((uintptr_t) 0xe7093cdf)

  #define HEAP_PROTECTION_OBOLUS ((Heap_Block *) 1)

  typedef void (*_Heap_Protection_handler)(
     Heap_Control *heap,
     Heap_Block *block
  );

  typedef struct {
    _Heap_Protection_handler block_initialize;
    _Heap_Protection_handler block_check;
    _Heap_Protection_handler block_error;
    void *handler_data;
    Heap_Block *first_delayed_free_block;
    Heap_Block *last_delayed_free_block;
    uintptr_t delayed_free_block_count;
    uintptr_t delayed_free_fraction;
  } Heap_Protection;

  typedef struct {
    uintptr_t protector [HEAP_PROTECTOR_COUNT];
    Heap_Block *next_delayed_free_block;
    Thread_Control *task;
    void *tag;
  } Heap_Protection_block_begin;

  typedef struct {
    uintptr_t protector [HEAP_PROTECTOR_COUNT];
  } Heap_Protection_block_end;

  #define HEAP_PROTECTION_HEADER_SIZE \
    (sizeof(Heap_Protection_block_begin) + sizeof(Heap_Protection_block_end))
#endif

/**
 * @brief The block header consists of the two size fields
 * (@ref Heap_Block.prev_size and @ref Heap_Block.size_and_flag).
 */
#define HEAP_BLOCK_HEADER_SIZE \
  (2 * sizeof(uintptr_t) + HEAP_PROTECTION_HEADER_SIZE)

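/*
 * Worked example (illustrative assumption: a 32-bit target where
 * sizeof(uintptr_t) == 4 and HEAP_PROTECTION is disabled):
 *
 *   HEAP_BLOCK_HEADER_SIZE == 2 * sizeof(uintptr_t) + 0 == 8
 *
 * With HEAP_PROTECTION enabled, the sizes of Heap_Protection_block_begin and
 * Heap_Protection_block_end are added on top of the two size fields.
 */
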
/**
 * @brief Description for free or used blocks.
 */
struct Heap_Block {
  /**
   * @brief Size of the previous block or part of the allocated area of the
   * previous block.
   *
   * This field is only valid if the previous block is free.  This case is
   * indicated by a cleared @c HEAP_PREV_BLOCK_USED flag in the
   * @a size_and_flag field of the current block.
   *
   * In a used block only the @a size_and_flag field needs to be valid.  The
   * @a prev_size field of the current block is maintained by the previous
   * block.  The current block can use the @a prev_size field in the next block
   * for allocation.
   */
  uintptr_t prev_size;

  #ifdef HEAP_PROTECTION
    Heap_Protection_block_begin Protection_begin;
  #endif

  /**
   * @brief Contains the size of the current block and a flag which indicates
   * if the previous block is free or used.
   *
   * If the flag @c HEAP_PREV_BLOCK_USED is set, then the previous block is
   * used, otherwise the previous block is free.  A used previous block may
   * claim the @a prev_size field for allocation.  This trick reduces the
   * overhead in used blocks by the size of the @a prev_size field.  Since
   * sizes are required to be multiples of two, the least significant bit is
   * always zero, and we use this bit to store the flag (see the example after
   * this structure).
   *
   * This field is always valid.
   */
  uintptr_t size_and_flag;

  #ifdef HEAP_PROTECTION
    Heap_Protection_block_end Protection_end;
  #endif

  /**
   * @brief Pointer to the next free block or part of the allocated area.
   *
   * This field is page size aligned and marks the begin of the allocated area
   * in case the block is used.
   *
   * This field is only valid if the block is free and thus part of the free
   * block list.
   */
  Heap_Block *next;

  /**
   * @brief Pointer to the previous free block or part of the allocated area.
   *
   * This field is only valid if the block is free and thus part of the free
   * block list.
   */
  Heap_Block *prev;
};
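
/*
 * Illustrative sketch (not part of the original header) of the size and flag
 * encoding described for Heap_Block::size_and_flag.  Using the least
 * significant bit as the HEAP_PREV_BLOCK_USED flag follows the documentation
 * above; the literal mask is an assumption of this example, and the real
 * accessors live in the heap implementation.  The second helper assumes
 * <stdbool.h>.
 *
 *   static uintptr_t example_block_size( const Heap_Block *block )
 *   {
 *     // Sizes are multiples of two, so the low bit only stores the flag
 *     return block->size_and_flag & ~(uintptr_t) 1;
 *   }
 *
 *   static bool example_prev_block_is_used( const Heap_Block *block )
 *   {
 *     return ( block->size_and_flag & (uintptr_t) 1 ) != 0;
 *   }
 */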

/**
 * @brief Run-time heap statistics.
 *
 * The value @a searches / @a allocs gives the mean number of searches per
 * allocation, while @a max_search gives the maximum number of searches ever
 * performed on a single allocation call (see the example after this
 * structure).
 */
typedef struct {
  /**
   * @brief Instance number of this heap.
   */
  uint32_t instance;

  /**
   * @brief Size of the allocatable area in bytes.
   *
   * This value is an integral multiple of the page size.
   */
  uintptr_t size;

  /**
   * @brief Current free size in bytes.
   *
   * This value is an integral multiple of the page size.
   */
  uintptr_t free_size;

  /**
   * @brief Minimum free size ever in bytes.
   *
   * This value is an integral multiple of the page size.
   */
  uintptr_t min_free_size;

  /**
   * @brief Current number of free blocks.
   */
  uint32_t free_blocks;

  /**
   * @brief Maximum number of free blocks ever.
   */
  uint32_t max_free_blocks;

  /**
   * @brief Current number of used blocks.
   */
  uint32_t used_blocks;

  /**
   * @brief Maximum number of blocks searched ever.
   */
  uint32_t max_search;

  /**
   * @brief Total number of successful allocations.
   */
  uint32_t allocs;

  /**
   * @brief Total number of searches ever.
   */
  uint32_t searches;

  /**
   * @brief Total number of successful calls to free.
   */
  uint32_t frees;

  /**
   * @brief Total number of successful resizes.
   */
  uint32_t resizes;
} Heap_Statistics;
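
/*
 * A hypothetical helper (not part of the original header) that computes the
 * mean number of searches per allocation from the statistics, as described
 * in the Heap_Statistics documentation above.
 *
 *   static uint32_t example_mean_searches_per_alloc(
 *     const Heap_Statistics *stats
 *   )
 *   {
 *     // Guard against division by zero before any allocation happened
 *     return stats->allocs != 0 ? stats->searches / stats->allocs : 0;
 *   }
 */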

/**
 * @brief Control block used to manage a heap.
 */
struct Heap_Control {
  Heap_Block free_list;
  uintptr_t page_size;
  uintptr_t min_block_size;
  uintptr_t area_begin;
  uintptr_t area_end;
  Heap_Block *first_block;
  Heap_Block *last_block;
  Heap_Statistics stats;
  #ifdef HEAP_PROTECTION
    Heap_Protection Protection;
  #endif
};

/**
 * @brief Information about blocks.
 */
typedef struct {
  /**
   * @brief Number of blocks of this type.
   */
  uint32_t number;

  /**
   * @brief Largest block of this type.
   */
  uint32_t largest;

  /**
   * @brief Total size of the blocks of this type.
   */
  uint32_t total;
} Heap_Information;

/**
 * @brief Information block returned by _Heap_Get_information().
 */
typedef struct {
  Heap_Information Free;
  Heap_Information Used;
} Heap_Information_block;

/**
 * @brief Heap area structure for table based heap initialization and
 * extension.
 *
 * @see Heap_Initialization_or_extend_handler.
 */
typedef struct {
  void *begin;
  uintptr_t size;
} Heap_Area;

/**
 * @brief Heap initialization and extend handler type.
 *
 * This handler type supports table based heap initialization and extension.
 * Create a table of Heap_Area elements and iterate through it.  Set the
 * handler to _Heap_Initialize() in the first iteration and then to
 * _Heap_Extend() (see the sketch after this typedef).
 *
 * @see Heap_Area, _Heap_Initialize(), _Heap_Extend(), or _Heap_No_extend().
 */
typedef uintptr_t (*Heap_Initialization_or_extend_handler)(
  Heap_Control *heap,
  void *area_begin,
  uintptr_t area_size,
  uintptr_t page_size_or_unused
);

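/*
 * Illustrative sketch (not part of the original header) of the table based
 * initialization and extension described above.  _Heap_Initialize() is
 * assumed to be declared elsewhere with the signature of
 * Heap_Initialization_or_extend_handler, and <stddef.h> is assumed for
 * size_t.
 *
 *   static uintptr_t example_init_from_table(
 *     Heap_Control *heap,
 *     const Heap_Area *areas,
 *     size_t area_count,
 *     uintptr_t page_size
 *   )
 *   {
 *     Heap_Initialization_or_extend_handler handler = _Heap_Initialize;
 *     uintptr_t total = 0;
 *     size_t i;
 *
 *     for ( i = 0; i < area_count; ++i ) {
 *       total += handler( heap, areas[ i ].begin, areas[ i ].size, page_size );
 *       handler = _Heap_Extend;  // subsequent areas extend the heap
 *     }
 *
 *     return total;
 *   }
 */
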
/**
 * @brief Extends the memory available for the heap @a heap using the memory
 * area starting at @a area_begin of size @a area_size bytes.
 *
 * There are no alignment requirements.  The memory area must be big enough to
 * contain some maintenance blocks.  It must not overlap parts of the current
 * heap areas.  Disconnected subordinate heap areas will lead to used blocks
 * which cover the gaps.  Extending with an inappropriate memory area will
 * corrupt the heap.
 *
 * The unused fourth parameter is provided to have the same signature as
 * _Heap_Initialize().
 *
 * Returns the extended space available for allocation, or zero in case of
 * failure.
 *
 * @see Heap_Initialization_or_extend_handler.
 */
uintptr_t _Heap_Extend(
  Heap_Control *heap,
  void *area_begin,
  uintptr_t area_size,
  uintptr_t unused
);

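/*
 * Usage sketch (illustrative; heap, extra_area and extra_area_size are
 * hypothetical placeholders): extending an already initialized heap and
 * checking the return value for failure.
 *
 *   uintptr_t extended = _Heap_Extend( heap, extra_area, extra_area_size, 0 );
 *
 *   if ( extended == 0 ) {
 *     // The memory area was too small to hold the maintenance blocks
 *   }
 */
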
/**
 * @brief This function always returns zero.
 *
 * This function only returns zero and does nothing else.
 *
 * Always returns zero.
 *
 * @see Heap_Initialization_or_extend_handler.
 */
uintptr_t _Heap_No_extend(
  Heap_Control *unused_0,
  void *unused_1,
  uintptr_t unused_2,
  uintptr_t unused_3
);

/**
 * @brief Returns @a value aligned up to the nearest integral multiple of
 * @a alignment.
 */
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_up(
  uintptr_t value,
  uintptr_t alignment
)
{
  uintptr_t remainder = value % alignment;

  if ( remainder != 0 ) {
    return value - remainder + alignment;
  } else {
    return value;
  }
}

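/*
 * Worked example (illustrative): _Heap_Align_up( 13, 8 ) yields 16, since the
 * remainder 13 % 8 == 5 is non-zero and 13 - 5 + 8 == 16.  A value that is
 * already aligned is returned unchanged, e.g. _Heap_Align_up( 16, 8 ) == 16.
 */
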
/**
 * @brief Returns the minimum block size for the given page size.
 */
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Min_block_size( uintptr_t page_size )
{
  return _Heap_Align_up( sizeof( Heap_Block ), page_size );
}

/**
 * @brief Returns the worst case overhead to manage a memory area.
 */
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Area_overhead(
  uintptr_t page_size
)
{
  if ( page_size != 0 ) {
    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
  } else {
    page_size = CPU_ALIGNMENT;
  }

  return 2 * (page_size - 1) + HEAP_BLOCK_HEADER_SIZE;
}

/**
 * @brief Returns the size with administration and alignment overhead for one
 * allocation.
 */
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Size_with_overhead(
  uintptr_t page_size,
  uintptr_t size,
  uintptr_t alignment
)
{
  if ( page_size != 0 ) {
    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
  } else {
    page_size = CPU_ALIGNMENT;
  }

  if ( page_size < alignment ) {
    page_size = alignment;
  }

  return HEAP_BLOCK_HEADER_SIZE + page_size - 1 + size;
}

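/*
 * Worked example (illustrative assumptions: CPU_ALIGNMENT == 8, HEAP_PROTECTION
 * disabled, and a 32-bit uintptr_t so HEAP_BLOCK_HEADER_SIZE == 8):
 *
 *   _Heap_Area_overhead( 0 )              == 2 * (8 - 1) + 8     == 22
 *   _Heap_Size_with_overhead( 0, 100, 4 ) == 8 + 8 - 1 + 100     == 115
 *
 * In the second call the effective page size stays at CPU_ALIGNMENT because
 * the requested alignment of 4 is smaller than it.
 */
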
/** @} */

#ifdef __cplusplus
}
#endif

#endif
/* end of include file */