source: rtems/cpukit/score/include/rtems/score/heap.h @ d0d357ed

4.115
Last change on this file since d0d357ed was d0d357ed, checked in by Sebastian Huber <sebastian.huber@…>, on 06/25/13 at 10:09:50

heap: Add _Heap_Greedy_allocate_all_except_largest

Add rtems_workspace_greedy_allocate_all_except_largest() and
rtems_heap_greedy_allocate_all_except_largest().

  • Property mode set to 100644
File size: 21.7 KB
Line 
1/**
2 * @file
3 *
4 * @ingroup ScoreHeap
5 *
6 * @brief Heap Handler API
7 */
8
9/*
10 *  COPYRIGHT (c) 1989-2006.
11 *  On-Line Applications Research Corporation (OAR).
12 *
13 *  The license and distribution terms for this file may be
14 *  found in the file LICENSE in this distribution or at
15 *  http://www.rtems.com/license/LICENSE.
16 */
17
18#ifndef _RTEMS_SCORE_HEAP_H
19#define _RTEMS_SCORE_HEAP_H
20
21#include <rtems/system.h>
22#include <rtems/score/thread.h>
23
24#ifdef __cplusplus
25extern "C" {
26#endif
27
/* Enable the heap protection support (block canaries, delayed free list,
 * error hooks) in debug builds only; it adds per-block overhead. */
#ifdef RTEMS_DEBUG
  #define HEAP_PROTECTION
#endif
31
32/**
33 * @defgroup ScoreHeap Heap Handler
34 *
35 * @ingroup Score
36 *
37 * @brief The Heap Handler provides a heap.
38 *
39 * A heap is a doubly linked list of variable size blocks which are allocated
40 * using the first fit method.  Garbage collection is performed each time a
41 * block is returned to the heap by coalescing neighbor blocks.  Control
42 * information for both allocated and free blocks is contained in the heap
43 * area.  A heap control structure contains control information for the heap.
44 *
45 * The alignment routines could be made faster should we require only powers of
46 * two to be supported for page size, alignment and boundary arguments.  The
47 * minimum alignment requirement for pages is currently CPU_ALIGNMENT and this
48 * value is only required to be multiple of two and explicitly not required to
49 * be a power of two.
50 *
51 * There are two kinds of blocks.  One sort describes a free block from which
52 * we can allocate memory.  The other blocks are used and provide an allocated
53 * memory area.  The free blocks are accessible via a list of free blocks.
54 *
55 * Blocks or areas cover a continuous set of memory addresses. They have a
56 * begin and end address.  The end address is not part of the set.  The size of
57 * a block or area equals the distance between the begin and end address in
58 * units of bytes.
59 *
60 * Free blocks look like:
61 * <table>
62 *   <tr>
63 *     <td rowspan=4>@ref Heap_Block</td><td>previous block size in case the
64 *       previous block is free, <br> otherwise it may contain data used by
65 *       the previous block</td>
66 *   </tr>
67 *   <tr>
68 *     <td>block size and a flag which indicates if the previous block is free
69 *       or used, <br> this field contains always valid data regardless of the
70 *       block usage</td>
71 *   </tr>
72 *   <tr><td>pointer to next block (this field is page size aligned)</td></tr>
73 *   <tr><td>pointer to previous block</td></tr>
74 *   <tr><td colspan=2>free space</td></tr>
75 * </table>
76 *
77 * Used blocks look like:
78 * <table>
79 *   <tr>
80 *     <td rowspan=4>@ref Heap_Block</td><td>previous block size in case the
81 *       previous block is free,<br>otherwise it may contain data used by
82 *       the previous block</td>
83 *   </tr>
84 *   <tr>
85 *     <td>block size and a flag which indicates if the previous block is free
86 *       or used, <br> this field contains always valid data regardless of the
87 *       block usage</td>
88 *   </tr>
89 *   <tr><td>begin of allocated area (this field is page size aligned)</td></tr>
90 *   <tr><td>allocated space</td></tr>
91 *   <tr><td colspan=2>allocated space</td></tr>
92 * </table>
93 *
94 * The heap area after initialization contains two blocks and looks like:
95 * <table>
96 *   <tr><th>Label</th><th colspan=2>Content</th></tr>
97 *   <tr><td>heap->area_begin</td><td colspan=2>heap area begin address</td></tr>
98 *   <tr>
99 *     <td>first_block->prev_size</td>
100 *     <td colspan=2>
101 *       subordinate heap area end address (this will be used to maintain a
102 *       linked list of scattered heap areas)
103 *     </td>
104 *   </tr>
105 *   <tr>
106 *     <td>first_block->size</td>
107 *     <td colspan=2>size available for allocation
108 *       | @c HEAP_PREV_BLOCK_USED</td>
109 *   </tr>
110 *   <tr>
111 *     <td>first_block->next</td><td>_Heap_Free_list_tail(heap)</td>
112 *     <td rowspan=3>memory area available for allocation</td>
113 *   </tr>
114 *   <tr><td>first_block->prev</td><td>_Heap_Free_list_head(heap)</td></tr>
115 *   <tr><td>...</td></tr>
116 *   <tr>
117 *     <td>last_block->prev_size</td><td colspan=2>size of first block</td>
118 *   </tr>
119 *   <tr>
120 *     <td>last_block->size</td>
121 *     <td colspan=2>first block begin address - last block begin address</td>
122 *   </tr>
123 *   <tr><td>heap->area_end</td><td colspan=2>heap area end address</td></tr>
124 * </table>
125 * The next block of the last block is the first block.  Since the first
126 * block indicates that the previous block is used, this ensures that the
127 * last block appears as used for the _Heap_Is_used() and _Heap_Is_free()
128 * functions.
129 */
130/**@{**/
131
/* Forward declarations: the full definitions appear below. */
typedef struct Heap_Control Heap_Control;

typedef struct Heap_Block Heap_Block;

#ifndef HEAP_PROTECTION
  /* Without protection the block header carries no extra bytes. */
  #define HEAP_PROTECTION_HEADER_SIZE 0
#else
  /* Number of canary words at each end of a protected block. */
  #define HEAP_PROTECTOR_COUNT 2

  /* Arbitrary canary patterns written into the begin/end protector words;
   * a mismatch indicates an overrun of the neighboring allocation. */
  #define HEAP_BEGIN_PROTECTOR_0 ((uintptr_t) 0xfd75a98f)
  #define HEAP_BEGIN_PROTECTOR_1 ((uintptr_t) 0xbfa1f177)
  #define HEAP_END_PROTECTOR_0 ((uintptr_t) 0xd6b8855e)
  #define HEAP_END_PROTECTOR_1 ((uintptr_t) 0x13a44a5b)

  /* NOTE(review): presumably the fill pattern for freed memory so
   * use-after-free reads are recognizable — confirm in heap implementation. */
  #define HEAP_FREE_PATTERN ((uintptr_t) 0xe7093cdf)

  /* Sentinel pointer value (never a valid block address).
   * NOTE(review): verify its exact role against the heap implementation. */
  #define HEAP_PROTECTION_OBOLUS ((Heap_Block *) 1)

  /* Hook invoked with the heap and the affected block. */
  typedef void (*_Heap_Protection_handler)(
     Heap_Control *heap,
     Heap_Block *block
  );

  typedef struct {
    _Heap_Protection_handler block_initialize; /* called to set up a block's protection data */
    _Heap_Protection_handler block_check;      /* called to validate a block */
    _Heap_Protection_handler block_error;      /* called when a corrupted block is detected */
    void *handler_data;                        /* opaque context for the handlers */
    Heap_Block *first_delayed_free_block;      /* head of the delayed free list */
    Heap_Block *last_delayed_free_block;       /* tail of the delayed free list */
    uintptr_t delayed_free_block_count;        /* number of blocks on the delayed free list */
  } Heap_Protection;

  /* Extra header placed at the begin of every protected block. */
  typedef struct {
    uintptr_t protector [HEAP_PROTECTOR_COUNT]; /* begin canaries */
    Heap_Block *next_delayed_free_block;        /* link for the delayed free list */
    Thread_Control *task;                       /* task associated with the block */
    void *tag;                                  /* user-supplied tag for diagnostics */
  } Heap_Protection_block_begin;

  /* Extra trailer placed at the end of every protected block. */
  typedef struct {
    uintptr_t protector [HEAP_PROTECTOR_COUNT]; /* end canaries */
  } Heap_Protection_block_end;

  #define HEAP_PROTECTION_HEADER_SIZE \
    (sizeof(Heap_Protection_block_begin) + sizeof(Heap_Protection_block_end))
#endif
179
180/**
181 * @brief See also @ref Heap_Block.size_and_flag.
182 */
183#define HEAP_PREV_BLOCK_USED ((uintptr_t) 1)
184
185/**
186 * @brief Size of the part at the block begin which may be used for allocation
187 * in charge of the previous block.
188 */
189#define HEAP_ALLOC_BONUS sizeof(uintptr_t)
190
191/**
192 * @brief The block header consists of the two size fields
193 * (@ref Heap_Block.prev_size and @ref Heap_Block.size_and_flag).
194 */
195#define HEAP_BLOCK_HEADER_SIZE \
196  (2 * sizeof(uintptr_t) + HEAP_PROTECTION_HEADER_SIZE)
197
198/**
199 * @brief Description for free or used blocks.
200 */
201struct Heap_Block {
202  /**
203   * @brief Size of the previous block or part of the allocated area of the
204   * previous block.
205   *
206   * This field is only valid if the previous block is free.  This case is
207   * indicated by a cleared @c HEAP_PREV_BLOCK_USED flag in the
208   * @a size_and_flag field of the current block.
209   *
210   * In a used block only the @a size_and_flag field needs to be valid.  The
211   * @a prev_size field of the current block is maintained by the previous
212   * block.  The current block can use the @a prev_size field in the next block
213   * for allocation.
214   */
215  uintptr_t prev_size;
216
217  #ifdef HEAP_PROTECTION
218    Heap_Protection_block_begin Protection_begin;
219  #endif
220
221  /**
222   * @brief Contains the size of the current block and a flag which indicates
223   * if the previous block is free or used.
224   *
225   * If the flag @c HEAP_PREV_BLOCK_USED is set, then the previous block is
226   * used, otherwise the previous block is free.  A used previous block may
227   * claim the @a prev_size field for allocation.  This trick allows to
228   * decrease the overhead in the used blocks by the size of the @a prev_size
229   * field.  As sizes are required to be multiples of two, the least
230   * significant bits would be always zero. We use this bit to store the flag.
231   *
232   * This field is always valid.
233   */
234  uintptr_t size_and_flag;
235
236  #ifdef HEAP_PROTECTION
237    Heap_Protection_block_end Protection_end;
238  #endif
239
240  /**
241   * @brief Pointer to the next free block or part of the allocated area.
242   *
243   * This field is page size aligned and begins of the allocated area in case
244   * the block is used.
245   *
246   * This field is only valid if the block is free and thus part of the free
247   * block list.
248   */
249  Heap_Block *next;
250
251  /**
252   * @brief Pointer to the previous free block or part of the allocated area.
253   *
254   * This field is only valid if the block is free and thus part of the free
255   * block list.
256   */
257  Heap_Block *prev;
258};
259
260/**
261 * @brief Run-time heap statistics.
262 *
263 * The value @a searches / @a allocs gives the mean number of searches per
264 * allocation, while @a max_search gives maximum number of searches ever
265 * performed on a single allocation call.
266 */
267typedef struct {
268  /**
269   * @brief Instance number of this heap.
270   */
271  uint32_t instance;
272
273  /**
274   * @brief Size of the allocatable area in bytes.
275   *
276   * This value is an integral multiple of the page size.
277   */
278  uintptr_t size;
279
280  /**
281   * @brief Current free size in bytes.
282   *
283   * This value is an integral multiple of the page size.
284   */
285  uintptr_t free_size;
286
287  /**
288   * @brief Minimum free size ever in bytes.
289   *
290   * This value is an integral multiple of the page size.
291   */
292  uintptr_t min_free_size;
293
294  /**
295   * @brief Current number of free blocks.
296   */
297  uint32_t free_blocks;
298
299  /**
300   * @brief Maximum number of free blocks ever.
301   */
302  uint32_t max_free_blocks;
303
304  /**
305   * @brief Current number of used blocks.
306   */
307  uint32_t used_blocks;
308
309  /**
310   * @brief Maximum number of blocks searched ever.
311   */
312  uint32_t max_search;
313
314  /**
315   * @brief Total number of successful allocations.
316   */
317  uint32_t allocs;
318
319  /**
320   * @brief Total number of searches ever.
321   */
322  uint32_t searches;
323
324  /**
325   * @brief Total number of suceessful calls to free.
326   */
327  uint32_t frees;
328
329  /**
330   * @brief Total number of successful resizes.
331   */
332  uint32_t resizes;
333} Heap_Statistics;
334
335/**
336 * @brief Control block used to manage a heap.
337 */
338struct Heap_Control {
339  Heap_Block free_list;
340  uintptr_t page_size;
341  uintptr_t min_block_size;
342  uintptr_t area_begin;
343  uintptr_t area_end;
344  Heap_Block *first_block;
345  Heap_Block *last_block;
346  Heap_Statistics stats;
347  #ifdef HEAP_PROTECTION
348    Heap_Protection Protection;
349  #endif
350};
351
352/**
353 * @brief Information about blocks.
354 */
355typedef struct {
356  /**
357   * @brief Number of blocks of this type.
358   */
359  uint32_t number;
360
361  /**
362   * @brief Largest block of this type.
363   */
364  uint32_t largest;
365
366  /**
367   * @brief Total size of the blocks of this type.
368   */
369  uint32_t total;
370} Heap_Information;
371
372/**
373 * @brief Information block returned by _Heap_Get_information().
374 */
375typedef struct {
376  Heap_Information Free;
377  Heap_Information Used;
378} Heap_Information_block;
379
380/**
381 * @brief See _Heap_Resize_block().
382 */
383typedef enum {
384  HEAP_RESIZE_SUCCESSFUL,
385  HEAP_RESIZE_UNSATISFIED,
386  HEAP_RESIZE_FATAL_ERROR
387} Heap_Resize_status;
388
389/**
390 * @brief Heap area structure for table based heap initialization and
391 * extension.
392 *
393 * @see Heap_Initialization_or_extend_handler.
394 */
395typedef struct {
396  void *begin;
397  uintptr_t size;
398} Heap_Area;
399
400/**
401 * @brief Heap initialization and extend handler type.
402 *
403 * This helps to do a table based heap initialization and extension.  Create a
404 * table of Heap_Area elements and iterate through it.  Set the handler to
405 * _Heap_Initialize() in the first iteration and then to _Heap_Extend().
406 *
407 * @see Heap_Area, _Heap_Initialize(), _Heap_Extend(), or _Heap_No_extend().
408 */
409typedef uintptr_t (*Heap_Initialization_or_extend_handler)(
410  Heap_Control *heap,
411  void *area_begin,
412  uintptr_t area_size,
413  uintptr_t page_size_or_unused
414);
415
416/**
417 * @brief Gets the first and last block for the heap area with begin
418 * @a heap_area_begin and size @a heap_area_size.
419 *
420 * A page size of @a page_size and minimal block size of @a min_block_size will
421 * be used for calculation.
422 *
423 * Nothing will be written to this area.
424 *
425 * In case of success the pointers to the first and last block will be returned
426 * via @a first_block_ptr and @a last_block_ptr.
427 *
428 * Returns @c true if the area is big enough, and @c false otherwise.
429 */
430bool _Heap_Get_first_and_last_block(
431  uintptr_t heap_area_begin,
432  uintptr_t heap_area_size,
433  uintptr_t page_size,
434  uintptr_t min_block_size,
435  Heap_Block **first_block_ptr,
436  Heap_Block **last_block_ptr
437);
438
439/**
440 * @brief Initializes the heap control block @a heap to manage the area
441 * starting at @a area_begin of size @a area_size bytes.
442 *
443 * Blocks of memory are allocated from the heap in multiples of @a page_size
444 * byte units.  If the @a page_size is equal to zero or is not multiple of
445 * @c CPU_ALIGNMENT, it is aligned up to the nearest @c CPU_ALIGNMENT boundary.
446 *
447 * Returns the maximum memory available, or zero in case of failure.
448 *
449 * @see Heap_Initialization_or_extend_handler.
450 */
451uintptr_t _Heap_Initialize(
452  Heap_Control *heap,
453  void *area_begin,
454  uintptr_t area_size,
455  uintptr_t page_size
456);
457
458/**
459 * @brief Extends the memory available for the heap @a heap using the memory
460 * area starting at @a area_begin of size @a area_size bytes.
461 *
462 * There are no alignment requirements.  The memory area must be big enough to
463 * contain some maintainance blocks.  It must not overlap parts of the current
464 * heap areas.  Disconnected subordinate heap areas will lead to used blocks
465 * which cover the gaps.  Extending with an inappropriate memory area will
466 * corrupt the heap.
467 *
468 * The unused fourth parameter is provided to have the same signature as
469 * _Heap_Initialize().
470 *
471 * Returns the extended space available for allocation, or zero in case of failure.
472 *
473 * @see Heap_Initialization_or_extend_handler.
474 */
475uintptr_t _Heap_Extend(
476  Heap_Control *heap,
477  void *area_begin,
478  uintptr_t area_size,
479  uintptr_t unused
480);
481
482/**
483 * @brief This function returns always zero.
484 *
485 * This function only returns zero and does nothing else.
486 *
487 * Returns always zero.
488 *
489 * @see Heap_Initialization_or_extend_handler.
490 */
491uintptr_t _Heap_No_extend(
492  Heap_Control *unused_0,
493  void *unused_1,
494  uintptr_t unused_2,
495  uintptr_t unused_3
496);
497
498/**
499 * @brief Allocates a memory area of size @a size bytes from the heap @a heap.
500 *
501 * If the alignment parameter @a alignment is not equal to zero, the allocated
502 * memory area will begin at an address aligned by this value.
503 *
504 * If the boundary parameter @a boundary is not equal to zero, the allocated
505 * memory area will fulfill a boundary constraint.  The boundary value
506 * specifies the set of addresses which are aligned by the boundary value.  The
507 * interior of the allocated memory area will not contain an element of this
508 * set.  The begin or end address of the area may be a member of the set.
509 *
510 * A size value of zero will return a unique address which may be freed with
511 * _Heap_Free().
512 *
513 * Returns a pointer to the begin of the allocated memory area, or @c NULL if
514 * no memory is available or the parameters are inconsistent.
515 */
516void *_Heap_Allocate_aligned_with_boundary(
517  Heap_Control *heap,
518  uintptr_t size,
519  uintptr_t alignment,
520  uintptr_t boundary
521);
522
523/**
524 * @brief See _Heap_Allocate_aligned_with_boundary() with boundary equals zero.
525 */
526RTEMS_INLINE_ROUTINE void *_Heap_Allocate_aligned(
527  Heap_Control *heap,
528  uintptr_t size,
529  uintptr_t alignment
530)
531{
532  return _Heap_Allocate_aligned_with_boundary( heap, size, alignment, 0 );
533}
534
535/**
536 * @brief See _Heap_Allocate_aligned_with_boundary() with alignment and
537 * boundary equals zero.
538 */
539RTEMS_INLINE_ROUTINE void *_Heap_Allocate( Heap_Control *heap, uintptr_t size )
540{
541  return _Heap_Allocate_aligned_with_boundary( heap, size, 0, 0 );
542}
543
544/**
545 * @brief Frees the allocated memory area starting at @a addr in the heap
546 * @a heap.
547 *
548 * Inappropriate values for @a addr may corrupt the heap.
549 *
550 * Returns @c true in case of success, and @c false otherwise.
551 */
552bool _Heap_Free( Heap_Control *heap, void *addr );
553
554/**
555 * @brief Walks the heap @a heap to verify its integrity.
556 *
557 * If @a dump is @c true, then diagnostic messages will be printed to standard
558 * output.  In this case @a source is used to mark the output lines.
559 *
560 * Returns @c true if no errors occurred, and @c false if the heap is corrupt.
561 */
562bool _Heap_Walk(
563  Heap_Control *heap,
564  int source,
565  bool dump
566);
567
568/**
569 * @brief Heap block visitor.
570 *
571 * @see _Heap_Iterate().
572 *
573 * @retval true Stop the iteration.
574 * @retval false Continue the iteration.
575 */
576typedef bool (*Heap_Block_visitor)(
577  const Heap_Block *block,
578  uintptr_t block_size,
579  bool block_is_used,
580  void *visitor_arg
581);
582
583/**
584 * @brief Iterates over all blocks of the heap.
585 *
586 * For each block the @a visitor with the argument @a visitor_arg will be
587 * called.
588 */
589void _Heap_Iterate(
590  Heap_Control *heap,
591  Heap_Block_visitor visitor,
592  void *visitor_arg
593);
594
595/**
596 * @brief Greedy allocate that empties the heap.
597 *
598 * Afterwards the heap has at most @a block_count allocatable blocks of sizes
599 * specified by @a block_sizes.  The @a block_sizes must point to an array with
600 * @a block_count members.  All other blocks are used.
601 *
602 * @see _Heap_Greedy_free().
603 */
604Heap_Block *_Heap_Greedy_allocate(
605  Heap_Control *heap,
606  const uintptr_t *block_sizes,
607  size_t block_count
608);
609
610/**
611 * @brief Greedy allocate all blocks except the largest free block.
612 *
613 * Afterwards the heap has at most one allocatable block.  This block is the
614 * largest free block if it exists.  The allocatable size of this block is
615 * stored in @a allocatable_size.  All other blocks are used.
616 *
617 * @see _Heap_Greedy_free().
618 */
619Heap_Block *_Heap_Greedy_allocate_all_except_largest(
620  Heap_Control *heap,
621  uintptr_t *allocatable_size
622);
623
624/**
625 * @brief Frees blocks of a greedy allocation.
626 *
627 * The @a blocks must be the return value of _Heap_Greedy_allocate().
628 */
629void _Heap_Greedy_free(
630  Heap_Control *heap,
631  Heap_Block *blocks
632);
633
634/**
635 * @brief Returns information about used and free blocks for the heap @a heap
636 * in @a info.
637 */
638void _Heap_Get_information(
639  Heap_Control *heap,
640  Heap_Information_block *info
641);
642
643/**
644 * @brief Returns information about free blocks for the heap @a heap in
645 * @a info.
646 */
647void _Heap_Get_free_information(
648  Heap_Control *heap,
649  Heap_Information *info
650);
651
652/**
653 * @brief Returns the size of the allocatable memory area starting at @a addr
654 * in @a size.
655 *
656 * The size value may be greater than the initially requested size in
657 * _Heap_Allocate_aligned_with_boundary().
658 *
659 * Inappropriate values for @a addr will not corrupt the heap, but may yield
660 * invalid size values.
661 *
662 * Returns @a true if successful, and @c false otherwise.
663 */
664bool _Heap_Size_of_alloc_area(
665  Heap_Control *heap,
666  void *addr,
667  uintptr_t *size
668);
669
670/**
671 * @brief Resizes the block of the allocated memory area starting at @a addr.
672 *
673 * The new memory area will have a size of at least @a size bytes.  A resize
674 * may be impossible and depends on the current heap usage.
675 *
676 * The size available for allocation in the current block before the resize
677 * will be returned in @a old_size.  The size available for allocation in
678 * the resized block will be returned in @a new_size.  If the resize was not
679 * successful, then a value of zero will be returned in @a new_size.
680 *
681 * Inappropriate values for @a addr may corrupt the heap.
682 */
683Heap_Resize_status _Heap_Resize_block(
684  Heap_Control *heap,
685  void *addr,
686  uintptr_t size,
687  uintptr_t *old_size,
688  uintptr_t *new_size
689);
690
/**
 * @brief Returns the smallest multiple of @a alignment which is greater than
 * or equal to @a value.
 *
 * The @a alignment must not be zero (division by zero otherwise).
 */
RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_up(
  uintptr_t value,
  uintptr_t alignment
)
{
  uintptr_t excess = value % alignment;

  if ( excess > 0 ) {
    value += alignment - excess;
  }

  return value;
}
704
705/**
706 * @brief Returns the worst case overhead to manage a memory area.
707 */
708RTEMS_INLINE_ROUTINE uintptr_t _Heap_Area_overhead(
709  uintptr_t page_size
710)
711{
712  if ( page_size != 0 ) {
713    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
714  } else {
715    page_size = CPU_ALIGNMENT;
716  }
717
718  return 2 * (page_size - 1) + HEAP_BLOCK_HEADER_SIZE;
719}
720
721#if !defined(__RTEMS_APPLICATION__)
722
723#include <rtems/score/heap.inl>
724
725/**
726 * @brief Allocates the memory area starting at @a alloc_begin of size
727 * @a alloc_size bytes in the block @a block.
728 *
729 * The block may be split up into multiple blocks.  The previous and next block
730 * may be used or free.  Free block parts which form a vaild new block will be
731 * inserted into the free list or merged with an adjacent free block.  If the
732 * block is used, they will be inserted after the free list head.  If the block
733 * is free, they will be inserted after the previous block in the free list.
734 *
735 * Inappropriate values for @a alloc_begin or @a alloc_size may corrupt the
736 * heap.
737 *
738 * Returns the block containing the allocated memory area.
739 */
740Heap_Block *_Heap_Block_allocate(
741  Heap_Control *heap,
742  Heap_Block *block,
743  uintptr_t alloc_begin,
744  uintptr_t alloc_size
745);
746
#ifndef HEAP_PROTECTION
  /* Without heap protection all hooks compile to no-ops. */
  #define _Heap_Protection_block_initialize( heap, block ) ((void) 0)
  #define _Heap_Protection_block_check( heap, block ) ((void) 0)
  #define _Heap_Protection_block_error( heap, block ) ((void) 0)
  #define _Heap_Protection_free_all_delayed_blocks( heap ) ((void) 0)
#else
  /* Dispatch to the heap's installed protection handlers. */
  static inline void _Heap_Protection_block_initialize(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    (*heap->Protection.block_initialize)( heap, block );
  }

  static inline void _Heap_Protection_block_check(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    (*heap->Protection.block_check)( heap, block );
  }

  static inline void _Heap_Protection_block_error(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    (*heap->Protection.block_error)( heap, block );
  }

  static inline void _Heap_Protection_free_all_delayed_blocks( Heap_Control *heap )
  {
    /* Request a size so large (near UINTPTR_MAX minus the header and bonus)
     * that the allocation cannot succeed.  NOTE(review): the failing
     * allocation attempt appears to be what forces the heap to release its
     * delayed free blocks; the subsequent _Heap_Free() handles the (expected
     * NULL) result — confirm against the heap implementation. */
    uintptr_t large = 0
      - (uintptr_t) HEAP_BLOCK_HEADER_SIZE
      - (uintptr_t) HEAP_ALLOC_BONUS
      - (uintptr_t) 1;
    void *p = _Heap_Allocate( heap, large );
    _Heap_Free( heap, p );
  }
#endif
787
788/** @} */
789
790#ifdef RTEMS_DEBUG
791  #define RTEMS_HEAP_DEBUG
792#endif
793
794#ifdef RTEMS_HEAP_DEBUG
795  #include <assert.h>
796  #define _HAssert( cond ) \
797    do { \
798      if ( !(cond) ) { \
799        __assert( __FILE__, __LINE__, #cond ); \
800      } \
801    } while (0)
802#else
803  #define _HAssert( cond ) ((void) 0)
804#endif
805
806#endif /* !defined(__RTEMS_APPLICATION__) */
807
808#ifdef __cplusplus
809}
810#endif
811
812#endif
813/* end of include file */
Note: See TracBrowser for help on using the repository browser.