Changeset 962e894f in rtems


Timestamp:
01/20/05 18:22:29
Author:
Joel Sherrill <joel.sherrill@…>
Branches:
4.10, 4.11, 4.8, 4.9, 5, master
Children:
3e528b7
Parents:
f3b7be3
Message:

2005-01-20 Sergei Organov <osv@…>

PR 536/rtems
Heap manager re-implementation to consume less memory and still satisfy
alignment requirements.


  • score/src/heap.c, score/src/heapallocate.c, score/src/heapextend.c, score/src/heapfree.c, score/src/heapgetinfo.c, score/src/heapgetfreeinfo.c, core/src/heapsizeofuserarea.c, score/src/heapwalk.c, core/macros/rtems/score/heap.inl, score/inline/rtems/score/heap.inl, score/include/rtems/score/heap.h: Reimplemented.
  • score/src/heapallocatealigned.c: new file
  • score/Makefile.am: HEAP_C_FILES: add score/src/heapallocatealigned.c
Location:
cpukit/score
Files:
11 edited

  • cpukit/score/include/rtems/score/heap.h

    rf3b7be3 r962e894f  
    1 /** 
     1/**
    22 *  @file  rtems/score/heap.h
    33 *
     
    77 *  collection is performed each time a block is returned to the heap by
    88 *  coalescing neighbor blocks.  Control information for both allocated
    9  *  and unallocated blocks is contained in the heap space.  A heap header
    10  *  contains control information for the heap.
    11  */
    12 
    13 /*
     9 *  and unallocated blocks is contained in the heap space.  A heap control
     10 *  structure contains control information for the heap.
     11 *
     12 *  FIXME: the alignment routines could be made faster should we require only
     13 *         powers of two to be supported both for 'page_size' and for
     14 *         'alignment' arguments. However, both workspace and malloc heaps are
     15 *         initialized with CPU_HEAP_ALIGNMENT as 'page_size', and while all
     16 *         the BSPs seem to use CPU_ALIGNMENT (that is power of two) as
     17 *         CPU_HEAP_ALIGNMENT, for whatever reason CPU_HEAP_ALIGNMENT is only
     18 *         required to be a multiple of CPU_ALIGNMENT and explicitly not
     19 *         required to be a power of two.
     20 *
    1421 *  COPYRIGHT (c) 1989-2004.
    1522 *  On-Line Applications Research Corporation (OAR).
     
    3845
    3946/**
     47 * This type defines an unsigned integer type to store 'void*'. Analog of C99
     48 * 'uintptr_t'. This should work on 16/32/64 bit architectures.
     49 *
     50 * FIXME: Something like this should better be defined by
     51 *        'rtems/score/types.h' and used here.
     52 */
     53
     54typedef unsigned long int _H_uptr_t;
     55
     56/**
     57 *  Forward reference
     58 *
     59 *  @ref Heap_Block
     60 */
     61typedef struct Heap_Block_struct Heap_Block;
     62
     63/**
     64 *  The following defines the data structure used to manage individual blocks
     65 *  in a heap.  When the block is allocated, the 'next' and 'prev' fields, as
     66 *  well as 'prev_size' field of the next block, are not used by the heap
     67 *  manager and thus the address returned for the block starts at the address
     68 *  of the 'next' field and the size of the user accessible area includes the
     69 *  size of the 'prev_size' field.
     70 *
     71 *  @note The 'next' and 'prev' pointers are only valid when the block is free.
     72 *     Caution must be taken to ensure that every block is large enough to
     73 *     hold them and that they are not accessed while the block is actually
     74 *     allocated (i.e., not free).
     75 *
     76 *  @note The 'prev_size' field is only valid when HEAP_PREV_USED bit is clear
     77 *     in the 'size' field indicating that previous block is not allocated.
     78 *     If the bit is set, the 'prev_size' field is part of user-accessible
     79 *     space of the previously allocated block and thus shouldn't be accessed
     80 *     by the heap manager code. This trick allows us to further decrease
     81 *     overhead in the used blocks to the size of 'size' field (4 bytes).
     82 *
     83 */
     84
     85struct Heap_Block_struct {
     86  /** size of prev block (if prev block is free) */
     87  uint32_t  prev_size;
     88  /** size of block in bytes and status of prev block */
     89  uint32_t  size;
     90  /** pointer to the next free block */
     91  Heap_Block *next;
     92  /** pointer to the previous free block */
     93  Heap_Block *prev;
     94};
     95
     96/**
     97 *  This flag is used in the 'size' field of each heap block to indicate
     98 *  if the previous block is free or in use. As sizes are always multiples of
     99 *  4, the 2 least significant bits would always be 0, and we use one of them
     100 *  to store the flag.
     101 */
     102
     103#define HEAP_PREV_USED    1     /* indicates previous block is in use */
     104
     105/**
     106 *  The following constants reflect various requirements of the
     107 *  heap data structures which impact the management of a heap.
     108 */
     109
     110#define HEAP_MIN_BLOCK_SIZE (sizeof(Heap_Block))
     111
     112/**
     113 *  Offset of the block header from the block pointer. Equal to the
     114 *  offsetof(Heap_Block.size).
     115 */
     116#define HEAP_BLOCK_HEADER_OFFSET (sizeof(uint32_t))
     117
     118/**
     119 *  Offset of user data pointer from the block pointer. Equal to the
     120 *  offsetof(Heap_Block.next).
     121 */
     122#define HEAP_BLOCK_USER_OFFSET (sizeof(uint32_t) * 2)
     123
     124/**
     125 *  Num bytes of overhead in used block. Equal to the sizeof(Heap_Block.size).
     126 */
     127#define HEAP_BLOCK_USED_OVERHEAD \
     128  (HEAP_BLOCK_USER_OFFSET - HEAP_BLOCK_HEADER_OFFSET)
     129
     130/** Size of the permanent dummy last block. */
     131#define HEAP_OVERHEAD HEAP_BLOCK_USER_OFFSET
     132
     133/**
     134 *  Run-time heap statistics.
     135 *
     136 *  @note (double)searches/allocs gives mean number of searches per alloc while
     137 *        max_search gives maximum number of searches ever performed on a
     138 *        single call to alloc.
     139 *
     140 *  @note the statistics are always gathered. I believe the imposed overhead is
     141 *        rather small. Feel free to make it a compile-time option if you think
     142 *        the overhead is too high for your application.
     143 */
     144
     145typedef struct Heap_Statistics_tag {
     146  /** instance number of this heap */
     147  uint32_t instance;
     148  /** the size of the memory for heap */
     149  uint32_t size;
     150  /** current free size */
     151  uint32_t free_size;
     152  /** minimum free size ever */
     153  uint32_t min_free_size;
     154  /** current number of free blocks */
     155  uint32_t free_blocks;
     156  /** maximum number of free blocks ever */
     157  uint32_t max_free_blocks;
     158  /** current number of used blocks */
     159  uint32_t used_blocks;
     160  /** maximum number of blocks searched ever */
     161  uint32_t max_search;
     162  /** total number of successful calls to alloc */
     163  uint32_t allocs;
     164  /** total number of searches ever */
     165  uint32_t searches;
     166  /** total number of successful calls to free */
     167  uint32_t frees;
     168} Heap_Statistics;
     169
     170/**
     171 *  Control block used to manage each heap.
     172 */
     173typedef struct {
     174  /** head and tail of circular list of free blocks */
     175  Heap_Block  free_list;
     176  /** allocation unit and alignment */
     177  uint32_t  page_size;
     178  /** minimum block size aligned on page_size */
     179  uint32_t  min_block_size;
     180  /** first address of memory for the heap */
     181  void       *begin;
     182  /** first address past end of memory for the heap */
     183  void       *end;
     184  /** first valid block address in the heap */
     185  Heap_Block *start;
     186  /** last valid block address in the heap */
     187  Heap_Block *final;
     188  /** run-time statistics */
     189  Heap_Statistics stats;
     190} Heap_Control;
     191
     192/**
    40193 *  Status codes for heap_extend
    41194 */
     195
    42196typedef enum {
    43197  HEAP_EXTEND_SUCCESSFUL,
    44198  HEAP_EXTEND_ERROR,
    45199  HEAP_EXTEND_NOT_IMPLEMENTED
    46 }  Heap_Extend_status;
     200} Heap_Extend_status;
    47201
    48202/**
    49203 *  Status codes for _Heap_Get_information
    50204 */
     205
    51206typedef enum {
    52     HEAP_GET_INFORMATION_SUCCESSFUL = 0,
    53     HEAP_GET_INFORMATION_SYSTEM_STATE_ERROR,
    54     HEAP_GET_INFORMATION_BLOCK_ERROR
    55 }  Heap_Get_information_status;
     207  HEAP_GET_INFORMATION_SUCCESSFUL = 0,
     208  HEAP_GET_INFORMATION_BLOCK_ERROR
     209} Heap_Get_information_status;
    56210
    57211/**
    58212 *  Information block returned by the Heap routines used to
    59  *  obtain statistics.  This information is returned about
     213 *  obtain heap information.  This information is returned about
    60214 *  either free or used blocks.
    61215 */
    62 
    63216typedef struct {
    64217  /** Number of blocks of this type. */
     
    68221  /** Total size of the blocks of this type. */
    69222  uint32_t total;
    70 
    71223} Heap_Information;
    72 
    73224
    74225/**
     
    83234
    84235/**
    85  *  This constant is used in the size/used field of each heap block to
    86  *  indicate when a block is in use.
    87  */
    88 #define HEAP_BLOCK_USED    1
    89 
    90 /**
    91  *  This constant is used in the size/used field of each heap block to
    92  *  indicate when a block is free.
    93  */
    94 #define HEAP_BLOCK_FREE    0
    95 
    96 /**
    97  *  The size/used field value for the dummy front and back flags.
    98  */
    99 #define HEAP_DUMMY_FLAG    (0 + HEAP_BLOCK_USED)
    100 
    101 /*
    102  *  The following constants reflect various requirements of the
    103  *  heap data structures which impact the management of a heap.
    104  *
    105  *  NOTE:  Because free block overhead is greater than used block
    106  *         overhead AND a portion of the allocated space is from
    107  *         the extra free block overhead, the absolute lower bound
    108  *         of the minimum fragment size is equal to the size of
    109  *         the free block overhead.
    110  */
    111 
    112 /** size dummy first and last blocks */
    113 #define HEAP_OVERHEAD (sizeof( uint32_t   ) * 2)
    114 
    115 /** number of bytes of overhead in used block */
    116 #define HEAP_BLOCK_USED_OVERHEAD (sizeof( void * ) * 2)
    117 
    118 /** Minimum number of bytes the user may specify for the heap size */
    119 #define HEAP_MINIMUM_SIZE (HEAP_OVERHEAD + sizeof (Heap_Block))
    120 
    121 /**
    122  *  Forward reference
    123  *
    124  *  @ref Heap_Block
    125  */
    126 typedef struct Heap_Block_struct Heap_Block;
    127 
    128 /**
    129  *  The following defines the data structure used to manage
    130  *  individual blocks in a heap.   When the block is allocated, the
    131  *  next and previous fields are not used by the Heap Handler
    132  *  and thus the address returned for the block starts at
    133  *  the address of the next field.
    134  *
    135  *  @note  The next and previous pointers are only valid when the
    136  *         block is free.  Caution must be exercised to insure that
    137  *         allocated blocks are large enough to contain them and
    138  *         that they are not accidentally overwritten when the
    139  *         block is actually allocated.
    140  */
    141 struct Heap_Block_struct {
    142   /** size and status of prev block */
    143   uint32_t    back_flag;
    144   /** size and status of block */
    145   uint32_t    front_flag;
    146   /** pointer to next block */
    147   Heap_Block *next;
    148   /** pointer to previous block */
    149   Heap_Block *previous;
    150 };
    151 
    152 /**
    153  *  The following defines the control block used to manage each heap.
    154  *
    155  *  @note This structure is layed out such that it can be used a a dummy
    156  *  first and last block on the free block chain.  The extra padding
    157  *  insures the dummy last block is the correct size.
    158  *
    159  *  The first Heap_Block starts at first while the second starts at
    160  *  final.  This is effectively the same trick as is used in the Chain
    161  *  Handler.
    162  */
    163 typedef struct {
    164   /** first valid block address in heap */
    165   Heap_Block *start;
    166   /** last valid block address in heap */
    167   Heap_Block *final;
    168 
    169   /** pointer to first block in heap */
    170   Heap_Block *first;
    171   /** always NULL pointer */
    172   Heap_Block *permanent_null;
    173   /** pointer to last block in heap */
    174   Heap_Block *last;
    175   /** allocation unit */
    176   uint32_t    page_size;
    177   /** reserved field */
    178   uint32_t    reserved;
    179 }   Heap_Control;
    180 
    181 /**
    182236 *  This routine initializes @a the_heap record to manage the
    183237 *  contiguous heap of @a size bytes which starts at @a starting_address.
    184238 *  Blocks of memory are allocated from the heap in multiples of
    185  *  @a page_size byte units.
     239 *  @a page_size byte units. If @a page_size is 0 or is not a multiple of
     240 *  CPU_ALIGNMENT, it's aligned up to the nearest CPU_ALIGNMENT boundary.
    186241 *
    187242 *  @param the_heap (in) is the heap to operate upon
     
    207262 *         to add to the heap
    208263 *  @param size (in) is the size in bytes of the memory area to add
    209  *  @param amount_extended (in) points to a user area to return the 
     264 *  @param amount_extended (in) points to a user area to return the
    210265 *  @return a status indicating success or the reason for failure
    211266 *  @return *size filled in with the amount of memory added to the heap
     
    219274
    220275/**
    221  *  This function attempts to allocate a block of size bytes from
     276 *  This function attempts to allocate a block of @a size bytes from
    222277 *  @a the_heap.  If insufficient memory is free in @a the_heap to allocate
    223  *  a  block of the requested size, then NULL is returned.
     278 *  a block of the requested size, then NULL is returned.
    224279 *
    225280 *  @param the_heap (in) is the heap to operate upon
     
    229284void *_Heap_Allocate(
    230285  Heap_Control *the_heap,
    231   uint32_t      size
    232 );
    233 
    234 /**
    235  *  This kernel routine sets size to the size of the given heap block.
    236  *  It returns TRUE if the @a starting_address is in @a the_heap, and FALSE
     286  uint32_t    size
     287);
     288
     289/**
     290 *  This function attempts to allocate a memory block of @a size bytes from
     291 *  @a the_heap so that the start of the user memory is aligned on the
     292 *  @a alignment boundary. If @a alignment is 0, it is set to CPU_ALIGNMENT.
     293 *  Any other value of @a alignment is taken "as is", i.e., even odd
     294 *  alignments are possible.
     295 *  Returns a pointer to the start of the memory block on success, NULL on
     296 *  failure.
     297 *
     298 *  @param the_heap (in) is the heap to operate upon
     299 *  @param size (in) is the amount of memory to allocate in bytes
     300 *  @param alignment (in) the required alignment
     301 *  @return NULL if unsuccessful and a pointer to the block if successful
     302 */
     303void *_Heap_Allocate_aligned(
     304  Heap_Control *the_heap,
     305  uint32_t    size,
     306  uint32_t    alignment
     307);
     308
     309/**
     310 *  This function sets @a *size to the size of the block of user memory
     311 *  which begins at @a starting_address. The size returned in @a *size could
     312 *  be greater than the size requested for allocation.
     313 *  Returns TRUE if the @a starting_address is in the heap, and FALSE
    237314 *  otherwise.
    238315 *
    239316 *  @param the_heap (in) is the heap to operate upon
    240  *  @param starting_address (in) is the starting address of the user block 
     317 *  @param starting_address (in) is the starting address of the user block
    241318 *         to obtain the size of
    242319 *  @param size (in) points to a user area to return the size in
     
    247324  Heap_Control        *the_heap,
    248325  void                *starting_address,
    249   uint32_t            *size
     326  size_t              *size
    250327);
    251328
     
    256333 *
    257334 *  @param the_heap (in) is the heap to operate upon
    258  *  @param starting_address (in) is the starting address of the user block 
     335 *  @param starting_address (in) is the starting address of the user block
    259336 *         to free
    260337 *  @return TRUE if successfully freed, FALSE otherwise
     
    267344/**
    268345 *  This routine walks the heap to verify its integrity.
    269  * 
     346 *
    270347 *  @param the_heap (in) is the heap to operate upon
    271348 *  @param source (in) XXX
    272349 *  @param do_dump (in) is set to TRUE if errors should be printed
    273  */
    274 void _Heap_Walk(
     350 *  @return TRUE if the test passed fine, FALSE otherwise.
     351 */
     352
     353boolean _Heap_Walk(
    275354  Heap_Control *the_heap,
    276355  int           source,
     
    279358
    280359/**
    281  *  This kernel routine walks the heap and tots up the free and allocated
    282  *  sizes.  Derived from _Heap_Walk.
     360 *  This routine walks the heap and tots up the free and allocated
     361 *  sizes.
    283362 *
    284363 *  @param the_heap (in) pointer to heap header
     
    295374 *  This heap routine returns information about the free blocks
    296375 *  in the specified heap.
    297  * 
     376 *
    298377 *  @param the_heap (in) pointer to heap header.
    299378 *  @param info (in) pointer to the free block information.
    300  * 
     379 *
    301380 *  @return free block information filled in.
    302  */ 
    303    
     381 */
     382
    304383void _Heap_Get_free_information(
    305384  Heap_Control        *the_heap,
     
    307386);
    308387
    309 #ifndef __RTEMS_APPLICATION__
     388#if !defined(__RTEMS_APPLICATION__)
     389
     390/*
     391 *  A pointer to unsigned integer conversion.
     392 */
     393#define _H_p2u(_p) ((_H_uptr_t)(_p))
     394
    310395#include <rtems/score/heap.inl>
     396
     397/*
     398 *  Internal routines shared by _Heap_Allocate() and _Heap_Allocate_aligned().
     399 *  Refer to 'heap.c' for details.
     400 */
     401
     402extern uint32_t _Heap_Calc_block_size(
     403  uint32_t size,
     404  uint32_t page_size,
     405  uint32_t min_size);
     406
     407extern Heap_Block* _Heap_Block_allocate(
     408  Heap_Control* the_heap,
     409  Heap_Block* the_block,
     410  uint32_t alloc_size);
     411
     412/*
     413 * Debug support
     414 */
     415
     416#if defined(RTEMS_DEBUG)
     417#define RTEMS_HEAP_DEBUG
    311418#endif
     419
     420#if defined(RTEMS_HEAP_DEBUG)
     421
     422#include <assert.h>
     423
     424/*
     425 *  We do asserts only for heaps with instance number greater than 0 assuming
     426 *  that the heap used for malloc is initialized first and thus has instance
     427 *  number 0. Asserting the malloc heap may lead to trouble as printf may invoke
     428 *  malloc, probably leading to infinite recursion.
     429 */
     430
     431#define _HAssert(cond_) \
     432  do { \
     433    if(the_heap->stats.instance && !(cond_)) \
     434      __assert(__FILE__, __LINE__, #cond_);  \
     435  } while(0)
     436
     437#else  /* !defined(RTEMS_HEAP_DEBUG) */
     438
     439#define _HAssert(cond_) ((void)0)
     440
     441#endif /* !defined(RTEMS_HEAP_DEBUG) */
     442
     443#endif /* !defined(__RTEMS_APPLICATION__) */
    312444
    313445#ifdef __cplusplus
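
    As an illustrative aside (this sketch is not part of the patch): the HEAP_PREV_USED
    encoding documented in the new header can be checked with a tiny stand-alone C
    program. EXAMPLE_PREV_USED is an invented name; in the patch the flag is
    HEAP_PREV_USED and the field is Heap_Block.size.

      #include <stdint.h>
      #include <stdio.h>

      #define EXAMPLE_PREV_USED 1u   /* low bit of the 'size' field */

      int main(void)
      {
        /* A 24-byte block whose predecessor is allocated: block sizes are
           multiples of page_size (itself a multiple of 4), so the low bit is
           free to carry the "previous block is in use" flag. */
        uint32_t size_field = 24u | EXAMPLE_PREV_USED;

        printf( "block size: %u\n", (unsigned)(size_field & ~EXAMPLE_PREV_USED) ); /* 24 */
        printf( "prev used : %u\n", (unsigned)(size_field &  EXAMPLE_PREV_USED) ); /* 1  */
        return 0;
      }
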
  • cpukit/score/inline/rtems/score/heap.inl

    rf3b7be3 r962e894f  
    3535)
    3636{
    37   return (Heap_Block *)&the_heap->start;
     37  return &the_heap->free_list;
    3838}
    3939
     
    4646)
    4747{
    48   return (Heap_Block *)&the_heap->final;
    49 }
    50 
    51 /**
    52  *  This function returns the address of the block which physically
    53  *  precedes the_block in memory.
    54  */
    55 
    56 RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Previous_block (
    57   Heap_Block *the_block
    58 )
    59 {
    60   return (Heap_Block *) _Addresses_Subtract_offset(
    61                           (void *)the_block,
    62                           the_block->back_flag & ~ HEAP_BLOCK_USED
    63                         );
    64 }
    65 
    66 /**
    67  *  This function returns the address of the block which physically
    68  *  follows the_block in memory.
    69  *
    70  *  @note Next_block assumes that the block is free.
    71  */
    72 
    73 RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Next_block (
    74   Heap_Block *the_block
    75 )
    76 {
    77   return (Heap_Block *) _Addresses_Add_offset(
    78                           (void *)the_block,
    79                           the_block->front_flag & ~ HEAP_BLOCK_USED
    80                         );
     48  return &the_heap->free_list;
     49}
     50
     51/**
     52 *  Return the first free block of the specified heap.
     53 */
     54
     55RTEMS_INLINE_ROUTINE Heap_Block *_Heap_First (
     56  Heap_Control *the_heap
     57)
     58{
     59  return _Heap_Head(the_heap)->next;
     60}
     61
     62/*PAGE
     63 *
     64 *  _Heap_Last
     65 *
     66 *  DESCRIPTION:
     67 *
     68 *  Return the last free block of the specified heap.
     69 */
     70
     71RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Last (
     72  Heap_Control *the_heap
     73)
     74{
     75  return _Heap_Tail(the_heap)->prev;
     76}
     77
     78/*PAGE
     79 *
     80 *  _Heap_Block_remove
     81 *
     82 *  DESCRIPTION:
     83 *
     84 *  This function removes 'the_block' from doubly-linked list.
     85 */
     86
     87RTEMS_INLINE_ROUTINE void _Heap_Block_remove (
     88  Heap_Block *the_block
     89)
     90{
     91  Heap_Block *block = the_block;
     92
     93  Heap_Block *next = block->next;
     94  Heap_Block *prev = block->prev;
     95  prev->next = next;
     96  next->prev = prev;
     97}
     98
     99/**
     100 *  This function replaces @a old_block by @a new_block in doubly-linked list.
     101 */
     102
     103RTEMS_INLINE_ROUTINE void _Heap_Block_replace (
     104  Heap_Block *old_block,
     105  Heap_Block *new_block
     106)
     107{
     108  Heap_Block *block = old_block;
     109  Heap_Block *next = block->next;
     110  Heap_Block *prev = block->prev;
     111
     112  block = new_block;
     113  block->next = next;
     114  block->prev = prev;
     115  next->prev = prev->next = block;
     116}
     117
     118/**
     119 *  This function inserts @a the_block after @a prev_block
     120 *  in doubly-linked list.
     121 */
     122RTEMS_INLINE_ROUTINE void _Heap_Block_insert_after (
     123  Heap_Block *prev_block,
     124  Heap_Block *the_block
     125)
     126{
     127  Heap_Block *prev = prev_block;
     128  Heap_Block *block = the_block;
     129
     130  Heap_Block *next = prev->next;
     131  block->next  = next;
     132  block->prev  = prev;
     133  next->prev = prev->next = block;
     134}
     135
     136/**
     137 *  Return TRUE if @a value is a multiple of @a alignment,  FALSE otherwise
     138 */
     139
     140RTEMS_INLINE_ROUTINE boolean _Heap_Is_aligned (
     141  uint32_t  value,
     142  uint32_t  alignment
     143)
     144{
     145  return (value % alignment) == 0;
     146}
     147
     148/**
     149 *  Align @a *value up to the nearest multiple of @a alignment.
     150 */
     151
     152RTEMS_INLINE_ROUTINE void _Heap_Align_up (
     153  uint32_t *value,
     154  uint32_t  alignment
     155)
     156{
     157  uint32_t v = *value;
     158  uint32_t a = alignment;
     159  uint32_t r = v % a;
     160  *value = r ? v - r + a : v;
     161}
     162
     163/**
     164 *  Align @a *value down to the nearest multiple of @a alignment.
     165 */
     166
     167RTEMS_INLINE_ROUTINE void _Heap_Align_down (
     168  uint32_t *value,
     169  uint32_t  alignment
     170)
     171{
     172  uint32_t v = *value;
     173  *value = v - (v % alignment);
     174}
     175
     176/**
     177 *  Return TRUE if @a ptr is aligned at @a alignment boundary,
     178 *  FALSE otherwise
     179 */
     180
     181RTEMS_INLINE_ROUTINE boolean _Heap_Is_aligned_ptr (
     182  void *ptr,
     183  uint32_t  alignment
     184)
     185{
     186  return (_H_p2u(ptr) % alignment) == 0;
     187}
     188
     189/**
     190 *  Align @a *value up to the nearest multiple of @a alignment.
     191 */
     192
     193RTEMS_INLINE_ROUTINE void _Heap_Align_up_uptr (
     194  _H_uptr_t *value,
     195  uint32_t  alignment
     196)
     197{
     198  _H_uptr_t v = *value;
     199  uint32_t a = alignment;
     200  _H_uptr_t r = v % a;
     201  *value = r ? v - r + a : v;
     202}
     203
     204/**
     205 *  Align @a *value down to the nearest multiple of @a alignment.
     206 */
     207
     208RTEMS_INLINE_ROUTINE void _Heap_Align_down_uptr (
     209  _H_uptr_t *value,
     210  uint32_t  alignment
     211)
     212{
     213  _H_uptr_t v = *value;
     214  *value = v - (v % alignment);
    81215}
    82216
    83217/**
    84218 *  This function calculates and returns a block's location (address)
    85  *  in the heap based upon a base address and an offset.
     219 *  in the heap based upon a base address @a base and an @a offset.
    86220 */
    87221
    88222RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
    89223  void       *base,
    90   uint32_t    offset
    91 )
    92 {
    93   return (Heap_Block *) _Addresses_Add_offset( (void *)base, offset );
    94 }
    95 
    96 /**
    97  *  XXX
    98  */
    99  
    100 RTEMS_INLINE_ROUTINE Heap_Block *_Heap_User_block_at(
    101   void       *base
    102 )
    103 {
    104   uint32_t           offset;
    105  
    106   offset = *(((uint32_t   *) base) - 1);
    107   return _Heap_Block_at( base, -offset + -HEAP_BLOCK_USED_OVERHEAD);
    108 }
    109 
    110 /**
    111  *  This function returns TRUE if the previous block of the_block
    112  *  is free, and FALSE otherwise.
    113  */
    114 
    115 RTEMS_INLINE_ROUTINE boolean _Heap_Is_previous_block_free (
    116   Heap_Block *the_block
    117 )
    118 {
    119   return !(the_block->back_flag & HEAP_BLOCK_USED);
    120 }
    121 
    122 /**
    123  *  This function returns TRUE if the block is free, and FALSE otherwise.
    124  */
    125 
    126 RTEMS_INLINE_ROUTINE boolean _Heap_Is_block_free (
    127   Heap_Block *the_block
    128 )
    129 {
    130   return !(the_block->front_flag & HEAP_BLOCK_USED);
    131 }
    132 
    133 /**
    134  *  This function returns TRUE if the block is currently allocated,
    135  *  and FALSE otherwise.
    136  */
    137 
    138 RTEMS_INLINE_ROUTINE boolean _Heap_Is_block_used (
    139   Heap_Block *the_block
    140 )
    141 {
    142   return (the_block->front_flag & HEAP_BLOCK_USED);
    143 }
    144 
    145 /**
    146  *  This function returns the size of the_block in bytes.
    147  */
    148 
    149 RTEMS_INLINE_ROUTINE uint32_t   _Heap_Block_size (
    150   Heap_Block *the_block
    151 )
    152 {
    153   return (the_block->front_flag & ~HEAP_BLOCK_USED);
    154 }
    155 
    156 /**
    157  *  This function returns the starting address of the portion of the block
     224  uint32_t  offset
     225)
     226{
     227  return (Heap_Block *) _Addresses_Add_offset( base, offset );
     228}
     229
     230/**
     231 *  This function returns the starting address of the portion of @a the_block
    158232 *  which the user may access.
    159233 */
    160234
    161 RTEMS_INLINE_ROUTINE void *_Heap_Start_of_user_area (
    162   Heap_Block *the_block
    163 )
    164 {
    165   return (void *) &the_block->next;
    166 }
    167 
    168 /**
    169  *  This function returns TRUE if the_block is within the memory area
    170  *  managed by the_heap, and FALSE otherwise.
     235RTEMS_INLINE_ROUTINE void *_Heap_User_area (
     236  Heap_Block *the_block
     237)
     238{
     239  return (void *) _Addresses_Add_offset ( the_block, HEAP_BLOCK_USER_OFFSET );
     240}
     241
     242/**
     243 *  Fill @a *the_block with the address of the beginning of the block given
     244 *  pointer to the user accessible area @a base.
     245 */
     246
     247RTEMS_INLINE_ROUTINE void _Heap_Start_of_block (
     248  Heap_Control  *the_heap,
     249  void          *base,
     250  Heap_Block   **the_block
     251)
     252{
     253  _H_uptr_t addr = _H_p2u(base);
     254  /* The address passed could be greater than the block address plus
     255   * HEAP_BLOCK_USER_OFFSET as _Heap_Allocate_aligned() may produce such user
     256   * pointers. To get rid of this offset we need to align the address down
     257   * to the nearest 'page_size' boundary. */
     258  _Heap_Align_down_uptr ( &addr, the_heap->page_size );
     259  *the_block = (Heap_Block *)(addr - HEAP_BLOCK_USER_OFFSET);
     260}
     261
     262/**
     263 *  This function returns TRUE if the previous block of @a the_block
     264 *  is in use, and FALSE otherwise.
     265 */
     266
     267RTEMS_INLINE_ROUTINE boolean _Heap_Is_prev_used (
     268  Heap_Block *the_block
     269)
     270{
     271  return (the_block->size & HEAP_PREV_USED);
     272}
     273
     274/**
     275 *  This function returns the size of @a the_block in bytes.
     276 */
     277
     278RTEMS_INLINE_ROUTINE uint32_t _Heap_Block_size (
     279  Heap_Block *the_block
     280)
     281{
     282  return (the_block->size & ~HEAP_PREV_USED);
     283}
     284
     285/**
     286 *  This function returns TRUE if @a the_block is within the memory area
     287 *  managed by @a the_heap, and FALSE otherwise.
    171288 */
    172289
     
    179296}
    180297
    181 /**
    182  *  This function validates a specified heap page size.  If the page size
    183  *  is 0 or if lies outside a page size alignment boundary it is invalid
    184  *  and FALSE is returned.  Otherwise, the page size is valid and TRUE is
    185  *  returned.
    186  */
    187 
    188 RTEMS_INLINE_ROUTINE boolean _Heap_Is_page_size_valid(
    189   uint32_t   page_size
    190 )
    191 {
    192   return ((page_size != 0) &&
    193          ((page_size % CPU_HEAP_ALIGNMENT) == 0));
    194 }
    195 
    196 /**
    197  *  This function returns the block flag composed of size and in_use_flag.
    198  *  The flag returned is suitable for use as a back or front flag in a
    199  *  heap block.
    200  */
    201 
    202 RTEMS_INLINE_ROUTINE uint32_t   _Heap_Build_flag (
    203   uint32_t   size,
    204   uint32_t   in_use_flag
    205 )
    206 {
    207   return  size | in_use_flag;
    208 }
    209 
    210298/**@}*/
    211299
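
    As an illustrative aside (this sketch is not part of the patch): the new
    _Heap_Align_up/_Heap_Align_down helpers are plain modulo arithmetic and therefore
    work for any alignment, not only powers of two, which is what the FIXME in heap.h
    alludes to. example_align_up and example_align_down are invented names.

      #include <stdint.h>
      #include <stdio.h>

      static uint32_t example_align_up( uint32_t value, uint32_t alignment )
      {
        uint32_t r = value % alignment;
        return r ? value - r + alignment : value;
      }

      static uint32_t example_align_down( uint32_t value, uint32_t alignment )
      {
        return value - ( value % alignment );
      }

      int main(void)
      {
        /* With an alignment of 8, 21 rounds up to 24 and down to 16;
           an already aligned value is returned unchanged. */
        printf( "%u %u %u\n",
                (unsigned) example_align_up( 21, 8 ),
                (unsigned) example_align_down( 21, 8 ),
                (unsigned) example_align_up( 24, 8 ) );
        return 0;
      }
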
  • cpukit/score/macros/rtems/score/heap.inl

    rf3b7be3 r962e894f  
    1717#define __HEAP_inl
    1818
     19/*
     20 * WARNING: this file is only visually checked against
     21 * '../../../inline/rtems/score/heap.inl'. Use that file for reference
     22 * if you encounter problems.
     23 */
     24
    1925#include <rtems/score/address.h>
    2026
     
    2430 */
    2531
    26 #define _Heap_Head( _the_heap ) \
    27   ((Heap_Block *)&(_the_heap)->start)
     32#define _Heap_Head( _the_heap ) (&(_the_heap)->free_list)
    2833
    2934/*PAGE
     
    3237 */
    3338
    34 #define _Heap_Tail( _the_heap ) \
    35   ((Heap_Block *)&(_the_heap)->final)
    36 
    37 /*PAGE
    38  *
    39  *  _Heap_Previous_block
    40  */
    41 
    42 #define _Heap_Previous_block( _the_block ) \
    43   ( (Heap_Block *) _Addresses_Subtract_offset( \
    44       (void *)(_the_block), \
    45       (_the_block)->back_flag & ~ HEAP_BLOCK_USED \
    46     ) \
    47   )
    48 
    49 /*PAGE
    50  *
    51  *  _Heap_Next_block
    52  */
    53 
    54 #define _Heap_Next_block( _the_block ) \
    55   ( (Heap_Block *) _Addresses_Add_offset( \
    56       (void *)(_the_block), \
    57       (_the_block)->front_flag & ~ HEAP_BLOCK_USED \
    58     ) \
    59   )
     39#define _Heap_Tail( _the_heap ) (&(_the_heap)->free_list)
     40
     41/*PAGE
     42 *
     43 *  _Heap_First
     44 */
     45
     46#define _Heap_First( _the_heap ) (_Heap_Head(_the_heap)->next)
     47
     48/*PAGE
     49 *
     50 *  _Heap_Last
     51 */
     52
     53#define _Heap_Last( _the_heap )  (_Heap_Tail(_the_heap)->prev)
     54
     55/*PAGE
     56 *
     57 *  _Heap_Block_remove
     58 *
     59 */
     60
     61#define _Heap_Block_remove( _the_block ) \
     62  do { \
     63    Heap_Block *block = (_the_block); \
     64    Heap_Block *next = block->next; \
     65    Heap_Block *prev = block->prev; \
     66    prev->next = next; \
     67    next->prev = prev; \
     68  } while(0)
     69
     70/*PAGE
     71 *
     72 *  _Heap_Block_replace
     73 *
     74 */
     75
     76#define _Heap_Block_replace( _old_block,  _new_block ) \
     77  do { \
     78    Heap_Block *block = (_old_block); \
     79    Heap_Block *next = block->next; \
     80    Heap_Block *prev = block->prev; \
     81    block = (_new_block); \
     82    block->next = next; \
     83    block->prev = prev; \
     84    next->prev = prev->next = block; \
     85  } while(0)
     86
     87/*PAGE
     88 *
     89 *  _Heap_Block_insert_after
     90 *
     91 */
     92
     93#define _Heap_Block_insert_after(  _prev_block, _the_block ) \
     94  do { \
     95    Heap_Block *prev = (_prev_block); \
     96    Heap_Block *block = (_the_block); \
     97    Heap_Block *next = prev->next; \
     98    block->next  = next; \
     99    block->prev  = prev; \
     100    next->prev = prev->next = block; \
     101  } while(0)
     102
     103/*PAGE
     104 *
     105 *  _Heap_Is_aligned
     106 */
     107
     108#define _Heap_Is_aligned( _value, _alignment ) \
     109  (((_value) % (_alignment)) == 0)
     110
     111/*PAGE
     112 *
     113 *  _Heap_Align_up
     114 */
     115
     116#define _Heap_Align_up( _value, _alignment ) \
     117  do { \
     118    unsigned32 v = *(_value); \
     119    unsigned32 a = (_alignment); \
     120    unsigned32 r = v % a; \
     121    *(_value) = r ? v - r + a : v; \
     122  } while(0)
     123
     124/*PAGE
     125 *
     126 *  _Heap_Align_down
     127 */
     128
     129#define _Heap_Align_down( _value, _alignment ) \
     130  do { \
     131    unsigned32 v = *(_value); \
     132    *(_value) = v - (v % (_alignment)); \
     133  } while(0)
     134
     135/*PAGE
     136 *
     137 *  _Heap_Is_aligned_ptr
     138 */
     139
     140#define _Heap_Is_aligned_ptr( _ptr, _alignment ) \
     141  ((_H_p2u(_ptr) % (_alignment)) == 0)
     142
     143/*PAGE
     144 *
     145 *  _Heap_Align_up_uptr
     146 */
     147
     148#define _Heap_Align_up_uptr( _value, _alignment ) \
     149  do { \
     150    _H_uptr_t v = *(_value); \
     151    unsigned32 a = (_alignment); \
     152    _H_uptr_t r = v % a; \
     153    *(_value) = r ? v - r + a : v; \
     154  } while(0)
     155
     156/*PAGE
     157 *
     158 *  _Heap_Align_down_uptr
     159 */
     160
     161#define _Heap_Align_down_uptr( _value, _alignment ) \
     162  do { \
     163    _H_uptr_t v = *(_value); \
     164    *(_value) = v - (v % (_alignment)); \
     165  } while(0)
    60166
    61167/*PAGE
     
    65171
    66172#define _Heap_Block_at( _base, _offset ) \
    67   ( (Heap_Block *) \
    68      _Addresses_Add_offset( (void *)(_base), (_offset) ) )
    69 
    70 /*PAGE
    71  *
    72  *  _Heap_User_block_at
    73  *
     173  ( (Heap_Block *) _Addresses_Add_offset( (_base), (_offset) ) )
     174
     175/*PAGE
     176 *
     177 *  _Heap_User_area
     178 */
     179
     180#define _Heap_User_area( _the_block ) \
     181  ((void *) _Addresses_Add_offset( (_the_block), HEAP_BLOCK_USER_OFFSET ))
     182
     183/*PAGE
     184 *
     185 *  _Heap_Start_of_block
    74186 */
    75187 
    76 #define _Heap_User_block_at( _base ) \
    77   _Heap_Block_at( \
    78     (_base), \
    79     -*(((uint32_t   *) (_base)) - 1) + -HEAP_BLOCK_USED_OVERHEAD \
    80   )
    81 
    82 /*PAGE
    83  *
    84  *  _Heap_Is_previous_block_free
    85  */
    86 
    87 #define _Heap_Is_previous_block_free( _the_block ) \
    88   ( !((_the_block)->back_flag & HEAP_BLOCK_USED) )
    89 
    90 /*PAGE
    91  *
    92  *  _Heap_Is_block_free
    93  */
    94 
    95 #define _Heap_Is_block_free( _the_block ) \
    96   ( !((_the_block)->front_flag & HEAP_BLOCK_USED) )
    97 
    98 /*PAGE
    99  *
    100  *  _Heap_Is_block_used
    101  */
    102 
    103 #define _Heap_Is_block_used( _the_block ) \
    104   ((_the_block)->front_flag & HEAP_BLOCK_USED)
     188#define _Heap_Start_of_block( _the_heap, _base, _the_block_ptr ) \
     189  do { \
     190    _H_uptr_t addr = _H_p2u(_base); \
     191    _Heap_Align_down( &addr, (_the_heap)->page_size ); \
     192    *(_the_block_ptr) = (Heap_Block *)(addr - HEAP_BLOCK_USER_OFFSET); \
     193  } while(0)
     194
     195/*PAGE
     196 *
     197 *  _Heap_Is_prev_used
     198 */
     199
     200#define _Heap_Is_prev_used( _the_block ) \
     201  ((_the_block)->size & HEAP_PREV_USED)
    105202
    106203/*PAGE
     
    110207
    111208#define _Heap_Block_size( _the_block )    \
    112   ((_the_block)->front_flag & ~HEAP_BLOCK_USED)
    113 
    114 /*PAGE
    115  *
    116  *  _Heap_Start_of_user_area
    117  */
    118 
    119 #define _Heap_Start_of_user_area( _the_block ) \
    120   ((void *) &(_the_block)->next)
     209  ((_the_block)->size & ~HEAP_PREV_USED)
    121210
    122211/*PAGE
     
    126215
    127216#define _Heap_Is_block_in( _the_heap, _the_block ) \
    128   ( ((_the_block) >= (_the_heap)->start) && \
    129     ((_the_block) <= (_the_heap)->final) )
    130 
    131 /*PAGE
    132  *
    133  *  _Heap_Is_page_size_valid
    134  */
    135 
    136 #define _Heap_Is_page_size_valid( _page_size ) \
    137   ( ((_page_size) != 0) && \
    138     (((_page_size) % CPU_HEAP_ALIGNMENT) == 0) )
    139 
    140 /*PAGE
    141  *
    142  *  _Heap_Build_flag
    143  */
    144 
    145 #define _Heap_Build_flag( _size, _in_use_flag ) \
    146   ( (_size) | (_in_use_flag))
     217  ( _Addresses_Is_in_range( (_the_block), \
     218    (_the_heap)->start, (_the_heap)->final ) )
    147219
    148220#endif
  • cpukit/score/src/heap.c

    rf3b7be3 r962e894f  
    1616#include <rtems/score/sysstate.h>
    1717#include <rtems/score/heap.h>
     18
     19static uint32_t instance = 0;
    1820
    1921/*PAGE
     
    3335 *    0       - otherwise
    3436 *
    35  *    This is what a heap looks like in memory immediately
    36  *    after initialization:
    37  *
    38  *            +--------------------------------+
    39  *     0      |  size = 0      | status = used |  a.k.a.  dummy back flag
    40  *            +--------------------------------+
    41  *     4      |  size = size-8 | status = free |  a.k.a.  front flag
    42  *            +--------------------------------+
    43  *     8      |  next     = PERM HEAP_TAIL     |
    44  *            +--------------------------------+
    45  *    12      |  previous = PERM HEAP_HEAD     |
    46  *            +--------------------------------+
    47  *            |                                |
    48  *            |      memory available          |
    49  *            |       for allocation           |
    50  *            |                                |
    51  *            +--------------------------------+
    52  *  size - 8  |  size = size-8 | status = free |  a.k.a.  back flag
    53  *            +--------------------------------+
    54  *  size - 4  |  size = 0      | status = used |  a.k.a.  dummy front flag
    55  *            +--------------------------------+
     37 *  This is what a heap looks like in memory immediately after initialization:
     38 *
     39 *
     40 *            +--------------------------------+ <- begin = starting_address
     41 *            |  unused space due to alignment |
     42 *            |       size < page_size         |
     43 *         0  +--------------------------------+ <- first block
     44 *            |  prev_size = 1 (arbitrary)     |
     45 *         4  +--------------------------------+
     46 *            |  size = size0              | 1 |
     47 *         8  +---------------------+----------+ <- aligned on page_size
     48 *            |  next = HEAP_TAIL   |          |
     49 *        12  +---------------------+          |
     50 *            |  prev = HEAP_HEAD   |  memory  |
     51 *            +---------------------+          |
     52 *            |                     available  |
     53 *            |                                |
     54 *            |                for allocation  |
     55 *            |                                |
     56 *     size0  +--------------------------------+ <- last dummy block
     57 *            |  prev_size = size0             |
     58 *        +4  +--------------------------------+
     59 *            |  size = 0 (arbitrary)      | 0 | <- prev block is free
     60 *        +8  +--------------------------------+ <- aligned on page_size
     61 *            |  unused space due to alignment |
     62 *            |       size < page_size         |
     63 *            +--------------------------------+ <- end = begin + size
     64 *
     65 *  This is what a heap looks like after first allocation of SIZE bytes.
     66 *  BSIZE stands for SIZE + 4 aligned up on 'page_size' boundary if allocation
     67 *  has been performed by _Heap_Allocate(). If allocation has been performed
     68 *  by _Heap_Allocate_aligned(), the block size BSIZE is defined differently
     69 *  (see 'heapallocatealigned.c' for details).
     70 *
     71 *            +--------------------------------+ <- begin = starting_address
     72 *            |  unused space due to alignment |
     73 *            |       size < page_size         |
     74 *         0  +--------------------------------+ <- first block
     75 *            |  prev_size = 1 (arbitrary)     |
     76 *         4  +--------------------------------+
     77 *            |  size = S = size0 - BSIZE  | 1 |
     78 *         8  +---------------------+----------+ <- aligned on page_size
     79 *            |  next = HEAP_TAIL   |          |
     80 *        12  +---------------------+          |
     81 *            |  prev = HEAP_HEAD   |  memory  |
     82 *            +---------------------+          |
     83 *            |                     available  |
     84 *            |                                |
     85 *            |                for allocation  |
     86 *            |                                |
     87 *         S  +--------------------------------+ <- used block
     88 *            |  prev_size = size0 - BSIZE     |
     89 *        +4  +--------------------------------+
     90 *            |  size = BSIZE              | 0 | <- prev block is free
     91 *        +8  +--------------------------------+ <- aligned on page_size
     92 *            |              .                 | Pointer returned to the user
     93 *            |              .                 | is (S+8) for _Heap_Allocate()
     94 *            |              .                 | and is in range
     95 * S + 8 +    |         user-accessible        | [S+8,S+8+page_size) for
     96 *   page_size+- - -                      - - -+ _Heap_Allocate_aligned()
     97 *            |             area               |
     98 *            |              .                 |
     99 * S + BSIZE  +- - - - -     .        - - - - -+ <- last dummy block
     100 *            |              .                 |
     101 *        +4  +--------------------------------+
     102 *            |  size = 0 (arbitrary)      | 1 | <- prev block is used
     103 *        +8  +--------------------------------+ <- aligned on page_size
     104 *            |  unused space due to alignment |
     105 *            |       size < page_size         |
     106 *            +--------------------------------+ <- end = begin + size
     107 *
    56108 */
    57109
     
    63115)
    64116{
    65   Heap_Block        *the_block;
    66   uint32_t           the_size;
    67 
    68   if ( !_Heap_Is_page_size_valid( page_size ) ||
    69        (size < HEAP_MINIMUM_SIZE) )
    70     return 0;
     117  Heap_Block *the_block;
     118  uint32_t  the_size;
     119  _H_uptr_t   start;
     120  _H_uptr_t   aligned_start;
     121  uint32_t  overhead;
     122  Heap_Statistics *const stats = &the_heap->stats;
     123
     124  if(page_size == 0)
     125    page_size = CPU_ALIGNMENT;
     126  else
     127    _Heap_Align_up( &page_size, CPU_ALIGNMENT );
     128
     129  /* Calculate aligned_start so that aligned_start + HEAP_BLOCK_USER_OFFSET
     130     (value of user pointer) is aligned on 'page_size' boundary. Make sure
     131     resulting 'aligned_start' is not below 'starting_address'. */
     132  start = _H_p2u(starting_address);
     133  aligned_start = start + HEAP_BLOCK_USER_OFFSET;
     134  _Heap_Align_up_uptr ( &aligned_start, page_size );
     135  aligned_start -= HEAP_BLOCK_USER_OFFSET;
     136
     137  /* Calculate 'min_block_size'. It's HEAP_MIN_BLOCK_SIZE aligned up to the
     138     nearest multiple of 'page_size'. */
     139  the_heap->min_block_size = HEAP_MIN_BLOCK_SIZE;
     140  _Heap_Align_up ( &the_heap->min_block_size, page_size );
     141
     142  /* Calculate 'the_size' -- size of the first block so that there is enough
     143     space at the end for the permanent last block. It is equal to 'size'
     144     minus total overhead aligned down to the nearest multiple of
     145     'page_size'. */
     146  overhead = HEAP_OVERHEAD + (aligned_start - start);
     147  if ( size < overhead )
     148    return 0;   /* Too small area for the heap */
     149  the_size = size - overhead;
     150  _Heap_Align_down ( &the_size, page_size );
     151  if ( the_size == 0 )
     152    return 0;   /* Too small area for the heap */
    71153
    72154  the_heap->page_size = page_size;
    73   the_size = size - HEAP_OVERHEAD;
    74 
    75   the_block             = (Heap_Block *) starting_address;
    76   the_block->back_flag  = HEAP_DUMMY_FLAG;
    77   the_block->front_flag = the_size;
    78   the_block->next       = _Heap_Tail( the_heap );
    79   the_block->previous   = _Heap_Head( the_heap );
    80 
    81   the_heap->start          = the_block;
    82   the_heap->first          = the_block;
    83   the_heap->permanent_null = NULL;
    84   the_heap->last           = the_block;
    85 
    86   the_block             = _Heap_Next_block( the_block );
    87   the_block->back_flag  = the_size;
    88   the_block->front_flag = HEAP_DUMMY_FLAG;
    89   the_heap->final       = the_block;
     155  the_heap->begin = starting_address;
     156  the_heap->end = starting_address + size;
     157
     158  the_block = (Heap_Block *) aligned_start;
     159
     160  the_block->prev_size = HEAP_PREV_USED;
     161  the_block->size = the_size | HEAP_PREV_USED;
     162  the_block->next = _Heap_Tail( the_heap );
     163  the_block->prev = _Heap_Head( the_heap );
     164  _Heap_Head(the_heap)->next = the_block;
     165  _Heap_Tail(the_heap)->prev = the_block;
     166  the_heap->start = the_block;
     167
     168  _HAssert(_Heap_Is_aligned(the_heap->page_size, CPU_ALIGNMENT));
     169  _HAssert(_Heap_Is_aligned(the_heap->min_block_size, page_size));
     170  _HAssert(_Heap_Is_aligned_ptr(_Heap_User_area(the_block), page_size));
     171
     172
     173  the_block = _Heap_Block_at( the_block, the_size );
     174  the_heap->final = the_block;       /* Permanent final block of the heap */
     175  the_block->prev_size = the_size;   /* Previous block is free */
     176  the_block->size = 0;  /* This is the only block with size=0  */
     177
     178  stats->size = size;
     179  stats->free_size = the_size;
     180  stats->min_free_size = the_size;
     181  stats->free_blocks = 1;
     182  stats->max_free_blocks = 1;
     183  stats->used_blocks = 0;
     184  stats->max_search = 0;
     185  stats->allocs = 0;
     186  stats->searches = 0;
     187  stats->frees = 0;
     188  stats->instance = instance++;
    90189
    91190  return ( the_size - HEAP_BLOCK_USED_OVERHEAD );
    92191}
     192
     193/*PAGE
     194 *
     195 * Internal routines shared by _Heap_Allocate() and _Heap_Allocate_aligned().
     196 *
     197 * Note: there is no reason to put them into a separate file as they are
     198 * always required for the heap to be useful.
     199 *
     200 */
     201
     202
     203/*
     204 * Convert user requested 'size' of memory block to the block size.
     205 * Return block size on success, 0 if overflow occurred
     206 */
     207uint32_t _Heap_Calc_block_size(
     208  uint32_t size,
     209  uint32_t page_size,
     210  uint32_t min_size)
     211{
     212  uint32_t block_size = size + HEAP_BLOCK_USED_OVERHEAD;
     213  _Heap_Align_up(&block_size, page_size);
     214  if(block_size < min_size) block_size = min_size;
     215  return (block_size > size) ? block_size : 0;
     216}
     217
     218
     219/*
     220 * Allocate block of size 'alloc_size' from 'the_block' belonging to
     221 * 'the_heap'. Either split 'the_block' or allocate it entirely.
     222 * Return the block allocated.
     223 */
     224Heap_Block* _Heap_Block_allocate(
     225  Heap_Control* the_heap,
     226  Heap_Block* the_block,
     227  uint32_t alloc_size)
     228{
     229  Heap_Statistics *const stats = &the_heap->stats;
     230  uint32_t const block_size = _Heap_Block_size(the_block);
     231  uint32_t const the_rest = block_size - alloc_size;
     232
     233  _HAssert(_Heap_Is_aligned(block_size, the_heap->page_size));
     234  _HAssert(_Heap_Is_aligned(alloc_size, the_heap->page_size));
     235  _HAssert(alloc_size <= block_size);
     236
     237  if(the_rest >= the_heap->min_block_size) {
     238    /* Split the block so that lower part is still free, and upper part
     239       becomes used. */
     240    the_block->size = the_rest | HEAP_PREV_USED;
     241    the_block = _Heap_Block_at(the_block, the_rest);
     242    the_block->prev_size = the_rest;
     243    the_block->size = alloc_size;
     244  }
     245  else {
     246    /* Don't split the block as remainder is either zero or too small to be
     247       used as a separate free block. Change 'alloc_size' to the size of the
     248       block and remove the block from the list of free blocks. */
     249    _Heap_Block_remove(the_block);
     250    alloc_size = block_size;
     251    stats->free_blocks -= 1;
     252  }
     253  /* Mark the block as used (in the next block). */
     254  _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;
     255  /* Update statistics */
     256  stats->free_size -= alloc_size;
     257  if(stats->min_free_size > stats->free_size)
     258    stats->min_free_size = stats->free_size;
     259  stats->used_blocks += 1;
     260  return the_block;
     261}
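
    As an illustrative aside (this sketch is not part of the patch): the arithmetic in
    _Heap_Calc_block_size() can be checked stand-alone. It assumes the 4-byte
    HEAP_BLOCK_USED_OVERHEAD from the new header (HEAP_BLOCK_USER_OFFSET minus
    HEAP_BLOCK_HEADER_OFFSET); example_calc_block_size is an invented name.

      #include <stdint.h>
      #include <stdio.h>

      static uint32_t example_calc_block_size(
        uint32_t size, uint32_t page_size, uint32_t min_size )
      {
        uint32_t block_size = size + 4;             /* user size + used overhead */
        uint32_t r = block_size % page_size;
        if ( r )
          block_size += page_size - r;              /* align up to page_size     */
        if ( block_size < min_size )
          block_size = min_size;                    /* enforce minimum block     */
        return ( block_size > size ) ? block_size : 0;   /* 0 on overflow        */
      }

      int main(void)
      {
        /* A 10-byte request with page_size 8 and min_block_size 16 becomes a
           16-byte block; a request near UINT32_MAX overflows and yields 0. */
        printf( "%u\n", (unsigned) example_calc_block_size( 10, 8, 16 ) );
        printf( "%u\n", (unsigned) example_calc_block_size( 0xFFFFFFFFu, 8, 16 ) );
        return 0;
      }
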
  • cpukit/score/src/heapallocate.c

    rf3b7be3 r962e894f  
    3737)
    3838{
    39   uint32_t    excess;
    40   uint32_t    the_size;
     39  uint32_t  the_size;
     40  uint32_t  search_count;
    4141  Heap_Block *the_block;
    42   Heap_Block *next_block;
    43   Heap_Block *temporary_block;
    44   void       *ptr;
    45   uint32_t    offset;
     42  void       *ptr = NULL;
     43  Heap_Statistics *const stats = &the_heap->stats;
     44  Heap_Block *const tail = _Heap_Tail(the_heap);
    4645
    47   /*
    48    * Catch the case of a user allocating close to the limit of the
    49    * uint32_t  .
    50    */
     46  the_size =
     47    _Heap_Calc_block_size(size, the_heap->page_size, the_heap->min_block_size);
     48  if(the_size == 0)
     49    return NULL;
    5150
    52   if ( size >= (-1 - HEAP_BLOCK_USED_OVERHEAD) )
    53     return( NULL );
     51  /* Find large enough free block. */
     52  for(the_block = _Heap_First(the_heap), search_count = 0;
     53      the_block != tail;
     54      the_block = the_block->next, ++search_count)
     55  {
     56    /* As we always coalesce free blocks, prev block must have been used. */
     57    _HAssert(_Heap_Is_prev_used(the_block));
    5458
    55   excess   = size % the_heap->page_size;
    56   the_size = size + the_heap->page_size + HEAP_BLOCK_USED_OVERHEAD;
     59    /* Don't bother to mask out the HEAP_PREV_USED bit as it won't change the
     60       result of the comparison. */
     61    if(the_block->size >= the_size) {
     62      the_block = _Heap_Block_allocate(the_heap, the_block, the_size );
    5763
    58   if ( excess )
    59     the_size += the_heap->page_size - excess;
     64      ptr = _Heap_User_area(the_block);
    6065
    61   if ( the_size < sizeof( Heap_Block ) )
    62     the_size = sizeof( Heap_Block );
     66      stats->allocs += 1;
     67      stats->searches += search_count + 1;
    6368
    64   for ( the_block = the_heap->first;
    65         ;
    66         the_block = the_block->next ) {
    67     if ( the_block == _Heap_Tail( the_heap ) )
    68       return( NULL );
    69     if ( the_block->front_flag >= the_size )
     69      _HAssert(_Heap_Is_aligned_ptr(ptr, the_heap->page_size));
    7070      break;
     71    }
    7172  }
    7273
    73   if ( (the_block->front_flag - the_size) >
    74        (the_heap->page_size + HEAP_BLOCK_USED_OVERHEAD) ) {
    75     the_block->front_flag -= the_size;
    76     next_block             = _Heap_Next_block( the_block );
    77     next_block->back_flag  = the_block->front_flag;
    78 
    79     temporary_block            = _Heap_Block_at( next_block, the_size );
    80     temporary_block->back_flag =
    81     next_block->front_flag     = _Heap_Build_flag( the_size,
    82                                     HEAP_BLOCK_USED );
    83     ptr = _Heap_Start_of_user_area( next_block );
    84   } else {
    85     next_block                = _Heap_Next_block( the_block );
    86     next_block->back_flag     = _Heap_Build_flag( the_block->front_flag,
    87                                    HEAP_BLOCK_USED );
    88     the_block->front_flag     = next_block->back_flag;
    89     the_block->next->previous = the_block->previous;
    90     the_block->previous->next = the_block->next;
    91     ptr = _Heap_Start_of_user_area( the_block );
    92   }
    93 
    94   /*
    95    * round ptr up to a multiple of page size
    96    * Have to save the bump amount in the buffer so that free can figure it out
    97    */
    98 
    99   offset = the_heap->page_size - (((uint32_t  ) ptr) & (the_heap->page_size - 1));
    100   ptr = _Addresses_Add_offset( ptr, offset );
    101   *(((uint32_t   *) ptr) - 1) = offset;
    102 
    103 #ifdef RTEMS_DEBUG
    104   {
    105       uint32_t   ptr_u32;
    106       ptr_u32 = (uint32_t  ) ptr;
    107       if (ptr_u32 & (the_heap->page_size - 1))
    108           abort();
    109   }
    110 #endif
     74  if(stats->max_search < search_count)
     75    stats->max_search = search_count;
    11176
    11277  return ptr;
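
    As an illustrative aside (this sketch is not part of the patch): the rewritten
    _Heap_Allocate() is a first-fit search that walks the free list, takes the first
    block whose size field is large enough, and counts the blocks visited for the
    statistics. The toy below uses an array of sizes in place of the doubly-linked
    free list; all names are invented.

      #include <stdint.h>
      #include <stdio.h>

      int main(void)
      {
        const uint32_t free_sizes[] = { 16, 48, 32, 128 };  /* stand-in free list */
        const uint32_t wanted = 32;
        unsigned search_count = 0;
        int found = -1;
        unsigned i;

        for ( i = 0; i < sizeof(free_sizes) / sizeof(free_sizes[0]); ++i ) {
          ++search_count;
          if ( free_sizes[ i ] >= wanted ) {   /* first block large enough wins */
            found = (int) i;
            break;
          }
        }

        /* Prints "block #1 after 2 searches": the 48-byte block is taken even
           though the 32-byte block later in the list would fit exactly. */
        printf( "block #%d after %u searches\n", found, search_count );
        return 0;
      }
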
  • cpukit/score/src/heapextend.c

    rf3b7be3 r962e894f  
    4040)
    4141{
    42   Heap_Block        *the_block;
    43   uint32_t          *p;
     42  uint32_t         the_size;
     43  Heap_Statistics *const stats = &the_heap->stats;
    4444
    4545  /*
     
    6363   */
    6464
    65   if ( starting_address >= (void *) the_heap->start &&        /* case 3 */
    66        starting_address <= (void *) the_heap->final
     65  if ( starting_address >= the_heap->begin &&        /* case 3 */
     66       starting_address < the_heap->end
    6767     )
    6868    return HEAP_EXTEND_ERROR;
    6969
    70   if ( starting_address < (void *) the_heap->start ) {  /* cases 1 and 2 */
    71 
    72       return HEAP_EXTEND_NOT_IMPLEMENTED;               /* cases 1 and 2 */
    73 
    74   } else {                                              /* cases 4 and 5 */
    75 
    76     the_block = (Heap_Block *)
    77        _Addresses_Subtract_offset( starting_address, HEAP_OVERHEAD );
    78     if ( the_block != the_heap->final )
    79       return HEAP_EXTEND_NOT_IMPLEMENTED;                   /* case 5 */
    80   }
     70  if ( starting_address != the_heap->end )
     71    return HEAP_EXTEND_NOT_IMPLEMENTED;         /* cases 1, 2, and 5 */
    8172
    8273  /*
     
    8677   */
    8778
     79  old_final = the_heap->final;
     80  the_heap->end = _Addresses_Add_offset( the_heap->end, size );
     81  the_size = _Addresses_Subtract( the_heap->end, old_final ) - HEAP_OVERHEAD;
     82  _Heap_Align_down( &the_size, the_heap->page_size );
     83
    8884  *amount_extended = size;
    8985
    90   old_final = the_heap->final;
    91   new_final = _Addresses_Add_offset( old_final, size );
    92   /* SAME AS: _Addresses_Add_offset( starting_address, size-HEAP_OVERHEAD ); */
     86  if( the_size < the_heap->min_block_size )
     87    return HEAP_EXTEND_SUCCESSFUL;
    9388
     89  old_final->size = the_size | (old_final->size & HEAP_PREV_USED);
     90  new_final = _Heap_Block_at( old_final, the_size );
     91  new_final->size = HEAP_PREV_USED;
    9492  the_heap->final = new_final;
    9593
    96   old_final->front_flag =
    97   new_final->back_flag  = _Heap_Build_flag( size, HEAP_BLOCK_USED );
    98   new_final->front_flag = HEAP_DUMMY_FLAG;
     94  stats->size += size;
     95  stats->used_blocks += 1;
     96  stats->frees -= 1;    /* Don't count subsequent call as actual free() */
    9997
    100   /*
    101    *  Must pass in address of "user" area
    102    *  So add in the offset field.
    103    */
    104 
    105   p = (uint32_t   *) &old_final->next;
    106   *p = sizeof(uint32_t  );
    107   p++;
    108   _Heap_Free( the_heap, p );
     98  _Heap_Free( the_heap, _Heap_User_area( old_final ) );
    10999
    110100  return HEAP_EXTEND_SUCCESSFUL;
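
A sketch, under simplified assumptions, of the bookkeeping the new _Heap_Extend performs when the added memory starts exactly at the heap's current end: subtract the per-block overhead, align the remainder down to the page size, and give up if the result is smaller than the heap's minimum block size. The constant and parameter names below are stand-ins, not the heap.h definitions.

  #include <stdint.h>
  #include <stdio.h>

  #define BLOCK_OVERHEAD 8u   /* stand-in for HEAP_OVERHEAD */

  /* Returns the usable size gained by extending the heap by 'size'
     bytes, or 0 when the gain is too small to form a block. */
  static uint32_t usable_extension(
    uint32_t size,
    uint32_t page_size,
    uint32_t min_block_size
  )
  {
    uint32_t usable = size > BLOCK_OVERHEAD ? size - BLOCK_OVERHEAD : 0;
    usable -= usable % page_size;            /* align down to page size */
    return usable >= min_block_size ? usable : 0;
  }

  int main( void )
  {
    printf( "%u\n", (unsigned) usable_extension( 100, 8, 16 ) );  /* prints 88 */
    printf( "%u\n", (unsigned) usable_extension(  20, 8, 16 ) );  /* prints 0  */
    return 0;
  }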
  • cpukit/score/src/heapfree.c

    rf3b7be3 r962e894f  
    4040  Heap_Block        *the_block;
    4141  Heap_Block        *next_block;
    42   Heap_Block        *new_next_block;
    43   Heap_Block        *previous_block;
    44   Heap_Block        *temporary_block;
    45   uint32_t           the_size;
     42  uint32_t         the_size;
     43  uint32_t         next_size;
     44  Heap_Statistics *const stats = &the_heap->stats;
     45  boolean            next_is_free;
    4646
    47   the_block = _Heap_User_block_at( starting_address );
     47  _Heap_Start_of_block( the_heap, starting_address, &the_block );
    4848
    49   if ( !_Heap_Is_block_in( the_heap, the_block ) ||
    50         _Heap_Is_block_free( the_block ) ) {
    51       return( FALSE );
     49  if ( !_Heap_Is_block_in( the_heap, the_block ) ) {
     50    _HAssert(starting_address == NULL);
     51    return( FALSE );
    5252  }
    5353
    54   the_size   = _Heap_Block_size( the_block );
     54  the_size = _Heap_Block_size( the_block );
    5555  next_block = _Heap_Block_at( the_block, the_size );
    5656
    57   if ( !_Heap_Is_block_in( the_heap, next_block ) ||
    58        (the_block->front_flag != next_block->back_flag) ) {
    59       return( FALSE );
     57  if ( !_Heap_Is_prev_used( next_block ) ) {
     58    _HAssert(FALSE);
     59    return( FALSE );
    6060  }
    6161
    62   if ( _Heap_Is_previous_block_free( the_block ) ) {
    63     previous_block = _Heap_Previous_block( the_block );
     62  if ( !_Heap_Is_block_in( the_heap, next_block ) ) {
     63    _HAssert(FALSE);
     64    return( FALSE );
     65  }
    6466
    65     if ( !_Heap_Is_block_in( the_heap, previous_block ) ) {
    66         return( FALSE );
     67  next_size = _Heap_Block_size( next_block );
     68  next_is_free = next_block < the_heap->final &&
     69    !_Heap_Is_prev_used(_Heap_Block_at(next_block, next_size));
     70
     71  if ( !_Heap_Is_prev_used( the_block ) ) {
     72    uint32_t const prev_size = the_block->prev_size;
     73    Heap_Block *const prev_block = _Heap_Block_at( the_block, -prev_size );
     74
     75    if ( !_Heap_Is_block_in( the_heap, prev_block ) ) {
     76      _HAssert(FALSE);
     77      return( FALSE );
    6778    }
    6879
    69     if ( _Heap_Is_block_free( next_block ) ) {      /* coalesce both */
    70       previous_block->front_flag += next_block->front_flag + the_size;
    71       temporary_block             = _Heap_Next_block( previous_block );
    72       temporary_block->back_flag  = previous_block->front_flag;
    73       next_block->next->previous  = next_block->previous;
    74       next_block->previous->next  = next_block->next;
     80    /* As we always coalesce free blocks, the block that precedes prev_block
     81       must have been used. */
     82    if ( !_Heap_Is_prev_used ( prev_block) ) {
     83      _HAssert(FALSE);
     84      return( FALSE );
    7585    }
    76     else {                     /* coalesce prev */
    77       previous_block->front_flag =
    78       next_block->back_flag      = previous_block->front_flag + the_size;
     86
     87    if ( next_is_free ) {       /* coalesce both */
     88      uint32_t const size = the_size + prev_size + next_size;
     89      _Heap_Block_remove( next_block );
     90      stats->free_blocks -= 1;
     91      prev_block->size = size | HEAP_PREV_USED;
     92      next_block = _Heap_Block_at( prev_block, size );
     93      _HAssert(!_Heap_Is_prev_used( next_block));
     94      next_block->prev_size = size;
     95    }
     96    else {                      /* coalesce prev */
     97      uint32_t const size = the_size + prev_size;
     98      prev_block->size = size | HEAP_PREV_USED;
     99      next_block->size &= ~HEAP_PREV_USED;
     100      next_block->prev_size = size;
    79101    }
    80102  }
    81   else if ( _Heap_Is_block_free( next_block ) ) { /* coalesce next */
    82     the_block->front_flag     = the_size + next_block->front_flag;
    83     new_next_block            = _Heap_Next_block( the_block );
    84     new_next_block->back_flag = the_block->front_flag;
    85     the_block->next           = next_block->next;
    86     the_block->previous       = next_block->previous;
    87     next_block->previous->next = the_block;
    88     next_block->next->previous = the_block;
     103  else if ( next_is_free ) {    /* coalesce next */
     104    uint32_t const size = the_size + next_size;
     105    _Heap_Block_replace( next_block, the_block );
     106    the_block->size = size | HEAP_PREV_USED;
     107    next_block  = _Heap_Block_at( the_block, size );
     108    next_block->prev_size = size;
     109  }
     110  else {                        /* no coalesce */
     111    /* Add 'the_block' to the head of the free blocks list as it tends to
     112       produce less fragmentation than adding to the tail. */
     113    _Heap_Block_insert_after( _Heap_Head( the_heap), the_block );
     114    the_block->size = the_size | HEAP_PREV_USED;
     115    next_block->size &= ~HEAP_PREV_USED;
     116    next_block->prev_size = the_size;
    89117
    90     if (the_heap->first == next_block)
    91         the_heap->first = the_block;
     118    stats->free_blocks += 1;
     119    if ( stats->max_free_blocks < stats->free_blocks )
     120      stats->max_free_blocks = stats->free_blocks;
    92121  }
    93   else {                          /* no coalesce */
    94     next_block->back_flag     =
    95     the_block->front_flag     = the_size;
    96     the_block->previous       = _Heap_Head( the_heap );
    97     the_block->next           = the_heap->first;
    98     the_heap->first           = the_block;
    99     the_block->next->previous = the_block;
    100   }
     122
     123  stats->used_blocks -= 1;
     124  stats->free_size += the_size;
     125  stats->frees += 1;
    101126
    102127  return( TRUE );
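
The four coalescing cases the rewritten _Heap_Free distinguishes can be summarized by this standalone sketch: prev_used mirrors the HEAP_PREV_USED bit of the block being freed, and next_free mirrors the check made against the block after next. The names and the enum are illustrative, not the RTEMS ones.

  #include <stdio.h>

  typedef enum {
    COALESCE_NONE,   /* insert the block at the head of the free list */
    COALESCE_PREV,   /* merge into the preceding free block           */
    COALESCE_NEXT,   /* absorb the following free block               */
    COALESCE_BOTH    /* merge all three into the preceding block      */
  } coalesce_case;

  static coalesce_case classify( int prev_used, int next_free )
  {
    if ( !prev_used && next_free ) return COALESCE_BOTH;
    if ( !prev_used )              return COALESCE_PREV;
    if ( next_free )               return COALESCE_NEXT;
    return COALESCE_NONE;
  }

  int main( void )
  {
    printf( "%d\n", classify( 0, 1 ) );  /* 3: both neighbours free       */
    printf( "%d\n", classify( 1, 0 ) );  /* 0: no merge, new free list head */
    return 0;
  }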
  • cpukit/score/src/heapgetfreeinfo.c

    rf3b7be3 r962e894f  
    3838{
    3939  Heap_Block *the_block;
     40  Heap_Block *const tail = _Heap_Tail(the_heap);
    4041
    4142  info->number = 0;
     
    4344  info->total = 0;
    4445
    45   for ( the_block = the_heap->first;
    46         ;
    47         the_block = the_block->next ) {
    48     if ( the_block == _Heap_Tail( the_heap ) )
    49       return;
    50    
     46  for(the_block = _Heap_First(the_heap);
     47      the_block != tail;
     48      the_block = the_block->next)
     49  {
     50    uint32_t const the_size = _Heap_Block_size(the_block);
     51
     52    /* As we always coalesce free blocks, prev block must have been used. */
     53    _HAssert(_Heap_Is_prev_used(the_block));
     54
    5155    info->number++;
    52     info->total += the_block->front_flag;
    53 
    54     if ( the_block->front_flag >= info->largest )
    55       info->largest = the_block->front_flag;
     56    info->total += the_size;
     57    if ( info->largest < the_size )
     58        info->largest = the_size;
    5659  }
    5760}
  • cpukit/score/src/heapgetinfo.c

    rf3b7be3 r962e894f  
    99 *  http://www.rtems.com/license/LICENSE.
    1010 *
     11 *  $Id$
    1112 */
    1213
     
    3839)
    3940{
    40   Heap_Block *the_block  = 0;  /* avoid warnings */
    41   Heap_Block *next_block = 0;  /* avoid warnings */
    42   int         notdone = 1;
    43   uint32_t    size;
     41  Heap_Block *the_block = the_heap->start;
     42  Heap_Block *const end = the_heap->final;
    4443
     44  _HAssert(the_block->prev_size == HEAP_PREV_USED);
     45  _HAssert(_Heap_Is_prev_used(the_block));
    4546
    4647  the_info->Free.number  = 0;
     
    5152  the_info->Used.largest = 0;
    5253
    53   /*
    54    * We don't want to allow walking the heap until we have
    55    * transferred control to the user task so we watch the
    56    * system state.
    57    */
     54  while ( the_block != end ) {
     55    uint32_t const the_size = _Heap_Block_size(the_block);
     56    Heap_Block *const next_block = _Heap_Block_at(the_block, the_size);
    5857
    59   if ( !_System_state_Is_up( _System_state_Get() ) )
    60     return HEAP_GET_INFORMATION_SYSTEM_STATE_ERROR;
     58    if ( _Heap_Is_prev_used(next_block) ) {
     59      the_info->Used.number++;
     60      the_info->Used.total += the_size;
     61      if ( the_info->Used.largest < the_size )
     62        the_info->Used.largest = the_size;
     63    } else {
     64      the_info->Free.number++;
     65      the_info->Free.total += the_size;
     66      if ( the_info->Free.largest < the_size )
     67        the_info->Free.largest = the_size;
     68      if ( the_size != next_block->prev_size )
     69        return HEAP_GET_INFORMATION_BLOCK_ERROR;
     70    }
    6171
    62   the_block = the_heap->start;
    63 
    64   /*
    65    * Handle the 1st block
    66    */
    67 
    68   if ( the_block->back_flag != HEAP_DUMMY_FLAG ) {
    69     return HEAP_GET_INFORMATION_BLOCK_ERROR;
     72    the_block = next_block;
    7073  }
    7174
    72   while (notdone) {
    73 
    74       /*
    75        * Accumulate size
    76        */
    77 
    78       size = _Heap_Block_size(the_block);
    79       if ( _Heap_Is_block_free(the_block) ) {
    80         the_info->Free.number++;
    81         the_info->Free.total += size;
    82         if ( size > the_info->Free.largest )
    83           the_info->Free.largest = size;
    84       } else {
    85         the_info->Used.number++;
    86         the_info->Used.total += size;
    87         if ( size > the_info->Used.largest )
    88           the_info->Used.largest = size;
    89       }
    90 
    91       /*
    92        * Handle the last block
    93        */
    94 
    95       if ( the_block->front_flag != HEAP_DUMMY_FLAG ) {
    96         next_block = _Heap_Next_block(the_block);
    97         if ( the_block->front_flag != next_block->back_flag ) {
    98           return HEAP_GET_INFORMATION_BLOCK_ERROR;
    99         }
    100       }
    101 
    102       if ( the_block->front_flag == HEAP_DUMMY_FLAG )
    103         notdone = 0;
    104       else
    105         the_block = next_block;
    106   }  /* while(notdone) */
     75  /* Handle the last dummy block. Don't consider this block to be
     76     "used" as client never allocated it. Make 'Used.total' contain this
     77     blocks' overhead though. */
     78  the_info->Used.total += HEAP_OVERHEAD;
    10779
    10880  return HEAP_GET_INFORMATION_SUCCESSFUL;
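
How the walk tells used blocks from free ones: the low bit of a block's size field records whether the previous block is in use, so the status of the current block is read from the following block's size field. A standalone sketch of that encoding (the bit value matches HEAP_PREV_USED; the other names are illustrative):

  #include <stdint.h>
  #include <stdio.h>

  #define PREV_USED 1u   /* low bit of the size field */

  static uint32_t block_size( uint32_t size_field ) { return size_field & ~PREV_USED; }
  static int      prev_used( uint32_t size_field )  { return ( size_field & PREV_USED ) != 0; }

  int main( void )
  {
    uint32_t next_size_field = 96u | PREV_USED;  /* next block: 96 bytes, prev in use */

    printf( "current block is %s\n", prev_used( next_size_field ) ? "used" : "free" );
    printf( "next block size %u\n", (unsigned) block_size( next_size_field ) );
    return 0;
  }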
  • cpukit/score/src/heapsizeofuserarea.c

    rf3b7be3 r962e894f  
    2121 *  _Heap_Size_of_user_area
    2222 *
    23  *  This kernel routine returns the size of the memory area
    24  *  given heap block.
     23 *  This kernel routine sets '*size' to the size of the block of memory
     24 *  which begins at 'starting_address'.
     25 *  It returns TRUE if the 'starting_address' is in the heap, and FALSE
     26 *  otherwise.
    2527 *
    2628 *  Input parameters:
    2729 *    the_heap         - pointer to heap header
    28  *    starting_address - starting address of the memory block to free.
     30 *    starting_address - starting address of the memory block
    2931 *    size             - pointer to size of area
    3032 *
     
    3840  Heap_Control        *the_heap,
    3941  void                *starting_address,
    40   uint32_t            *size
     42  size_t              *size
    4143)
    4244{
     
    4951    return( FALSE );
    5052
    51   the_block = _Heap_User_block_at( starting_address );
    52  
     53  _Heap_Start_of_block( the_heap, starting_address, &the_block );
     54
    5355  if ( !_Heap_Is_block_in( the_heap, the_block ) )
    54     return( FALSE );
    55 
    56   if ( _Heap_Is_block_free( the_block ) )
    5756    return( FALSE );
    5857
     
    6059  next_block = _Heap_Block_at( the_block, the_size );
    6160
    62   if ( !_Heap_Is_block_in( the_heap, next_block ) ||
    63        (the_block->front_flag != next_block->back_flag) )
     61  if (
     62    !_Heap_Is_block_in( the_heap, next_block ) ||
     63    !_Heap_Is_prev_used( next_block )
     64  )
    6465    return( FALSE );
    6566
    66   *size = the_size;
     67  /* 'starting_address' could be greater than 'the_block' address plus
     68     HEAP_BLOCK_USER_OFFSET as _Heap_Allocate_aligned() may produce such user
     69     pointers. To get rid of this offset we calculate user size as difference
     70     between the end of 'the_block' (='next_block') and 'starting_address'
     71     and then add correction equal to the offset of the 'size' field of the
     72     'Heap_Block' structure. The correction is due to the fact that
     73     'prev_size' field of the next block is actually used as user accessible
     74     area of 'the_block'. */
     75
     76  *size = _Addresses_Subtract ( next_block, starting_address )
     77    + HEAP_BLOCK_HEADER_OFFSET;
     78
    6779  return( TRUE );
    6880}
  • cpukit/score/src/heapwalk.c

    rf3b7be3 r962e894f  
    3131 */
    3232
    33 #ifndef RTEMS_DEBUG
     33#if !defined(RTEMS_HEAP_DEBUG)
    3434
    35 void _Heap_Walk(
     35boolean _Heap_Walk(
    3636  Heap_Control  *the_heap,
    3737  int            source,
     
    3939)
    4040{
     41  return TRUE;
    4142}
    4243
    43 #else
     44#else /* defined(RTEMS_HEAP_DEBUG) */
    4445
    4546#include <stdio.h>
    46 #include <unistd.h>
    4747
    48 void _Heap_Walk(
     48boolean _Heap_Walk(
    4949  Heap_Control  *the_heap,
    5050  int            source,
     
    5252)
    5353{
    54   Heap_Block *the_block  = 0;  /* avoid warnings */
    55   Heap_Block *next_block = 0;  /* avoid warnings */
    56   int         notdone = 1;
    57   int         error = 0;
    58   int         passes = 0;
     54  Heap_Block *the_block = the_heap->start;
     55  Heap_Block *const end = the_heap->final;
     56  Heap_Block *const tail = _Heap_Tail(the_heap);
     57  int error = 0;
     58  int passes = 0;
    5959
    6060  /*
     
    6565
    6666  if ( !_System_state_Is_up( _System_state_Get() ) )
    67     return;
     67    return FALSE;
    6868
    69   the_block = the_heap->start;
     69  if (source < 0)
     70    source = the_heap->stats.instance;
    7071
    71   if (do_dump == TRUE) {
    72     printf("\nPASS: %d  start @ 0x%p   final 0x%p,   first 0x%p  last 0x%p\n",
    73             source, the_heap->start, the_heap->final,
    74                   the_heap->first, the_heap->last
    75           );
    76   }
     72  if (do_dump == TRUE)
     73    printf("\nPASS: %d start %p final %p first %p last %p begin %p end %p\n",
     74      source, the_block, end,
     75      _Heap_First(the_heap), _Heap_Last(the_heap),
     76      the_heap->begin, the_heap->end);
    7777
    7878  /*
     
    8080   */
    8181
    82   if (the_block->back_flag != HEAP_DUMMY_FLAG) {
    83     printf("PASS: %d  Back flag of 1st block isn't HEAP_DUMMY_FLAG\n", source);
     82  if (!_Heap_Is_prev_used(the_block)) {
     83    printf("PASS: %d !HEAP_PREV_USED flag of 1st block isn't set\n", source);
    8484    error = 1;
    8585  }
    8686
    87   while (notdone) {
    88     passes++;
    89     if (error && (passes > 10))
    90         abort();
     87  if (the_block->prev_size != HEAP_PREV_USED) {
     88    printf("PASS: %d !prev_size of 1st block isn't HEAP_PREV_USED\n", source);
     89    error = 1;
     90  }
    9191
    92     if (do_dump == TRUE) {
    93       printf("PASS: %d  Block @ 0x%p   Back %d,   Front %d",
    94               source, the_block,
    95               the_block->back_flag, the_block->front_flag);
    96       if ( _Heap_Is_block_free(the_block) ) {
    97         printf( "      Prev 0x%p,   Next 0x%p\n",
    98                           the_block->previous, the_block->next);
    99       } else {
    100         printf("\n");
    101       }
     92  while ( the_block < end ) {
     93    uint32_t const the_size = _Heap_Block_size(the_block);
     94    Heap_Block *const next_block = _Heap_Block_at(the_block, the_size);
     95    boolean prev_used = _Heap_Is_prev_used(the_block);
     96
     97    if (do_dump) {
     98      printf("PASS: %d block %p size %d(%c)",
     99        source, the_block, the_size, (prev_used ? 'U' : 'F'));
     100      if (prev_used)
     101        printf(" prev_size %d", the_block->prev_size);
     102      else
     103        printf(" (prev_size) %d", the_block->prev_size);
    102104    }
    103105
    104     /*
    105      * Handle the last block
    106      */
     106    if (!_Heap_Is_block_in(the_heap, next_block)) {
     107      if (do_dump) printf("\n");
     108      printf("PASS: %d !block %p is out of heap\n", source, next_block);
     109      error = 1;
     110      break;
     111    }
    107112
    108     if ( the_block->front_flag != HEAP_DUMMY_FLAG ) {
    109       next_block = _Heap_Next_block(the_block);
    110       if ( the_block->front_flag != next_block->back_flag ) {
     113    if (!_Heap_Is_prev_used(next_block)) {
     114      if (do_dump)
     115        printf( " prev %p next %p", the_block->prev, the_block->next);
     116      if (_Heap_Block_size(the_block) != next_block->prev_size) {
     117        if (do_dump) printf("\n");
     118        printf("PASS: %d !front and back sizes don't match", source);
    111119        error = 1;
    112         printf("PASS: %d  Front and back flags don't match\n", source);
    113         printf("         Current Block (%p):  Back - %d,  Front - %d",
    114                the_block, the_block->back_flag, the_block->front_flag);
    115         if (do_dump == TRUE) {
    116           if (_Heap_Is_block_free(the_block)) {
    117             printf("      Prev 0x%p,   Next 0x%p\n",
    118                    the_block->previous, the_block->next);
    119           } else {
    120             printf("\n");
    121           }
    122         } else {
    123           printf("\n");
    124         }
    125         printf("         Next Block (%p):     Back - %d,  Front - %d",
    126                next_block, next_block->back_flag, next_block->front_flag);
    127         if (do_dump == TRUE) {
    128           if (_Heap_Is_block_free(next_block)) {
    129             printf("      Prev 0x%p,   Next 0x%p\n",
    130                    the_block->previous, the_block->next);
    131           } else {
    132             printf("\n");
    133           }
    134         } else {
    135           printf("\n");
     120      }
     121      if (!prev_used) {
     122        if (do_dump || error) printf("\n");
     123        printf("PASS: %d !two consecutive blocks are free", source);
     124        error = 1;
     125      }
     126
     127      { /* Check if 'the_block' is in the free block list */
     128        Heap_Block* block = _Heap_First(the_heap);
     129        while(block != the_block && block != tail)
     130          block = block->next;
     131        if(block != the_block) {
     132          if (do_dump || error) printf("\n");
     133          printf("PASS: %d !the_block not in the free list", source);
     134          error = 1;
    136135        }
    137136      }
     137
     138    }
     139    if (do_dump || error) printf("\n");
     140
     141    if (the_size < the_heap->min_block_size) {
     142      printf("PASS: %d !block size is too small\n", source);
     143      error = 1;
     144      break;
     145    }
     146    if (!_Heap_Is_aligned( the_size, the_heap->page_size)) {
     147      printf("PASS: %d !block size is misaligned\n", source);
     148      error = 1;
    138149    }
    139150
    140     if (the_block->front_flag == HEAP_DUMMY_FLAG)
    141       notdone = 0;
    142     else
    143       the_block = next_block;
     151    if (++passes > (do_dump ? 10 : 0) && error)
     152      break;
     153
     154    the_block = next_block;
    144155  }
    145156
    146   if (error)
    147       abort();
     157  if (the_block != end) {
     158    printf("PASS: %d !last block address isn't equal to 'final'\n", source);
     159    error = 1;
     160  }
     161
     162  if (_Heap_Block_size(the_block) != 0) {
     163    printf("PASS: %d !last block's size isn't 0\n", source);
     164    error = 1;
     165  }
     166
     167  if(do_dump && error)
     168    abort();
     169
     170  return error == 0;
     171
    148172}
    149 #endif
     173#endif  /* defined(RTEMS_HEAP_DEBUG) */
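
One invariant the reworked _Heap_Walk verifies is the classic boundary-tag consistency rule: when a block is free, its size must be mirrored in the following block's prev_size word. A trivial standalone sketch of that check, with illustrative names:

  #include <stdint.h>
  #include <stdio.h>

  int main( void )
  {
    uint32_t block_size     = 64;   /* size recorded in the free block      */
    uint32_t next_prev_size = 64;   /* copy kept in the next block's header */

    if ( block_size != next_prev_size )
      printf( "front and back sizes don't match\n" );
    else
      printf( "boundary tags consistent\n" );
    return 0;
  }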