Changeset 371cea31 in rtems for cpukit


Timestamp: Aug 26, 2009, 12:00:24 PM
Author: Joel Sherrill <joel.sherrill@…>
Branches: 4.10, 4.11, master
Children: 8de136b
Parents: 2bbfbf1
Message:

2009-08-24 Sebastian Huber <Sebastian.Huber@…>

  • libmisc/stackchk/check.c, rtems/src/regionreturnsegment.c, rtems/src/regiongetsegmentsize.c, src/heapalignupuptr.c, src/heapallocatealigned.c, src/heapallocate.c, src/heap.c, src/heapextend.c, src/heapfree.c, src/heapgetfreeinfo.c, src/heapgetinfo.c, src/heapresizeblock.c, src/heapsizeofuserarea.c, src/heapwalk.c, src/pheapgetblocksize.c, inline/rtems/score/heap.inl, include/rtems/score/heap.h: Overall cleanup. Changed all types for addresses, sizes, offsets and alignments to uintptr_t. Reformatted. Added variables for clarity. Renamed various objects. Enabled _HAssert() for all instances. More changes follow.
Location: cpukit
Files: 18 edited
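
The commit's central change is using uintptr_t for all addresses, sizes, offsets and alignments. A small standalone sketch (not RTEMS code; the page size of 8 is an assumed allocation unit) of why that type fits the align-up arithmetic the heap code performs on addresses:

    #include <stdint.h>
    #include <stdio.h>

    int main( void )
    {
      char buffer[ 64 ];

      /* uintptr_t round-trips a pointer on 16/32/64-bit targets, unlike a
       * fixed-width uint32_t, and its unsigned modular arithmetic matches
       * the align-up computation used throughout the reworked heap code. */
      uintptr_t addr = (uintptr_t) &buffer[ 1 ];
      uintptr_t page_size = 8; /* assumed allocation unit */
      uintptr_t remainder = addr % page_size;
      uintptr_t aligned = remainder != 0 ? addr - remainder + page_size : addr;

      printf( "%p aligned up to %p\n", (void *) addr, (void *) aligned );
      return 0;
    }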

  • cpukit/ChangeLog

    r2bbfbf1 r371cea31
    +2009-08-24	Sebastian Huber <Sebastian.Huber@embedded-brains.de>
    +
    +	* libmisc/stackchk/check.c, rtems/src/regionreturnsegment.c,
    +	rtems/src/regiongetsegmentsize.c, src/heapalignupuptr.c,
    +	src/heapallocatealigned.c, src/heapallocate.c, src/heap.c,
    +	src/heapextend.c, src/heapfree.c, src/heapgetfreeinfo.c,
    +	src/heapgetinfo.c, src/heapresizeblock.c, src/heapsizeofuserarea.c,
    +	src/heapwalk.c, src/pheapgetblocksize.c, inline/rtems/score/heap.inl,
    +	include/rtems/score/heap.h: Overall cleanup.  Changed all types for
    +	addresses, sizes, offsets and alignments to uintptr_t.  Reformatted.
    +	Added variables for clarity.  Renamed various objects.  Enabled
    +	_HAssert() for all instances.  More changes follow.
    +
     2009-08-25	Joel Sherrill <joel.sherrill@OARcorp.com>

  • cpukit/libmisc/stackchk/check.c

    r2bbfbf1 r371cea31
     #else
       #define Stack_check_Get_pattern_area( _the_stack ) \
    -    ((Stack_check_Control *) ((char *)(_the_stack)->area + HEAP_OVERHEAD))
    +    ((Stack_check_Control *) ((char *)(_the_stack)->area + HEAP_LAST_BLOCK_OVERHEAD))

       #define Stack_check_Calculate_used( _low, _size, _high_water) \
  • cpukit/rtems/src/regiongetsegmentsize.c

    r2bbfbf1 r371cea31

           case OBJECTS_LOCAL:
    -        if ( !_Heap_Size_of_user_area( &the_region->Memory, segment, size ) )
    +        if ( !_Heap_Size_of_alloc_area( &the_region->Memory, segment, size ) )
               return_status = RTEMS_INVALID_ADDRESS;
             else
  • cpukit/rtems/src/regionreturnsegment.c

    r2bbfbf1 r371cea31

     #ifdef RTEMS_REGION_FREE_SHRED_PATTERN
    -        if ( !_Heap_Size_of_user_area( &the_region->Memory, segment, &size ) )
    +        if ( !_Heap_Size_of_alloc_area( &the_region->Memory, segment, &size ) )
               return_status = RTEMS_INVALID_ADDRESS;
             else {
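
Both region files track only the rename of _Heap_Size_of_user_area() to _Heap_Size_of_alloc_area(). A self-contained sketch of the calling pattern in regionreturnsegment.c; size_of_alloc_area() is a hypothetical stand-in for the heap query, and the 128-byte entry size is an assumption:

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define FREE_SHRED_PATTERN 0xCF

    /* Hypothetical stand-in for _Heap_Size_of_alloc_area(): reports the usable
     * size of an allocated area.  Faked with a fixed size to stay runnable. */
    static bool size_of_alloc_area( void *area, uintptr_t *size )
    {
      (void) area;
      *size = 128; /* assumed entry size */
      return true;
    }

    /* Mirrors the pattern above: query the actual usable size, shred exactly
     * that many bytes, then return the segment. */
    static bool return_segment( void *segment )
    {
      uintptr_t size;

      if ( !size_of_alloc_area( segment, &size ) ) {
        return false; /* RTEMS_INVALID_ADDRESS in the real code */
      }
      memset( segment, FREE_SHRED_PATTERN, size );
      /* ... hand the segment back to the heap ... */
      return true;
    }

    int main( void )
    {
      static uint8_t pool[ 128 ];

      return return_segment( pool ) ? 0 : 1;
    }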
  • cpukit/score/include/rtems/score/heap.h

    r2bbfbf1 r371cea31
     /**
    - *  @file  rtems/score/heap.h
    - *
    - *  This include file contains the information pertaining to the Heap
    - *  Handler.  A heap is a doubly linked list of variable size
    - *  blocks which are allocated using the first fit method.  Garbage
    - *  collection is performed each time a block is returned to the heap by
    - *  coalescing neighbor blocks.  Control information for both allocated
    - *  and unallocated blocks is contained in the heap space.  A heap control
    - *  structure contains control information for the heap.
    - *
    - *  FIXME: the alignment routines could be made faster should we require only
    - *         powers of two to be supported both for 'page_size' and for
    - *         'alignment' arguments. However, both workspace and malloc heaps are
    - *         initialized with CPU_HEAP_ALIGNMENT as 'page_size', and while all
    - *         the BSPs seem to use CPU_ALIGNMENT (that is power of two) as
    - *         CPU_HEAP_ALIGNMENT, for whatever reason CPU_HEAP_ALIGNMENT is only
    - *         required to be multiple of CPU_ALIGNMENT and explicitly not
    - *         required to be a power of two.
    - *
    + * @file
    + *
    + * Heap Handler API.
    + */
    +
    +/*
      *  COPYRIGHT (c) 1989-2006.
      *  On-Line Applications Research Corporation (OAR).
    ...
     #define _RTEMS_SCORE_HEAP_H

    -/**
    - *  @defgroup ScoreHeap Heap Handler
    - *
    - *  This handler encapsulates functionality which provides the foundation
    - *  Heap services used in all of the APIs supported by RTEMS.
    - */
    -/**@{*/
    -
     #ifdef __cplusplus
     extern "C" {
    ...

     /**
    - * This type defines unsigned integer type to store 'void*'. Analog of C99
    - * 'uintptr_t'. This should work on 16/32/64 bit architectures.
    - *
    - * FIXME: Something like this should better be defined by
    - *        'rtems/score/types.h' and used here.
    - */
    -
    -typedef uintptr_t _H_uptr_t;
    -
    -/**
    - *  Forward reference
    - *
    - *  @ref Heap_Block
    - */
    -typedef struct Heap_Block_struct Heap_Block;
    -
    -/**
    - *  The following defines the data structure used to manage individual blocks
    - *  in a heap.  When the block is allocated, the 'next' and 'prev' fields, as
    - *  well as 'prev_size' field of the next block, are not used by the heap
    - *  manager and thus the address returned for the block starts at the address
    - *  of the 'next' field and the size of the user accessible area includes the
    - *  size of the 'prev_size' field.
    - *
    - *  @note The 'next' and 'prev' pointers are only valid when the block is free.
    - *     Caution must be taken to ensure that every block is large enough to
    - *     hold them and that they are not accessed while the block is actually
    - *     allocated (i.e., not free).
    - *
    - *  @note The 'prev_size' field is only valid when HEAP_PREV_USED bit is clear
    - *     in the 'size' field indicating that previous block is not allocated.
    - *     If the bit is set, the 'prev_size' field is part of user-accessible
    - *     space of the previous allocated block and thus shouldn't be accessed
    - *     by the heap manager code. This trick allows to further decrease
    - *     overhead in the used blocks to the size of 'size' field (4 bytes).
    - *
    - */
    -
    -struct Heap_Block_struct {
    -  /** size of prev block (if prev block is free) */
    -  uint32_t  prev_size;
    -  /** size of block in bytes and status of prev block */
    -  uint32_t  size;
    -  /** pointer to the next free block */
    -  Heap_Block *next;
    -  /** pointer to the previous free block */
    -  Heap_Block *prev;
    -};
    -
    -/**
    - *  This flag used in the 'size' field of each heap block to indicate
    - *  if previous block is free or in use. As sizes are always multiples of
    - *  4, the 2 least significant bits would always be 0, and we use one of them
    - *  to store the flag.
    - */
    -
    -#define HEAP_PREV_USED    1u    /* indicates previous block is in use */
    -
    -/**
    - *  The following constants reflect various requirements of the
    - *  heap data structures which impact the management of a heap.
    - */
    -
    -#define HEAP_MIN_BLOCK_SIZE (sizeof(Heap_Block))
    -
    -/**
    - *  Offset of the block header from the block pointer. Equal to the
    - *  offsetof(Heap_Block.size).
    - */
    -#define HEAP_BLOCK_HEADER_OFFSET (sizeof(uint32_t))
    -
    -/**
    - *  Offset of user data pointer from the block pointer. Equal to the
    - *  offset of(Heap_Block.next).
    - */
    -#define HEAP_BLOCK_USER_OFFSET (sizeof(uint32_t) * 2)
    -
    -/**
    - *  This is the number of bytes of overhead in a used block.
    - *  Equal to the sizeof(Heap_Block.previous and next).
    - */
    -#define HEAP_BLOCK_USED_OVERHEAD (sizeof(uint32_t) * 2)
    -
    -/** Size of the permanent dummy last block. */
    -#define HEAP_OVERHEAD HEAP_BLOCK_USER_OFFSET
    + * @defgroup ScoreHeap Heap Handler
    + *
    + * The Heap Handler provides a heap.
    + *
    + * A heap is a doubly linked list of variable size blocks which are allocated
    + * using the first fit method.  Garbage collection is performed each time a
    + * block is returned to the heap by coalescing neighbor blocks.  Control
    + * information for both allocated and free blocks is contained in the heap
    + * area.  A heap control structure contains control information for the heap.
    + *
    + * FIXME: The alignment routines could be made faster should we require only
    + * powers of two to be supported both for 'page_size' and for 'alignment'
    + * arguments. However, both workspace and malloc heaps are initialized with
    + * CPU_HEAP_ALIGNMENT as 'page_size', and while all the BSPs seem to use
    + * CPU_ALIGNMENT (that is power of two) as CPU_HEAP_ALIGNMENT, for whatever
    + * reason CPU_HEAP_ALIGNMENT is only required to be multiple of CPU_ALIGNMENT
    + * and explicitly not required to be a power of two.
    + *
    + * There are two kinds of blocks.  One sort describes a free block from which
    + * we can allocate memory.  The other blocks are used and contain allocated
    + * memory.  The free blocks are accessible via a list of free blocks.
    + *
    + * Free blocks look like:
    + * <table>
    + *   <tr>
    + *     <td rowspan=4>@ref Heap_Block</td><td>previous block size in case the
    + *       previous block is free, <br> otherwise it may contain data used by
    + *       the previous block</td>
    + *   </tr>
    + *   <tr>
    + *     <td>block size and a flag which indicates if the previous block is free
    + *       or used, <br> this field contains always valid data regardless of the
    + *       block usage</td>
    + *   </tr>
    + *   <tr><td>pointer to next block (this field is page size aligned)</td></tr>
    + *   <tr><td>pointer to previous block</td></tr>
    + *   <tr><td colspan=2>free space</td></tr>
    + * </table>
    + *
    + * Used blocks look like:
    + * <table>
    + *   <tr>
    + *     <td rowspan=4>@ref Heap_Block</td><td>previous block size in case the
    + *       previous block is free,<br>otherwise it may contain data used by
    + *       the previous block</td>
    + *   </tr>
    + *   <tr>
    + *     <td>block size and a flag which indicates if the previous block is free
    + *       or used, <br> this field contains always valid data regardless of the
    + *       block usage</td>
    + *   </tr>
    + *   <tr><td>begin of allocated area (this field is page size aligned)</td></tr>
    + *   <tr><td>allocated space</td></tr>
    + *   <tr><td colspan=2>allocated space</td></tr>
    + * </table>
    + *
    + * The heap area after initialization contains two blocks and looks like:
    + * <table>
    + *   <tr><th>Label</th><th colspan=2>Content</th></tr>
    + *   <tr><td>heap->begin</td><td colspan=2>heap area begin address</td></tr>
    + *   <tr><td>first_block->prev_size</td><td colspan=2>arbitrary value</td></tr>
    + *   <tr>
    + *     <td>first_block->size</td>
    + *     <td colspan=2>size available for allocation
    + *       | @c HEAP_PREV_BLOCK_USED</td>
    + *   </tr>
    + *   <tr>
    + *     <td>first_block->next</td><td>_Heap_Free_list_tail(heap)</td>
    + *     <td rowspan=3>memory area available for allocation</td>
    + *   </tr>
    + *   <tr><td>first_block->prev</td><td>_Heap_Free_list_head(heap)</td></tr>
    + *   <tr><td>...</td></tr>
    + *   <tr>
    + *     <td>second_block->prev_size</td><td colspan=2>size of first block</td>
    + *   </tr>
    + *   <tr>
    + *     <td>second_block->size</td>
    + *     <td colspan=2>arbitrary size | @c HEAP_PREV_BLOCK_FREE</td>
    + *   </tr>
    + *   <tr><td>heap->end</td><td colspan=2>heap area end address</td></tr>
    + * </table>
    + *
    + * @{
    + */
    +
    +/**
    + * @brief Description for free or used blocks.
    + */
    +typedef struct Heap_Block {
    +  /**
    +   * @brief Size of the previous block or part of the allocated area of the
    +   * previous block.
    +   *
    +   * This field is only valid if the previous block is free.  This case is
    +   * indicated by a cleared @c HEAP_PREV_BLOCK_USED flag in the
    +   * @a size_and_flag field of the current block.
    +   */
    +  uintptr_t prev_size;
    +
    +  /**
    +   * @brief Contains the size of the current block and a flag which indicates
    +   * if the previous block is free or used.
    +   *
    +   * If the flag @c HEAP_PREV_BLOCK_USED is set, then the previous block is
    +   * used, otherwise the previous block is free.  A used previous block may
    +   * claim the @a prev_size field for allocation.  This trick allows to
    +   * decrease the overhead in the used blocks by the size of the
    +   * @a prev_size field.  As sizes are always multiples of four, the two least
    +   * significant bits are always zero. We use one of them to store the flag.
    +   *
    +   * This field is always valid.
    +   */
    +  uintptr_t size_and_flag;
    +
    +  /**
    +   * @brief Pointer to the next free block or part of the allocated area.
    +   *
    +   * This field is page size aligned and begins of the allocated area in case
    +   * the block is used.
    +   *
    +   * This field is only valid if the block is free and thus part of the free
    +   * block list.
    +   */
    +  struct Heap_Block *next;
    +
    +  /**
    +   * @brief Pointer to the previous free block or part of the allocated area.
    +   *
    +   * This field is only valid if the block is free and thus part of the free
    +   * block list.
    +   */
    +  struct Heap_Block *prev;
    +} Heap_Block;
    +
    +#define HEAP_PREV_BLOCK_USED ((uintptr_t) 1)
    +
    +#define HEAP_PREV_BLOCK_FREE ((uintptr_t) 0)
    +
    +/**
    + * @brief Offset from the block begin up to the block size field.
    + */
    +#define HEAP_BLOCK_SIZE_OFFSET (sizeof(uintptr_t))
    +
    +/**
    + * @brief Offset from the block begin up to the allocated area begin.
    + */
    +#define HEAP_BLOCK_ALLOC_AREA_OFFSET (sizeof(uintptr_t) * 2)
    +
    +#define HEAP_BLOCK_USED_OVERHEAD (sizeof(uintptr_t) * 2)
    +
    +#define HEAP_LAST_BLOCK_OVERHEAD HEAP_BLOCK_ALLOC_AREA_OFFSET

     /**
    ...
       uint32_t instance;
       /** the size of the memory for heap */
    -  intptr_t size;
    +  uintptr_t size;
       /** current free size */
    -  intptr_t free_size;
    +  uintptr_t free_size;
       /** minimum free size ever */
    -  intptr_t min_free_size;
    +  uintptr_t min_free_size;
       /** current number of free blocks */
       uint32_t free_blocks;
    ...
     typedef struct {
       /** head and tail of circular list of free blocks */
    -  Heap_Block  free_list;
    +  Heap_Block free_list;
       /** allocation unit and alignment */
    -  uint32_t page_size;
    +  uintptr_t page_size;
       /** minimum block size aligned on page_size */
    -  uint32_t min_block_size;
    +  uintptr_t min_block_size;
       /** first address of memory for the heap */
    -  void       *begin;
    +  uintptr_t begin;
       /** first address past end of memory for the heap */
    -  void       *end;
    +  uintptr_t end;
       /** first valid block address in the heap */
       Heap_Block *start;
    ...
      *  Status codes for _Heap_Extend
      */
    -
     typedef enum {
       HEAP_EXTEND_SUCCESSFUL,
    ...
      *  Status codes for _Heap_Resize_block
      */
    -
     typedef enum {
       HEAP_RESIZE_SUCCESSFUL,
    ...
      *  Status codes for _Heap_Get_information
      */
    -
     typedef enum {
       HEAP_GET_INFORMATION_SUCCESSFUL = 0,
    ...

     /**
    - *  This routine initializes @a the_heap record to manage the
    - *  contiguous heap of @a size bytes which starts at @a starting_address.
    - *  Blocks of memory are allocated from the heap in multiples of
    - *  @a page_size byte units. If @a page_size is 0 or is not multiple of
    - *  CPU_ALIGNMENT, it's aligned up to the nearest CPU_ALIGNMENT boundary.
    - *
    - *  @param[in] the_heap is the heap to operate upon
    - *  @param[in] starting_address is the starting address of the memory for
    - *         the heap
    - *  @param[in] size is the size in bytes of the memory area for the heap
    - *  @param[in] page_size is the size in bytes of the allocation unit
    - *
    - *  @return This method returns the maximum memory available.  If
    - *          unsuccessful, 0 will be returned.
    - */
    -uint32_t   _Heap_Initialize(
    -  Heap_Control *the_heap,
    -  void         *starting_address,
    -  intptr_t      size,
    -  uint32_t      page_size
    -);
    -
    -/**
    - *  This routine grows @a the_heap memory area using the size bytes which
    + * Initializes the @a heap control block to manage the area starting at
    + * @a area_begin of @a area_size bytes.
    + *
    + * Blocks of memory are allocated from the heap in multiples of @a page_size
    + * byte units.  If the @a page_size is equal to zero or is not multiple of
    + * @c CPU_ALIGNMENT, it is aligned up to the nearest @c CPU_ALIGNMENT boundary.
    + *
    + * Returns the maximum memory available, or zero in case of failure.
    + */
    +uintptr_t _Heap_Initialize(
    +  Heap_Control *heap,
    +  void *area_begin,
    +  uintptr_t area_size,
    +  uintptr_t page_size
    +);
    +
    +/**
    + *  This routine grows @a heap memory area using the size bytes which
      *  begin at @a starting_address.
      *
    - *  @param[in] the_heap is the heap to operate upon
    + *  @param[in] heap is the heap to operate upon
      *  @param[in] starting_address is the starting address of the memory
      *         to add to the heap
    ...
      */
     Heap_Extend_status _Heap_Extend(
    -  Heap_Control *the_heap,
    -  void         *starting_address,
    -  intptr_t      size,
    -  intptr_t    *amount_extended
    +  Heap_Control *heap,
    +  void *area_begin,
    +  uintptr_t area_size,
    +  uintptr_t *amount_extended
     );

     /**
      *  This function attempts to allocate a block of @a size bytes from
    - *  @a the_heap.  If insufficient memory is free in @a the_heap to allocate
    + *  @a heap.  If insufficient memory is free in @a heap to allocate
      *  a block of the requested size, then NULL is returned.
      *
    - *  @param[in] the_heap is the heap to operate upon
    + *  @param[in] heap is the heap to operate upon
      *  @param[in] size is the amount of memory to allocate in bytes
      *  @return NULL if unsuccessful and a pointer to the block if successful
      */
    -void *_Heap_Allocate(
    -  Heap_Control *the_heap,
    -  intptr_t      size
    -);
    +void *_Heap_Allocate( Heap_Control *heap, uintptr_t size );

     /**
      *  This function attempts to allocate a memory block of @a size bytes from
    - *  @a the_heap so that the start of the user memory is aligned on the
    + *  @a heap so that the start of the user memory is aligned on the
      *  @a alignment boundary. If @a alignment is 0, it is set to CPU_ALIGNMENT.
      *  Any other value of @a alignment is taken "as is", i.e., even odd
    ...
      *  failure.
      *
    - *  @param[in] the_heap is the heap to operate upon
    + *  @param[in] heap is the heap to operate upon
      *  @param[in] size is the amount of memory to allocate in bytes
      *  @param[in] alignment the required alignment
    ...
      */
     void *_Heap_Allocate_aligned(
    -  Heap_Control *the_heap,
    -  intptr_t      size,
    -  uint32_t      alignment
    -);
    -
    -/**
    - *  This function sets @a *size to the size of the block of user memory
    +  Heap_Control *heap,
    +  uintptr_t size,
    +  uintptr_t alignment
    +);
    +
    +/**
    + *  This function sets @a size to the size of the block of allocatable area
      *  which begins at @a starting_address. The size returned in @a *size could
      *  be greater than the size requested for allocation.
    ...
      *  otherwise.
      *
    - *  @param[in] the_heap is the heap to operate upon
    + *  @param[in] heap is the heap to operate upon
      *  @param[in] starting_address is the starting address of the user block
      *         to obtain the size of
    ...
      *  @return *size filled in with the size of the user area for this block
      */
    -bool _Heap_Size_of_user_area(
    -  Heap_Control        *the_heap,
    -  void                *starting_address,
    -  intptr_t            *size
    +bool _Heap_Size_of_alloc_area(
    +  Heap_Control *heap,
    +  void *area_begin,
    +  uintptr_t *size
     );

    ...
      *  @a starting_address to the new @a size.
      *
    - *  @param[in] the_heap is the heap to operate upon
    + *  @param[in] heap is the heap to operate upon
      *  @param[in] starting_address is the starting address of the user block
      *         to be resized
    ...
      */
     Heap_Resize_status _Heap_Resize_block(
    -  Heap_Control *the_heap,
    +  Heap_Control *heap,
       void         *starting_address,
    -  intptr_t      size,
    -  intptr_t     *old_mem_size,
    -  intptr_t     *avail_mem_size
    +  uintptr_t      size,
    +  uintptr_t     *old_mem_size,
    +  uintptr_t     *avail_mem_size
     );

     /**
      *  This routine returns the block of memory which begins
    - *  at @a starting_address to @a the_heap.  Any coalescing which is
    + *  at @a alloc_area_begin to @a heap.  Any coalescing which is
      *  possible with the freeing of this routine is performed.
      *
    - *  @param[in] the_heap is the heap to operate upon
    + *  @param[in] heap is the heap to operate upon
      *  @param[in] start_address is the starting address of the user block
      *         to free
      *  @return true if successfully freed, false otherwise
      */
    -bool _Heap_Free(
    -  Heap_Control *the_heap,
    -  void         *start_address
    -);
    +bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin );

     /**
      *  This routine walks the heap to verify its integrity.
      *
    - *  @param[in] the_heap is the heap to operate upon
    + *  @param[in] heap is the heap to operate upon
      *  @param[in] source is a user specified integer which may be used to
      *         indicate where in the application this was invoked from
    ...
      */
     bool _Heap_Walk(
    -  Heap_Control *the_heap,
    +  Heap_Control *heap,
       int           source,
       bool          do_dump
    ...
      *  sizes.
      *
    - *  @param[in] the_heap pointer to heap header
    + *  @param[in] heap pointer to heap header
      *  @param[in] the_info pointer to a status information area
      *  @return *the_info is filled with status information
    ...
      */
     Heap_Get_information_status _Heap_Get_information(
    -  Heap_Control            *the_heap,
    +  Heap_Control            *heap,
       Heap_Information_block  *the_info
     );
    ...
      *  in the specified heap.
      *
    - *  @param[in] the_heap pointer to heap header.
    + *  @param[in] heap pointer to heap header.
      *  @param[in] info pointer to the free block information.
      *
    ...
      */
     void _Heap_Get_free_information(
    -  Heap_Control        *the_heap,
    +  Heap_Control        *heap,
       Heap_Information    *info
     );
    ...
     #if !defined(__RTEMS_APPLICATION__)

    -/**
    - *  A pointer to unsigned integer conversion.
    - */
    -#define _H_p2u(_p) ((_H_uptr_t)(_p))
    -
     #include <rtems/score/heap.inl>

     /**
    - *  Convert user requested 'size' of memory block to the block size.
    - *
    - *  @note This is an internal routine used by _Heap_Allocate() and
    - *  _Heap_Allocate_aligned().  Refer to 'heap.c' for details.
    - *
    - *  @param[in] size is the size of the block requested
    - *  @param[in] page_size is the page size of this heap instance
    - *  @param[in] min_size is minimum size block that should be allocated
    - *         from this heap instance
    - *
    - *  @return This method returns block size on success, 0 if overflow occured.
    - */
    -size_t _Heap_Calc_block_size(
    -  size_t   size,
    -  uint32_t page_size,
    -  uint32_t min_size
    + * @brief Returns the minimal block size for a block which may contain an area
    + * of size @a alloc_size for allocation, or zero in case of an overflow.
    + *
    + * Uses the heap values @a page_size and @a min_block_size.
    + */
    +uintptr_t _Heap_Calc_block_size(
    +  uintptr_t alloc_size,
    +  uintptr_t page_size,
    +  uintptr_t min_block_size
     );

     /**
      *  This method allocates a block of size @a alloc_size from @a the_block
    - *  belonging to @a the_heap. Split @a the_block if possible, otherwise
    + *  belonging to @a heap. Split @a the_block if possible, otherwise
      *  allocate it entirely.  When split, make the lower part used, and leave
      *  the upper part free.
    ...
      *  _Heap_Allocate_aligned().  Refer to 'heap.c' for details.
      *
    - *  @param[in] the_heap is the heap to operate upon
    + *  @param[in] heap is the heap to operate upon
      *  @param[in] the_block is the block to allocates the requested size from
      *  @param[in] alloc_size is the requested number of bytes to take out of
    ...
      *  @return This methods returns the size of the allocated block.
      */
    -uint32_t _Heap_Block_allocate(
    -  Heap_Control* the_heap,
    -  Heap_Block*   the_block,
    -  uint32_t      alloc_size
    -);
    -
    -/**
    - *  Align @a *value up to the nearest multiple of @a alignment.
    - *
    - *  @param[in] value is a pointer to be aligned.
    - *  @param[in] alignment is the alignment value.
    - *
    - *  @return Upon return, @a value will contain the aligned result.
    - */
    -void _Heap_Align_up_uptr (
    -  _H_uptr_t *value,
    -  uint32_t  alignment
    -);
    -
    -/*
    - * Debug support
    - */
    -
    -#if defined(RTEMS_DEBUG)
    -#define RTEMS_HEAP_DEBUG
    +uintptr_t _Heap_Block_allocate(
    +  Heap_Control *heap,
    +  Heap_Block *block,
    +  uintptr_t alloc_size
    +);
    +
    +/** @} */
    +
    +#ifdef RTEMS_DEBUG
    +  #define RTEMS_HEAP_DEBUG
    +  #define RTEMS_MALLOC_BOUNDARY_HELPERS
     #endif

    -/**
    - *  We do asserts only for heaps with instance number greater than 0 assuming
    - *  that the heap used for malloc is initialized first and thus has instance
    - *  number 0. Asserting malloc heap may lead to troubles as printf may invoke
    - *  malloc thus probably leading to infinite recursion.
    - */
    -#if defined(RTEMS_HEAP_DEBUG)
    -#include <assert.h>
    -
    -#define _HAssert(cond_) \
    -  do { \
    -    if (the_heap->stats.instance && !(cond_)) \
    -      __assert(__FILE__, __LINE__, #cond_);  \
    -  } while(0)
    -
    -#else  /* !defined(RTEMS_HEAP_DEBUG) */
    -
    -#define _HAssert(cond_) ((void)0)
    -
    -#endif /* !defined(RTEMS_HEAP_DEBUG) */
    +#ifdef RTEMS_HEAP_DEBUG
    +  #include <assert.h>
    +  #define _HAssert( cond ) \
    +    do { \
    +      if ( !(cond) ) { \
    +        __assert( __FILE__, __LINE__, #cond ); \
    +      } \
    +    } while (0)
    +#else
    +  #define _HAssert( cond ) ((void) 0)
    +#endif

     #endif /* !defined(__RTEMS_APPLICATION__) */
    ...
     #endif

    -/**@}*/
    -
     #endif
     /* end of include file */
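
The new Heap_Block documentation above hinges on one trick: block sizes are multiples of four, so the low bit of size_and_flag can carry HEAP_PREV_BLOCK_USED. A standalone sketch of that encoding (the names mirror heap.h, but this is an illustration, not the RTEMS sources):

    #include <assert.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define PREV_BLOCK_USED ((uintptr_t) 1)

    /* Pack a block size (a multiple of four) together with the flag. */
    static uintptr_t pack( uintptr_t size, bool prev_used )
    {
      return size | ( prev_used ? PREV_BLOCK_USED : 0 );
    }

    /* Masking off the flag bit recovers the size, as _Heap_Block_size() does. */
    static uintptr_t block_size( uintptr_t size_and_flag )
    {
      return size_and_flag & ~PREV_BLOCK_USED;
    }

    /* Testing the flag bit mirrors _Heap_Is_prev_used(). */
    static bool is_prev_used( uintptr_t size_and_flag )
    {
      return ( size_and_flag & PREV_BLOCK_USED ) != 0;
    }

    int main( void )
    {
      uintptr_t field = pack( 64, true );

      assert( block_size( field ) == 64 );
      assert( is_prev_used( field ) );
      return 0;
    }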
  • cpukit/score/inline/rtems/score/heap.inl

    r2bbfbf1 r371cea31
     /**
    - *  @file  rtems/score/heap.inl
    + * @file
      *
    - *  This file contains the static inline implementation of the inlined
    - *  routines from the heap handler.
    + * @brief Static inline implementations of the inlined routines from the heap
    + * handler.
      */

    ...
     #define _RTEMS_SCORE_HEAP_INL

    -/**
    - *  @addtogroup ScoreHeap
    - *  @{
    - */
    -
     #include <rtems/score/address.h>

     /**
    - *  This function returns the head of the specified heap.
    + * @addtogroup ScoreHeap
      *
    - *  @param[in] the_heap points to the heap being operated upon
    - *
    - *  @return This method returns a pointer to the dummy head of the free
    - *          block list.
    + * @{
      */
    -RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Head (
    -  Heap_Control *the_heap
    -)
    +
    +RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_head( Heap_Control *heap )
     {
    -  return &the_heap->free_list;
    +  return &heap->free_list;
     }

    -/**
    - *  This function returns the tail of the specified heap.
    - *
    - *  @param[in] the_heap points to the heap being operated upon
    - *
    - *  @return This method returns a pointer to the dummy tail of the heap
    - *          free list.
    - */
    -RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Tail (
    -  Heap_Control *the_heap
    -)
    +RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Free_list_tail( Heap_Control *heap )
     {
    -  return &the_heap->free_list;
    +  return &heap->free_list;
     }

    -/**
    - *  Return the first free block of the specified heap.
    - *
    - *  @param[in] the_heap points to the heap being operated upon
    - *
    - *  @return This method returns a pointer to the first free block.
    - */
    -RTEMS_INLINE_ROUTINE Heap_Block *_Heap_First (
    -  Heap_Control *the_heap
    -)
    +RTEMS_INLINE_ROUTINE Heap_Block *_Heap_First_free_block( Heap_Control *heap )
     {
    -  return _Heap_Head(the_heap)->next;
    +  return _Heap_Free_list_head(heap)->next;
     }

    -/**
    - *  Return the last free block of the specified heap.
    - *
    - *  @param[in] the_heap points to the heap being operated upon
    - *
    - *  @return This method returns a pointer to the last block on the free list.
    - */
    -RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Last (
    -  Heap_Control *the_heap
    -)
    +RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Last_free_block( Heap_Control *heap )
     {
    -  return _Heap_Tail(the_heap)->prev;
    +  return _Heap_Free_list_tail(heap)->prev;
     }

    -/**
    - *  This function removes 'the_block' from doubly-linked list.
    - *
    - *  @param[in] the_block is the block to remove from the heap used block
    - *             list.
    - */
    -RTEMS_INLINE_ROUTINE void _Heap_Block_remove (
    -  Heap_Block *the_block
    -)
    +RTEMS_INLINE_ROUTINE void _Heap_Block_remove_from_free_list( Heap_Block *block )
     {
    -  Heap_Block *block = the_block;
    -
       Heap_Block *next = block->next;
       Heap_Block *prev = block->prev;
    +
       prev->next = next;
       next->prev = prev;
     }

    -/**
    - *  This function replaces @a old_block by @a new_block in doubly-linked list.
    - *  When a block is allocated, the memory is allocated from the low memory
    - *  address range of the block.  This means that the upper address range of
    - *  the memory block must be added to the free block list in place of the
    - *  lower address portion being allocated.  This method is also used as part
    - *  of resizing a block.
    - *
    - *  @param[in] old_block is the block which is currently on the list.
    - *  @param[in] new_block is the new block which will replace it on the list.
    - */
    -
    -RTEMS_INLINE_ROUTINE void _Heap_Block_replace (
    +RTEMS_INLINE_ROUTINE void _Heap_Block_replace_in_free_list(
       Heap_Block *old_block,
       Heap_Block *new_block
     )
     {
    -  Heap_Block *block = old_block;
    -  Heap_Block *next = block->next;
    -  Heap_Block *prev = block->prev;
    +  Heap_Block *next = old_block->next;
    +  Heap_Block *prev = old_block->prev;

    -  block = new_block;
    -  block->next = next;
    -  block->prev = prev;
    -  next->prev = prev->next = block;
    +  new_block->next = next;
    +  new_block->prev = prev;
    +
    +  next->prev = new_block;
    +  prev->next = new_block;
     }

    -/**
    - *  This function inserts @a the_block after @a prev_block
    - *  in the doubly-linked free block list.
    - *
    - *  @param[in] prev_block is the previous block in the free list.
    - *  @param[in] the_block is the block being freed.
    - */
    -RTEMS_INLINE_ROUTINE void _Heap_Block_insert_after (
    +RTEMS_INLINE_ROUTINE void _Heap_Block_insert_after(
       Heap_Block *prev_block,
    -  Heap_Block *the_block
    +  Heap_Block *new_block
     )
     {
    -  Heap_Block *prev = prev_block;
    -  Heap_Block *block = the_block;
    +  Heap_Block *next = prev_block->next;

    -  Heap_Block *next = prev->next;
    -  block->next  = next;
    -  block->prev  = prev;
    -  next->prev = prev->next = block;
    +  new_block->next = next;
    +  new_block->prev = prev_block;
    +  prev_block->next = new_block;
    +  next->prev = new_block;
     }

    -/**
    - *  Return true if @a value is a multiple of @a alignment,  false otherwise
    - *
    - *  @param[in] value is the address to verify alignment of.
    - *  @param[in] alignment is the alignment factor to verify.
    - *
    - *  @return This method returns true if the address is aligned and false
    - *          otherwise.
    - */
    -RTEMS_INLINE_ROUTINE bool _Heap_Is_aligned (
    -  uint32_t  value,
    -  uint32_t  alignment
    +RTEMS_INLINE_ROUTINE bool _Heap_Is_aligned(
    +  uintptr_t value,
    +  uintptr_t alignment
     )
     {
    ...
     }

    -/**
    - *  Align @a *value up to the nearest multiple of @a alignment.
    - *
    - *  @param[in] value is a pointer to be aligned.
    - *  @param[in] alignment is the alignment value.
    - *
    - *  @return Upon return, @a value will contain the aligned result.
    - */
    -RTEMS_INLINE_ROUTINE void _Heap_Align_up (
    -  uint32_t *value,
    -  uint32_t  alignment
    +RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_up(
    +  uintptr_t value,
    +  uintptr_t alignment
     )
     {
    -  uint32_t v = *value;
    -  uint32_t a = alignment;
    -  uint32_t r = v % a;
    -  *value = r ? v - r + a : v;
    +  uintptr_t remainder = value % alignment;
    +
    +  if ( remainder != 0 ) {
    +    return value - remainder + alignment;
    +  } else {
    +    return value;
    +  }
    +}
    +
    +RTEMS_INLINE_ROUTINE uintptr_t _Heap_Align_down(
    +  uintptr_t value,
    +  uintptr_t alignment
    +)
    +{
    +  return value - (value % alignment);
     }

     /**
    - *  Align @a *value down to the nearest multiple of @a alignment.
    - *
    - *  @param[in] value is a pointer to be aligned.
    - *  @param[in] alignment is the alignment value.
    - *
    - *  @return Upon return, @a value will contain the aligned result.
    + * @brief Returns the block which is @a offset away from @a block.
      */
    -RTEMS_INLINE_ROUTINE void _Heap_Align_down (
    -  uint32_t *value,
    -  uint32_t  alignment
    +RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
    +  const Heap_Block *block,
    +  uintptr_t offset
     )
     {
    -  uint32_t v = *value;
    -  *value = v - (v % alignment);
    +  return (Heap_Block *) ((uintptr_t) block + offset);
     }

     /**
    - *  Return true if @a ptr is aligned at @a alignment boundary,
    - *  false otherwise.
    - *
    - *  @param[in] ptr is the pointer to verify alignment of.
    - *  @param[in] alignment is the alignment factor.
    - *
    - *  @return This method returns true if @a ptr is aligned at @a alignment
    - *          boundary, and false otherwise.
    + * @brief Returns the begin of the allocatable area of @a block.
      */
    -RTEMS_INLINE_ROUTINE bool _Heap_Is_aligned_ptr (
    -  void      *ptr,
    -  uint32_t  alignment
    +RTEMS_INLINE_ROUTINE uintptr_t _Heap_Alloc_area_of_block(
    +  Heap_Block *block
     )
     {
    -  return (_H_p2u(ptr) % alignment) == 0;
    +  return (uintptr_t) block + HEAP_BLOCK_ALLOC_AREA_OFFSET;
     }

     /**
    - *  Align @a *value down to the nearest multiple of @a alignment.
    - *
    - *  @param[in] value is a pointer to be aligned.
    - *  @param[in] alignment is the alignment value.
    - *
    - *  @return Upon return, @a value will contain the aligned result.
    + * @brief Returns the block associated with the allocatable area starting at
    + * @a alloc_area_begin inside a heap with a page size of @a page_size.
      */
    -RTEMS_INLINE_ROUTINE void _Heap_Align_down_uptr (
    -  _H_uptr_t *value,
    -  uint32_t  alignment
    +RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_of_alloc_area(
    +  uintptr_t alloc_area_begin,
    +  uintptr_t page_size
     )
     {
    -  _H_uptr_t v = *value;
    -  *value = v - (v % alignment);
    +  return (Heap_Block *) (_Heap_Align_down( alloc_area_begin, page_size )
    +    - HEAP_BLOCK_ALLOC_AREA_OFFSET);
    +}
    +
    +RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( Heap_Block *block )
    +{
    +  return block->size_and_flag & HEAP_PREV_BLOCK_USED;
    +}
    +
    +RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( Heap_Block *block )
    +{
    +  return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
    +}
    +
    +RTEMS_INLINE_ROUTINE bool _Heap_Is_block_in_heap(
    +  Heap_Control *heap,
    +  Heap_Block *block
    +)
    +{
    +  return _Addresses_Is_in_range( block, heap->start, heap->final );
     }

     /**
    - *  This function calculates and returns a block's location (address)
    - *  in the heap based upon a base address @a base and an @a offset.
    - *
    - *  @param[in] base is the base address of the memory area.
    - *  @param[in] offset is the byte offset into @a base.
    - *
    - *  @return This method returns a pointer to the block's address.
    + * @brief Returns the maximum size of the heap.
      */
    -RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
    -  void     *base,
    -  int32_t   offset
    -)
    +RTEMS_INLINE_ROUTINE uintptr_t _Heap_Get_size( Heap_Control *heap )
     {
    -  return (Heap_Block *) _Addresses_Add_offset( base, offset );
    +  return (uintptr_t) heap->end - (uintptr_t) heap->begin;
     }

    -/**
    - *  This function returns the starting address of the portion of @a the_block
    - *  which the user may access.
    - *
    - *  @param[in] the_block is the heap block to find the user area of.
    - *
    - *  @return This method returns a pointer to the start of the user area in
    - *          the block.
    - */
    -RTEMS_INLINE_ROUTINE void *_Heap_User_area (
    -  Heap_Block *the_block
    -)
    -{
    -  return (void *) _Addresses_Add_offset ( the_block, HEAP_BLOCK_USER_OFFSET );
    -}
    -
    -/**
    - *  Fill @a *the_block with the address of the beginning of the block given
    - *  pointer to the user accessible area @a base.
    - *
    - *  @param[in] the_heap points to the heap being operated upon
    - *  @param[in] base points to the user area in the block.
    - *  @param[in] the_block will be the address of the heap block.
    - *
    - *  @return This method returns a pointer to the heap block based upon the
    - *               given heap and user pointer.
    - */
    -RTEMS_INLINE_ROUTINE void _Heap_Start_of_block (
    -  Heap_Control  *the_heap,
    -  void          *base,
    -  Heap_Block   **the_block
    -)
    -{
    -  _H_uptr_t addr = _H_p2u(base);
    -  /* The address passed could be greater than the block address plus
    -   * HEAP_BLOCK_USER_OFFSET as _Heap_Allocate_aligned() may produce such user
    -   * pointers. To get rid of this offset we need to align the address down
    -   * to the nearest 'page_size' boundary. */
    -  _Heap_Align_down_uptr ( &addr, the_heap->page_size );
    -  *the_block = (Heap_Block *)(addr - HEAP_BLOCK_USER_OFFSET);
    -}
    -
    -/**
    - *  This function returns true if the previous block of @a the_block
    - *  is in use, and false otherwise.
    - *
    - *  @param[in] the_block is the block to operate upon.
    - *
    - *  @return This method returns true if the previous block is used and false
    - *          if the previous block is free.
    - */
    -RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used (
    -  Heap_Block *the_block
    -)
    -{
    -  return (the_block->size & HEAP_PREV_USED);
    -}
    -
    -/**
    - *  This function returns the size of @a the_block in bytes.
    - *
    - *  @param[in] the_block is the block to operate upon.
    - *
    - *  @return This method returns the size of the specified heap block in bytes.
    - */
    -RTEMS_INLINE_ROUTINE uint32_t _Heap_Block_size (
    -  Heap_Block *the_block
    -)
    -{
    -  return (the_block->size & ~HEAP_PREV_USED);
    -}
    -
    -/**
    - *  This function returns true if @a the_block is within the memory area
    - *  managed by @a the_heap, and false otherwise.
    - *
    - *  @param[in] the_heap points to the heap being operated upon
    - *  @param[in] the_block is the block address to check.
    - *
    - *  @return This method returns true if @a the_block appears to have been
    - *          allocated from @a the_heap, and false otherwise.
    - */
    -RTEMS_INLINE_ROUTINE bool _Heap_Is_block_in (
    -  Heap_Control *the_heap,
    -  Heap_Block   *the_block
    -)
    -{
    -  return _Addresses_Is_in_range( the_block, the_heap->start, the_heap->final );
    -}
    -
    -/**
    - *  This function returns the maximum size of the heap.
    - *
    - *  @param[in] the_heap points to the heap being operated upon
    - *
    - *  @return This method returns the total amount of memory
    - *          allocated to the heap.
    - */
    -RTEMS_INLINE_ROUTINE int32_t _Heap_Get_size (
    -  Heap_Control *the_heap
    -)
    -{
    -  return _Addresses_Subtract( the_heap->end, the_heap->begin );
    -}
    -
    -/**@}*/
    +/** @} */

     #endif
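
The inlines above form a round trip: _Heap_Alloc_area_of_block() adds a fixed offset to the block address, and _Heap_Block_of_alloc_area() aligns an allocation address down to the page size before subtracting that offset again. A standalone sketch under assumed values (page size 8, addresses as plain integers):

    #include <assert.h>
    #include <stdint.h>

    #define ALLOC_AREA_OFFSET ( sizeof( uintptr_t ) * 2 )

    static uintptr_t align_down( uintptr_t value, uintptr_t alignment )
    {
      return value - ( value % alignment );
    }

    static uintptr_t alloc_area_of_block( uintptr_t block )
    {
      return block + ALLOC_AREA_OFFSET;
    }

    static uintptr_t block_of_alloc_area( uintptr_t alloc_begin, uintptr_t page_size )
    {
      /* _Heap_Allocate_aligned() may hand out an address past the area begin;
       * aligning down to the page size recovers the area begin first. */
      return align_down( alloc_begin, page_size ) - ALLOC_AREA_OFFSET;
    }

    int main( void )
    {
      uintptr_t const page_size = 8;
      uintptr_t const block = 1024 - ALLOC_AREA_OFFSET; /* area begin lands on 1024 */

      assert( alloc_area_of_block( block ) % page_size == 0 );
      assert( block_of_alloc_area( alloc_area_of_block( block ), page_size ) == block );
      return 0;
    }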
  • cpukit/score/src/heap.c

    r2bbfbf1 r371cea31  
    2828 *
    2929 *  Input parameters:
    30  *    the_heap         - pointer to heap header
    31  *    starting_address - starting address of heap
     30 *    heap         - pointer to heap header
     31 *    area_begin - starting address of heap
    3232 *    size             - size of heap
    3333 *    page_size        - allocatable unit of memory
     
    4040 *
    4141 *
    42  *            +--------------------------------+ <- begin = starting_address
     42 *            +--------------------------------+ <- begin = area_begin
    4343 *            |  unused space due to alignment |
    4444 *            |       size < page_size         |
     
    7373 *  'heapallocatealigned.c' for details).]
    7474 *
    75  *            +--------------------------------+ <- begin = starting_address
     75 *            +--------------------------------+ <- begin = area_begin
    7676 *            |  unused space due to alignment |
    7777 *            |       size < page_size         |
     
    112112 */
    113113
    114 uint32_t  _Heap_Initialize(
    115   Heap_Control        *the_heap,
    116   void                *starting_address,
    117   intptr_t             size,
    118   uint32_t            page_size
     114uintptr_t _Heap_Initialize(
     115  Heap_Control *heap,
     116  void *area_begin,
     117  uintptr_t area_size,
     118  uintptr_t page_size
    119119)
    120120{
    121   Heap_Block            *the_block;
    122   uint32_t               the_size;
    123   _H_uptr_t              start;
    124   _H_uptr_t              aligned_start;
    125   uint32_t               overhead;
    126   Heap_Statistics *const stats = &the_heap->stats;
    127 
    128   if (page_size == 0)
     121  Heap_Statistics * const stats = &heap->stats;
     122  uintptr_t heap_area_begin = (uintptr_t) area_begin;
     123  uintptr_t heap_area_end = heap_area_begin + area_size;
     124  uintptr_t alloc_area_begin = heap_area_begin + HEAP_BLOCK_ALLOC_AREA_OFFSET;
     125  uintptr_t alloc_area_size = 0;
     126  uintptr_t overhead = 0;
     127  Heap_Block *first_block = NULL;
     128  Heap_Block *second_block = NULL;
     129
     130  if ( page_size == 0 ) {
    129131    page_size = CPU_ALIGNMENT;
    130   else
    131     _Heap_Align_up( &page_size, CPU_ALIGNMENT );
    132 
    133   /* Calculate aligned_start so that aligned_start + HEAP_BLOCK_USER_OFFSET
    134      (value of user pointer) is aligned on 'page_size' boundary. Make sure
    135      resulting 'aligned_start' is not below 'starting_address'. */
    136   start = _H_p2u(starting_address);
    137   aligned_start = start + HEAP_BLOCK_USER_OFFSET;
    138   _Heap_Align_up_uptr ( &aligned_start, page_size );
    139   aligned_start -= HEAP_BLOCK_USER_OFFSET;
    140 
    141   /* Calculate 'min_block_size'. It's HEAP_MIN_BLOCK_SIZE aligned up to the
    142      nearest multiple of 'page_size'. */
    143   the_heap->min_block_size = HEAP_MIN_BLOCK_SIZE;
    144   _Heap_Align_up ( &the_heap->min_block_size, page_size );
    145 
    146   /* Calculate 'the_size' -- size of the first block so that there is enough
    147      space at the end for the permanent last block. It is equal to 'size'
    148      minus total overhead aligned down to the nearest multiple of
    149      'page_size'. */
    150   overhead = HEAP_OVERHEAD + (aligned_start - start);
    151   if ( size < overhead )
    152     return 0;   /* Too small area for the heap */
    153   the_size = size - overhead;
    154   _Heap_Align_down ( &the_size, page_size );
    155   if ( the_size == 0 )
    156     return 0;   /* Too small area for the heap */
    157 
    158   the_heap->page_size = page_size;
    159   the_heap->begin = starting_address;
    160   the_heap->end = starting_address + size;
    161 
    162   the_block = (Heap_Block *) aligned_start;
    163 
    164   the_block->prev_size = page_size;
    165   the_block->size = the_size | HEAP_PREV_USED;
    166   the_block->next = _Heap_Tail( the_heap );
    167   the_block->prev = _Heap_Head( the_heap );
    168   _Heap_Head(the_heap)->next = the_block;
    169   _Heap_Tail(the_heap)->prev = the_block;
    170   the_heap->start = the_block;
    171 
    172   _HAssert(_Heap_Is_aligned(the_heap->page_size, CPU_ALIGNMENT));
    173   _HAssert(_Heap_Is_aligned(the_heap->min_block_size, page_size));
    174   _HAssert(_Heap_Is_aligned_ptr(_Heap_User_area(the_block), page_size));
    175 
    176   the_block = _Heap_Block_at( the_block, the_size );
    177   the_heap->final = the_block;       /* Permanent final block of the heap */
    178   the_block->prev_size = the_size;   /* Previous block is free */
    179   the_block->size = page_size;
    180 
    181   stats->size = size;
    182   stats->free_size = the_size;
    183   stats->min_free_size = the_size;
     132  } else {
     133    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
     134  }
     135
     136  heap->min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );
     137
     138  alloc_area_begin = _Heap_Align_up( alloc_area_begin, page_size );
     139  overhead = HEAP_LAST_BLOCK_OVERHEAD
     140    + (alloc_area_begin - HEAP_BLOCK_ALLOC_AREA_OFFSET - heap_area_begin);
     141  alloc_area_size = _Heap_Align_down ( area_size - overhead, page_size );
     142
     143  if (
     144    heap_area_end < heap_area_begin
     145      || area_size < overhead
     146      || alloc_area_size == 0
     147  ) {
     148    /* Invalid area or area too small */
     149    return 0;
     150  }
     151
     152  heap->page_size = page_size;
     153  heap->begin = heap_area_begin;
     154  heap->end = heap_area_end;
     155
     156  /* First block */
     157  first_block = _Heap_Block_of_alloc_area( alloc_area_begin, page_size );
     158  first_block->prev_size = page_size;
     159  first_block->size_and_flag = alloc_area_size | HEAP_PREV_BLOCK_USED;
     160  first_block->next = _Heap_Free_list_tail( heap );
     161  first_block->prev = _Heap_Free_list_head( heap );
     162  _Heap_Free_list_head( heap )->next = first_block;
     163  _Heap_Free_list_tail( heap )->prev = first_block;
     164  heap->start = first_block;
     165
     166  /* Second and last block */
     167  second_block = _Heap_Block_at( first_block, alloc_area_size );
     168  second_block->prev_size = alloc_area_size;
     169  second_block->size_and_flag = page_size | HEAP_PREV_BLOCK_FREE;
     170  heap->final = second_block;
     171
     172  /* Statistics */
     173  stats->size = area_size;
     174  stats->free_size = alloc_area_size;
     175  stats->min_free_size = alloc_area_size;
    184176  stats->free_blocks = 1;
    185177  stats->max_free_blocks = 1;
     
    192184  stats->instance = instance++;
    193185
    194   return ( the_size - HEAP_BLOCK_USED_OVERHEAD );
     186  _HAssert( _Heap_Is_aligned( CPU_ALIGNMENT, 4 ));
     187  _HAssert( _Heap_Is_aligned( heap->page_size, CPU_ALIGNMENT ));
     188  _HAssert( _Heap_Is_aligned( heap->min_block_size, page_size ));
     189  _HAssert(
     190    _Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
     191  );
     192  _HAssert(
     193    _Heap_Is_aligned( _Heap_Alloc_area_of_block( second_block ), page_size )
     194  );
     195
     196  return alloc_area_size;
    195197}
    196198
    197 /*PAGE
    198  *
    199  * Internal routines shared by _Heap_Allocate() and _Heap_Allocate_aligned().
    200  *
    201  * Note: there is no reason to put them into a separate file(s) as they are
    202  * always required for heap to be useful.
    203  */
    204 
    205 /*
    206  * Convert user requested 'size' of memory block to the block size.
    207  * Return block size on success, 0 if overflow occured
    208  */
    209 size_t _Heap_Calc_block_size(
    210   size_t   size,
    211   uint32_t page_size,
    212   uint32_t min_size)
     199uintptr_t _Heap_Calc_block_size(
     200  uintptr_t alloc_size,
     201  uintptr_t page_size,
     202  uintptr_t min_block_size)
    213203{
    214   uint32_t block_size = size + HEAP_BLOCK_USED_OVERHEAD;
    215   _Heap_Align_up(&block_size, page_size);
    216   if (block_size < min_size) block_size = min_size;
    217   /* 'block_size' becomes <= 'size' if and only if overflow occured. */
    218   return (block_size > size) ? block_size : 0;
     204  uintptr_t block_size =
     205    _Heap_Align_up( alloc_size + HEAP_BLOCK_USED_OVERHEAD, page_size );
     206
     207  if (block_size < min_block_size) {
     208    block_size = min_block_size;
     209  }
     210
     211  if (block_size > alloc_size) {
     212    return block_size;
     213  } else {
     214    /* Integer overflow occured */
     215    return 0;
     216  }
    219217}
    220218
    221 /*
    222  * Allocate block of size 'alloc_size' from 'the_block' belonging to
    223  * 'the_heap'. Split 'the_block' if possible, otherwise allocate it entirely.
    224  * When split, make the lower part used, and leave the upper part free.
    225  * Return the size of allocated block.
    226  */
    227 uint32_t _Heap_Block_allocate(
    228   Heap_Control* the_heap,
    229   Heap_Block*   the_block,
    230   uint32_t      alloc_size
     219uintptr_t _Heap_Block_allocate(
     220  Heap_Control *heap,
     221  Heap_Block *block,
     222  uintptr_t alloc_size
    231223)
    232224{
    233   Heap_Statistics *const stats = &the_heap->stats;
    234   uint32_t const block_size = _Heap_Block_size(the_block);
    235   uint32_t const the_rest = block_size - alloc_size;
    236 
    237   _HAssert(_Heap_Is_aligned(block_size, the_heap->page_size));
    238   _HAssert(_Heap_Is_aligned(alloc_size, the_heap->page_size));
    239   _HAssert(alloc_size <= block_size);
    240   _HAssert(_Heap_Is_prev_used(the_block));
    241 
    242   if(the_rest >= the_heap->min_block_size) {
    243     /* Split the block so that upper part is still free, and lower part
    244        becomes used. This is slightly less optimal than leaving lower part
    245        free as it requires replacing block in the free blocks list, but it
    246        makes it possible to reuse this code in the _Heap_Resize_block(). */
    247     Heap_Block *next_block = _Heap_Block_at(the_block, alloc_size);
    248     _Heap_Block_replace(the_block, next_block);
    249     the_block->size = alloc_size | HEAP_PREV_USED;
    250     next_block->size = the_rest | HEAP_PREV_USED;
    251     _Heap_Block_at(next_block, the_rest)->prev_size = the_rest;
    252   }
    253   else {
    254     /* Don't split the block as remainder is either zero or too small to be
    255        used as a separate free block. Change 'alloc_size' to the size of the
    256        block and remove the block from the list of free blocks. */
    257     _Heap_Block_remove(the_block);
     225  Heap_Statistics * const stats = &heap->stats;
     226  uintptr_t const block_size = _Heap_Block_size( block );
     227  uintptr_t const unused_size = block_size - alloc_size;
     228  Heap_Block *next_block = _Heap_Block_at( block, block_size );
     229
     230  _HAssert( _Heap_Is_aligned( block_size, heap->page_size ));
     231  _HAssert( _Heap_Is_aligned( alloc_size, heap->page_size ));
     232  _HAssert( alloc_size <= block_size );
     233  _HAssert( _Heap_Is_prev_used( block ));
     234
     235  if (unused_size >= heap->min_block_size) {
     236    /*
     237     * Split the block so that the upper part is still free, and the lower part
     238     * becomes used.  This is slightly less optimal than leaving the lower part
     239     * free as it requires replacing block in the free blocks list, but it
     240     * makes it possible to reuse this code in the _Heap_Resize_block().
     241     */
     242    Heap_Block *new_block = _Heap_Block_at( block, alloc_size );
     243    block->size_and_flag = alloc_size | HEAP_PREV_BLOCK_USED;
     244    new_block->size_and_flag = unused_size | HEAP_PREV_BLOCK_USED;
     245    next_block->prev_size = unused_size;
     246    _Heap_Block_replace_in_free_list( block, new_block );
     247  } else {
     248    next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
    258249    alloc_size = block_size;
    259     _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;
    260     stats->free_blocks -= 1;
    261   }
    262   /* Update statistics */
     250    _Heap_Block_remove_from_free_list( block );
     251
     252    /* Statistics */
     253    --stats->free_blocks;
     254  }
     255
     256  /* Statistics */
     257  ++stats->used_blocks;
    263258  stats->free_size -= alloc_size;
    264   if(stats->min_free_size > stats->free_size)
     259  if(stats->min_free_size > stats->free_size) {
    265260    stats->min_free_size = stats->free_size;
    266   stats->used_blocks += 1;
     261  }
     262
    267263  return alloc_size;
    268264}
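
The splitting branch above keeps the lower part of the block for the caller
and leaves the upper part on the free list; the block following the split
gets 'unused_size' written into its 'prev_size' field because its
predecessor is now free.  A self-contained sketch of that pointer
arithmetic, assuming a minimal two-field block layout (names are
illustrative, not the RTEMS identifiers):

  #include <stdint.h>

  #define PREV_USED ((uintptr_t) 1) /* low flag bit, as in the code above */

  typedef struct {
    uintptr_t prev_size;     /* only valid while the previous block is free */
    uintptr_t size_and_flag; /* block size with PREV_USED in the low bit */
  } block_t;

  static uintptr_t size_of( const block_t *b )
  {
    return b->size_and_flag & ~PREV_USED;
  }

  static block_t *block_at( block_t *b, uintptr_t offset )
  {
    return (block_t *) ((uintptr_t) b + offset);
  }

  /* Split 'b': the lower 'alloc_size' bytes become the used block, the
     rest stays free.  Mirrors the splitting branch of _Heap_Block_allocate(). */
  static void split_block( block_t *b, uintptr_t alloc_size )
  {
    uintptr_t unused = size_of( b ) - alloc_size;
    block_t *upper = block_at( b, alloc_size );

    b->size_and_flag = alloc_size | PREV_USED;
    upper->size_and_flag = unused | PREV_USED;     /* lower part is used now */
    block_at( upper, unused )->prev_size = unused; /* 'upper' is free */
  }
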
  • cpukit/score/src/heapalignupuptr.c

    r2bbfbf1 r371cea31  
     1#if 0
    12/*
    23 *  Heap Handler
     
    2425 * difficult.
    2526 */
    26 void _Heap_Align_up_uptr (
    27   _H_uptr_t *value,
    28   uint32_t alignment
     27uintptr_t _Heap_Align_up(
     28  uintptr_t value,
     29  uintptr_t alignment
    2930)
    3031{
    31   _H_uptr_t remainder;
    32   _H_uptr_t v = *value;
     32  uintptr_t remainder = value % alignment;
    3333
    34   remainder = v % alignment;
    35 
    36   if ( remainder )
    37     *value = v - remainder + alignment;
     34  if ( remainder != 0 ) {
     35    return value - remainder + alignment;
     36  } else {
     37    return value;
     38  }
    3839}
    39 
    40 
     40#endif
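
The by-reference _Heap_Align_up_uptr() gives way to a value-returning
_Heap_Align_up(); together with its align-down counterpart it uses plain
modulo arithmetic, so the alignment need not be a power of two.  A sketch
of the pair (helper names are illustrative):

  #include <stdint.h>

  static uintptr_t align_up( uintptr_t value, uintptr_t alignment )
  {
    uintptr_t remainder = value % alignment;
    return remainder != 0 ? value - remainder + alignment : value;
  }

  static uintptr_t align_down( uintptr_t value, uintptr_t alignment )
  {
    return value - value % alignment;
  }

  /* Example: align_up( 13, 8 ) == 16 and align_down( 13, 8 ) == 8;
     both are identity functions for already aligned values. */
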
  • cpukit/score/src/heapallocate.c

    r2bbfbf1 r371cea31  
    2020#include <rtems/score/heap.h>
    2121
    22 /*PAGE
    23  *
    24  *  _Heap_Allocate
    25  *
    26  *  This kernel routine allocates the requested size of memory
    27  *  from the specified heap.
    28  *
    29  *  Input parameters:
    30  *    the_heap  - pointer to heap header.
    31  *    size      - size in bytes of the memory block to allocate.
    32  *
    33  *  Output parameters:
    34  *    returns - starting address of memory block allocated
    35  */
     22void *_Heap_Allocate( Heap_Control *heap, uintptr_t size )
     23{
     24  Heap_Statistics *const stats = &heap->stats;
     25  Heap_Block * const tail = _Heap_Free_list_tail( heap );
     26  Heap_Block *block = _Heap_First_free_block( heap );
     27  uint32_t search_count = 0;
     28  void *alloc_area_begin_ptr = NULL;
    3629
    37 void *_Heap_Allocate(
    38   Heap_Control        *the_heap,
    39   intptr_t             size
    40 )
    41 {
    42   uint32_t  the_size;
    43   uint32_t  search_count;
    44   Heap_Block *the_block;
    45   void       *ptr = NULL;
    46   Heap_Statistics *const stats = &the_heap->stats;
    47   Heap_Block *const tail = _Heap_Tail(the_heap);
    48 
    49   the_size =
    50     _Heap_Calc_block_size(size, the_heap->page_size, the_heap->min_block_size);
    51   if(the_size == 0)
     30  size = _Heap_Calc_block_size( size, heap->page_size, heap->min_block_size );
     31  if( size == 0 ) {
    5232    return NULL;
    53 
    54   /* Find large enough free block. */
    55   for(the_block = _Heap_First(the_heap), search_count = 0;
    56       the_block != tail;
    57       the_block = the_block->next, ++search_count)
    58   {
    59     /* As we always coalesce free blocks, prev block must have been used. */
    60     _HAssert(_Heap_Is_prev_used(the_block));
    61 
    62     /* Don't bother to mask out the HEAP_PREV_USED bit as it won't change the
    63        result of the comparison. */
    64     if(the_block->size >= the_size) {
    65       (void)_Heap_Block_allocate(the_heap, the_block, the_size );
    66 
    67       ptr = _Heap_User_area(the_block);
    68 
    69       stats->allocs += 1;
    70       stats->searches += search_count + 1;
    71 
    72       _HAssert(_Heap_Is_aligned_ptr(ptr, the_heap->page_size));
    73       break;
    74     }
    7533  }
    7634
    77   if(stats->max_search < search_count)
     35  /*
     36   * Find large enough free block.
     37   *
     38   * Do not bother to mask out the HEAP_PREV_BLOCK_USED bit as it will not
     39   * change the result of the size comparison.
     40   */
     41  while (block != tail && block->size_and_flag < size) {
     42    _HAssert( _Heap_Is_prev_used( block ));
     43
     44    block = block->next;
     45    ++search_count;
     46  }
     47
     48  if (block != tail) {
     49    _Heap_Block_allocate( heap, block, size );
     50
     51    alloc_area_begin_ptr = (void *) _Heap_Alloc_area_of_block( block );
     52
     53    _HAssert( _Heap_Is_aligned( (uintptr_t) alloc_area_begin_ptr, heap->page_size ));
     54
     55    /* Statistics */
     56    ++stats->allocs;
     57    stats->searches += search_count;
     58  }
     59
     60  /* Statistics */
     61  if (stats->max_search < search_count) {
    7862    stats->max_search = search_count;
     63  }
    7964
    80   return ptr;
     65  return alloc_area_begin_ptr;
    8166}
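
The new allocator folds the size test into the loop condition and performs
the allocation once after the loop.  A condensed sketch of the same
first-fit traversal over the doubly linked free list (types and names are
illustrative, not the RTEMS ones):

  #include <stddef.h>
  #include <stdint.h>

  typedef struct free_block {
    uintptr_t size_and_flag;  /* size with the used flag in the low bit */
    struct free_block *next;  /* free list links */
    struct free_block *prev;
  } free_block_t;

  /* First fit: walk the free list until a block can hold 'size' bytes.
     The flag bit is deliberately not masked out; because both values are
     page-size multiples, the extra low bit can never flip the outcome of
     the comparison. */
  static free_block_t *first_fit(
    free_block_t *first,
    free_block_t *tail,
    uintptr_t size
  )
  {
    free_block_t *b = first;

    while ( b != tail && b->size_and_flag < size ) {
      b = b->next;
    }

    return b != tail ? b : NULL;
  }
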
  • cpukit/score/src/heapallocatealigned.c

    r2bbfbf1 r371cea31  
    2626  Heap_Control *the_heap,
    2727  Heap_Block   *the_block,
    28   _H_uptr_t     user_addr,
    29   _H_uptr_t     aligned_user_addr,
    30   intptr_t      size
     28  uintptr_t     user_addr,
     29  uintptr_t     aligned_user_addr,
     30  uintptr_t      size
    3131)
    3232{
    33   _H_uptr_t const user_area = _H_p2u(_Heap_User_area(the_block));
    34   _H_uptr_t const block_end = _H_p2u(the_block)
    35     + _Heap_Block_size(the_block) + HEAP_BLOCK_HEADER_OFFSET;
    36   _H_uptr_t const user_end = aligned_user_addr + size;
    37   _H_uptr_t const heap_start = _H_p2u(the_heap->start) + HEAP_OVERHEAD;
    38   _H_uptr_t const heap_end = _H_p2u(the_heap->final)
    39     + HEAP_BLOCK_HEADER_OFFSET;
    40   uint32_t const page_size = the_heap->page_size;
     33  uintptr_t const user_area = _Heap_Alloc_area_of_block(the_block);
     34  uintptr_t const block_end = the_block
     35    + _Heap_Block_size(the_block) + HEAP_BLOCK_SIZE_OFFSET;
     36  uintptr_t const user_end = aligned_user_addr + size;
     37  uintptr_t const heap_start = (uintptr_t) the_heap->start + HEAP_LAST_BLOCK_OVERHEAD;
     38  uintptr_t const heap_end = (uintptr_t) the_heap->final
     39    + HEAP_BLOCK_SIZE_OFFSET;
     40  uintptr_t const page_size = the_heap->page_size;
    4141
    4242  _HAssert(user_addr == user_area);
     
    7575  Heap_Control  *the_heap,
    7676  Heap_Block    *the_block,
    77   intptr_t       alloc_size
     77  uintptr_t       alloc_size
    7878)
    7979{
    8080  Heap_Statistics *const stats = &the_heap->stats;
    81   uint32_t const block_size = _Heap_Block_size(the_block);
    82   uint32_t const the_rest = block_size - alloc_size;
     81  uintptr_t const block_size = _Heap_Block_size(the_block);
     82  uintptr_t const the_rest = block_size - alloc_size;
    8383
    8484  _HAssert(_Heap_Is_aligned(block_size, the_heap->page_size));
     
    9090    /* Split the block so that lower part is still free, and upper part
    9191       becomes used. */
    92     the_block->size = the_rest | HEAP_PREV_USED;
     92    the_block->size_and_flag = the_rest | HEAP_PREV_BLOCK_USED;
    9393    the_block = _Heap_Block_at(the_block, the_rest);
    9494    the_block->prev_size = the_rest;
    95     the_block->size = alloc_size;
     95    the_block->size_and_flag = alloc_size;
    9696  } else {
    9797    /* Don't split the block as remainder is either zero or too small to be
    9898       used as a separate free block. Change 'alloc_size' to the size of the
    9999       block and remove the block from the list of free blocks. */
    100     _Heap_Block_remove(the_block);
     100    _Heap_Block_remove_from_free_list(the_block);
    101101    alloc_size = block_size;
    102102    stats->free_blocks -= 1;
    103103  }
    104104  /* Mark the block as used (in the next block). */
    105   _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;
     105  _Heap_Block_at(the_block, alloc_size)->size_and_flag |= HEAP_PREV_BLOCK_USED;
    106106  /* Update statistics */
    107107  stats->free_size -= alloc_size;
     
    133133void *_Heap_Allocate_aligned(
    134134  Heap_Control *the_heap,
    135   intptr_t      size,
    136   uint32_t      alignment
     135  uintptr_t      size,
     136  uintptr_t      alignment
    137137)
    138138{
    139   uint32_t search_count;
     139  uintptr_t search_count;
    140140  Heap_Block *the_block;
    141141
    142142  void *user_ptr = NULL;
    143   uint32_t  const page_size = the_heap->page_size;
     143  uintptr_t  const page_size = the_heap->page_size;
    144144  Heap_Statistics *const stats = &the_heap->stats;
    145   Heap_Block *const tail = _Heap_Tail(the_heap);
    146 
    147   uint32_t const end_to_user_offs = size - HEAP_BLOCK_HEADER_OFFSET;
    148 
    149   uint32_t const the_size =
     145  Heap_Block *const tail = _Heap_Free_list_tail(the_heap);
     146
     147  uintptr_t const end_to_user_offs = size - HEAP_BLOCK_SIZE_OFFSET;
     148
     149  uintptr_t const the_size =
    150150    _Heap_Calc_block_size(size, page_size, the_heap->min_block_size);
    151151
     
    158158  /* Find large enough free block that satisfies the alignment requirements. */
    159159
    160   for (the_block = _Heap_First(the_heap), search_count = 0;
     160  for (the_block = _Heap_First_free_block(the_heap), search_count = 0;
    161161      the_block != tail;
    162162      the_block = the_block->next, ++search_count)
    163163  {
    164     uint32_t const block_size = _Heap_Block_size(the_block);
     164    uintptr_t const block_size = _Heap_Block_size(the_block);
    165165
    166166    /* As we always coalesce free blocks, prev block must have been used. */
     
    169169    if (block_size >= the_size) { /* the_block is large enough. */
    170170
    171       _H_uptr_t user_addr;
    172       _H_uptr_t aligned_user_addr;
    173       _H_uptr_t const user_area = _H_p2u(_Heap_User_area(the_block));
     171      uintptr_t user_addr;
     172      uintptr_t aligned_user_addr;
     173      uintptr_t const user_area = _Heap_Alloc_area_of_block(the_block);
    174174
    175175      /* Calculate 'aligned_user_addr' that will become the user pointer we
     
    178178         Calculations are from the 'block_end' as we are going to split free
    179179         block so that the upper part of the block becomes used block. */
    180       _H_uptr_t const block_end = _H_p2u(the_block) + block_size;
    181       aligned_user_addr = block_end - end_to_user_offs;
    182       _Heap_Align_down_uptr(&aligned_user_addr, alignment);
     180      uintptr_t const block_end = (uintptr_t) the_block + block_size;
     181      aligned_user_addr =
     182        _Heap_Align_down(block_end - end_to_user_offs, alignment);
    183183
    184184      /* 'user_addr' is the 'aligned_user_addr' further aligned down to the
    185185         'page_size' boundary. We need it as blocks' user areas should begin
    186186         only at 'page_size' aligned addresses */
    187       user_addr = aligned_user_addr;
    188       _Heap_Align_down_uptr(&user_addr, page_size);
     187      user_addr = _Heap_Align_down(aligned_user_addr, page_size);
    189188
    190189      /* Make sure 'user_addr' calculated didn't run out of 'the_block'. */
     
    211210               can make 'aligned_user_addr' to be close enough to the
    212211               'user_addr'. */
    213             aligned_user_addr = user_addr;
    214             _Heap_Align_up_uptr(&aligned_user_addr, alignment);
     212            aligned_user_addr = _Heap_Align_up(user_addr, alignment);
    215213            if (aligned_user_addr - user_addr >= page_size) {
    216214              /* No, we can't use the block */
     
    222220        /* The block is indeed acceptable: calculate the size of the block
    223221           to be allocated and perform allocation. */
    224         uint32_t const alloc_size =
    225             block_end - user_addr + HEAP_BLOCK_USER_OFFSET;
    226 
    227         _HAssert(_Heap_Is_aligned_ptr((void*)aligned_user_addr, alignment));
     222        uintptr_t const alloc_size =
     223            block_end - user_addr + HEAP_BLOCK_ALLOC_AREA_OFFSET;
     224
     225        _HAssert(_Heap_Is_aligned(aligned_user_addr, alignment));
    228226
    229227        the_block = block_allocate(the_heap, the_block, alloc_size);
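
The aligned allocator works backwards from the end of the candidate block:
the user address is first aligned down to the requested boundary, then
aligned down again to the page size to find where the allocated area must
begin.  A sketch of just that address arithmetic (parameter names are
illustrative):

  #include <stdint.h>

  static uintptr_t align_down( uintptr_t value, uintptr_t alignment )
  {
    return value - value % alignment;
  }

  /* Derive the aligned user address and the page-aligned area start from
     the block end, as in the search loop above.  'end_to_user_offs' is
     the fixed distance from the block end back to the user pointer. */
  static void place_aligned(
    uintptr_t block_end,
    uintptr_t end_to_user_offs,
    uintptr_t alignment,
    uintptr_t page_size,
    uintptr_t *aligned_user_addr,
    uintptr_t *user_addr
  )
  {
    *aligned_user_addr = align_down( block_end - end_to_user_offs, alignment );
    *user_addr = align_down( *aligned_user_addr, page_size );
  }
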
  • cpukit/score/src/heapextend.c

    r2bbfbf1 r371cea31  
    2020#include <rtems/score/heap.h>
    2121
    22 /*PAGE
    23  *
    24  *  _Heap_Extend
    25  *
    26  *  This routine grows the_heap memory area using the size bytes which
    27  *  begin at starting_address.
    28  *
    29  *  Input parameters:
    30  *    the_heap          - pointer to heap header.
    31  *    starting_address  - pointer to the memory area.
    32  *    size              - size in bytes of the memory block to allocate.
    33  *
    34  *  Output parameters:
    35  *    *amount_extended  - amount of memory added to the_heap
    36  */
    37 
    3822Heap_Extend_status _Heap_Extend(
    39   Heap_Control        *the_heap,
    40   void                *starting_address,
    41   intptr_t             size,
    42   intptr_t            *amount_extended
     23  Heap_Control *heap,
     24  void *area_begin_ptr,
     25  uintptr_t area_size,
     26  uintptr_t *amount_extended
    4327)
    4428{
    45   uint32_t         the_size;
    46   Heap_Statistics *const stats = &the_heap->stats;
    47 
    48   /*
    49    *  The overhead was taken from the original heap memory.
    50    */
    51 
    52   Heap_Block  *old_final;
    53   Heap_Block  *new_final;
     29  Heap_Statistics *const stats = &heap->stats;
     30  uintptr_t const area_begin = (uintptr_t) area_begin_ptr;
     31  uintptr_t const heap_area_begin = heap->begin;
     32  uintptr_t const heap_area_end = heap->end;
     33  uintptr_t const new_heap_area_end = heap_area_end + area_size;
     34  uintptr_t extend_size = 0;
     35  Heap_Block *const old_final = heap->final;
     36  Heap_Block *new_final = NULL;
    5437
    5538  /*
     
    6649   */
    6750
    68   if ( starting_address >= the_heap->begin &&        /* case 3 */
    69        starting_address < the_heap->end
    70      )
    71     return HEAP_EXTEND_ERROR;
    72 
    73   if ( starting_address != the_heap->end )
    74     return HEAP_EXTEND_NOT_IMPLEMENTED;         /* cases 1, 2, and 5 */
     51  if ( area_begin >= heap_area_begin && area_begin < heap_area_end ) {
     52    return HEAP_EXTEND_ERROR; /* case 3 */
     53  } else if ( area_begin != heap_area_end ) {
     54    return HEAP_EXTEND_NOT_IMPLEMENTED; /* cases 1, 2, and 5 */
     55  }
    7556
    7657  /*
     
    8061   */
    8162
    82   old_final = the_heap->final;
    83   the_heap->end = _Addresses_Add_offset( the_heap->end, size );
    84   the_size = _Addresses_Subtract( the_heap->end, old_final ) - HEAP_OVERHEAD;
    85   _Heap_Align_down( &the_size, the_heap->page_size );
     63  heap->end = new_heap_area_end;
    8664
    87   *amount_extended = size;
     65  extend_size = new_heap_area_end
     66    - (uintptr_t) old_final - HEAP_LAST_BLOCK_OVERHEAD;
     67  extend_size = _Heap_Align_down( extend_size, heap->page_size );
    8868
    89   if( the_size < the_heap->min_block_size )
    90     return HEAP_EXTEND_SUCCESSFUL;
     69  *amount_extended = extend_size;
    9170
    92   old_final->size = the_size | (old_final->size & HEAP_PREV_USED);
    93   new_final = _Heap_Block_at( old_final, the_size );
    94   new_final->size = HEAP_PREV_USED;
    95   the_heap->final = new_final;
     71  if( extend_size >= heap->min_block_size ) {
     72    old_final->size_and_flag = extend_size
     73      | (old_final->size_and_flag & HEAP_PREV_BLOCK_USED);
     74    new_final = _Heap_Block_at( old_final, extend_size );
     75    new_final->size_and_flag = heap->page_size | HEAP_PREV_BLOCK_USED;
    9676
    97   stats->size += size;
    98   stats->used_blocks += 1;
    99   stats->frees -= 1;    /* Don't count subsequent call as actual free() */
     77    heap->final = new_final;
    10078
    101   _Heap_Free( the_heap, _Heap_User_area( old_final ) );
     79    stats->size += area_size;
     80    ++stats->used_blocks;
     81    --stats->frees; /* Do not count subsequent call as actual free() */
     82
     83    _Heap_Free( heap, (void *) _Heap_Alloc_area_of_block( old_final ));
     84  }
    10285
    10386  return HEAP_EXTEND_SUCCESSFUL;
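
Of the five possible positions of the new area relative to the heap, only
the one starting exactly at the current heap end can be merged in place;
overlap is an error and everything else is unimplemented.  A sketch of the
classification (status names here are hypothetical):

  #include <stdint.h>

  typedef enum {
    EXTEND_OK,
    EXTEND_ERROR,
    EXTEND_NOT_IMPLEMENTED
  } extend_status;

  /* Classify a candidate extension area against the current heap bounds.
     Only an area immediately above the heap can be merged in place. */
  static extend_status classify_extend_area(
    uintptr_t area_begin,
    uintptr_t heap_begin,
    uintptr_t heap_end
  )
  {
    if ( area_begin >= heap_begin && area_begin < heap_end ) {
      return EXTEND_ERROR;           /* overlaps the heap: case 3 */
    } else if ( area_begin != heap_end ) {
      return EXTEND_NOT_IMPLEMENTED; /* below or disjoint: cases 1, 2, 5 */
    }
    return EXTEND_OK;
  }
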
  • cpukit/score/src/heapfree.c

    r2bbfbf1 r371cea31  
    2020#include <rtems/score/heap.h>
    2121
    22 /*PAGE
    23  *
    24  *  _Heap_Free
    25  *
    26  *  This kernel routine returns the memory designated by the
    27  *  given heap and given starting address to the memory pool.
    28  *
    29  *  Input parameters:
    30  *    the_heap         - pointer to heap header
    31  *    starting_address - starting address of the memory block to free.
    32  *
    33  *  Output parameters:
    34  *    true  - if starting_address is valid heap address
    35  *    false - if starting_address is invalid heap address
    36  */
     22bool _Heap_Free( Heap_Control *heap, void *alloc_area_begin_ptr )
     23{
     24  Heap_Statistics *const stats = &heap->stats;
     25  uintptr_t alloc_area_begin = (uintptr_t) alloc_area_begin_ptr;
     26  Heap_Block *block =
     27    _Heap_Block_of_alloc_area( alloc_area_begin, heap->page_size );
     28  Heap_Block *next_block = NULL;
     29  uintptr_t block_size = 0;
     30  uintptr_t next_block_size = 0;
     31  bool next_is_free = false;
    3732
    38 bool _Heap_Free(
    39   Heap_Control        *the_heap,
    40   void                *starting_address
    41 )
    42 {
    43   Heap_Block      *the_block;
    44   Heap_Block      *next_block;
    45   uint32_t         the_size;
    46   uint32_t         next_size;
    47   Heap_Statistics *const stats = &the_heap->stats;
    48   bool             next_is_free;
    49 
    50   if ( !_Addresses_Is_in_range(
    51        starting_address, (void *)the_heap->start, (void *)the_heap->final ) ) {
    52     _HAssert(starting_address != NULL);
    53     return( false );
     33  if (
     34    !_Addresses_Is_in_range( alloc_area_begin_ptr, heap->start, heap->final)
     35  ) {
     36    _HAssert( alloc_area_begin_ptr != NULL );
     37    return false;
    5438  }
    5539
    56   _Heap_Start_of_block( the_heap, starting_address, &the_block );
    57 
    58   if ( !_Heap_Is_block_in( the_heap, the_block ) ) {
     40  if ( !_Heap_Is_block_in_heap( heap, block ) ) {
    5941    _HAssert( false );
    60     return( false );
     42    return false;
    6143  }
    6244
    63   the_size = _Heap_Block_size( the_block );
    64   next_block = _Heap_Block_at( the_block, the_size );
     45  block_size = _Heap_Block_size( block );
     46  next_block = _Heap_Block_at( block, block_size );
    6547
    66   if ( !_Heap_Is_block_in( the_heap, next_block ) ) {
     48  if ( !_Heap_Is_block_in_heap( heap, next_block ) ) {
    6749    _HAssert( false );
    68     return( false );
     50    return false;
    6951  }
    7052
    7153  if ( !_Heap_Is_prev_used( next_block ) ) {
    7254    _HAssert( false );
    73     return( false );
     55    return false;
    7456  }
    7557
    76   next_size = _Heap_Block_size( next_block );
    77   next_is_free = next_block < the_heap->final &&
    78     !_Heap_Is_prev_used(_Heap_Block_at(next_block, next_size));
     58  next_block_size = _Heap_Block_size( next_block );
     59  next_is_free = next_block != heap->final
     60    && !_Heap_Is_prev_used( _Heap_Block_at( next_block, next_block_size ));
    7961
    80   if ( !_Heap_Is_prev_used( the_block ) ) {
    81     uint32_t const prev_size = the_block->prev_size;
    82     Heap_Block *const prev_block = _Heap_Block_at( the_block, -prev_size );
     62  if ( !_Heap_Is_prev_used( block ) ) {
     63    uintptr_t const prev_size = block->prev_size;
     64    Heap_Block * const prev_block = _Heap_Block_at( block, -prev_size );
    8365
    84     if ( !_Heap_Is_block_in( the_heap, prev_block ) ) {
     66    if ( !_Heap_Is_block_in_heap( heap, prev_block ) ) {
    8567      _HAssert( false );
    8668      return( false );
     
    9577
    9678    if ( next_is_free ) {       /* coalesce both */
    97       uint32_t const size = the_size + prev_size + next_size;
    98       _Heap_Block_remove( next_block );
     79      uintptr_t const size = block_size + prev_size + next_block_size;
     80      _Heap_Block_remove_from_free_list( next_block );
    9981      stats->free_blocks -= 1;
    100       prev_block->size = size | HEAP_PREV_USED;
     82      prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
    10183      next_block = _Heap_Block_at( prev_block, size );
    10284      _HAssert(!_Heap_Is_prev_used( next_block));
    10385      next_block->prev_size = size;
    104     }
    105     else {                      /* coalesce prev */
    106       uint32_t const size = the_size + prev_size;
    107       prev_block->size = size | HEAP_PREV_USED;
    108       next_block->size &= ~HEAP_PREV_USED;
     86    } else {                      /* coalesce prev */
     87      uintptr_t const size = block_size + prev_size;
     88      prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
     89      next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
    10990      next_block->prev_size = size;
    11091    }
    111   }
    112   else if ( next_is_free ) {    /* coalesce next */
    113     uint32_t const size = the_size + next_size;
    114     _Heap_Block_replace( next_block, the_block );
    115     the_block->size = size | HEAP_PREV_USED;
    116     next_block  = _Heap_Block_at( the_block, size );
     92  } else if ( next_is_free ) {    /* coalesce next */
     93    uintptr_t const size = block_size + next_block_size;
     94    _Heap_Block_replace_in_free_list( next_block, block );
     95    block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
     96    next_block  = _Heap_Block_at( block, size );
    11797    next_block->prev_size = size;
    118   }
    119   else {                        /* no coalesce */
    120     /* Add 'the_block' to the head of the free blocks list as it tends to
     98  } else {                        /* no coalesce */
     99    /* Add 'block' to the head of the free blocks list as it tends to
    121100       produce less fragmentation than adding to the tail. */
    122     _Heap_Block_insert_after( _Heap_Head( the_heap), the_block );
    123     the_block->size = the_size | HEAP_PREV_USED;
    124     next_block->size &= ~HEAP_PREV_USED;
    125     next_block->prev_size = the_size;
     101    _Heap_Block_insert_after( _Heap_Free_list_head( heap), block );
     102    block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
     103    next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
     104    next_block->prev_size = block_size;
    126105
    127     stats->free_blocks += 1;
    128     if ( stats->max_free_blocks < stats->free_blocks )
     106    /* Statistics */
     107    ++stats->free_blocks;
     108    if ( stats->max_free_blocks < stats->free_blocks ) {
    129109      stats->max_free_blocks = stats->free_blocks;
     110    }
    130111  }
    131112
    132   stats->used_blocks -= 1;
    133   stats->free_size += the_size;
    134   stats->frees += 1;
     113  /* Statistics */
     114  --stats->used_blocks;
     115  ++stats->frees;
     116  stats->free_size += block_size;
    135117
    136118  return( true );
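
_Heap_Free() distinguishes four cases based on the usage state of the two
neighbouring blocks; the surviving free block always absorbs its free
neighbours so that two free blocks are never adjacent.  A sketch of the
case selection (illustrative names; the real code reads the flags shown
above):

  #include <stdbool.h>

  typedef enum {
    COALESCE_BOTH, /* merge prev + block + next into one free block */
    COALESCE_PREV, /* grow the previous free block over this one */
    COALESCE_NEXT, /* absorb the following free block */
    COALESCE_NONE  /* insert the block at the head of the free list */
  } coalesce_case;

  static coalesce_case classify_free( bool prev_is_free, bool next_is_free )
  {
    if ( prev_is_free ) {
      return next_is_free ? COALESCE_BOTH : COALESCE_PREV;
    }
    return next_is_free ? COALESCE_NEXT : COALESCE_NONE;
  }
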
  • cpukit/score/src/heapgetfreeinfo.c

    r2bbfbf1 r371cea31  
    4141{
    4242  Heap_Block *the_block;
    43   Heap_Block *const tail = _Heap_Tail(the_heap);
     43  Heap_Block *const tail = _Heap_Free_list_tail(the_heap);
    4444
    4545  info->number = 0;
     
    4747  info->total = 0;
    4848
    49   for(the_block = _Heap_First(the_heap);
     49  for(the_block = _Heap_First_free_block(the_heap);
    5050      the_block != tail;
    5151      the_block = the_block->next)
  • cpukit/score/src/heapgetinfo.c

    r2bbfbf1 r371cea31  
    4242  Heap_Block *const end = the_heap->final;
    4343
    44   _HAssert(the_block->prev_size == HEAP_PREV_USED);
     44  _HAssert(the_block->prev_size == the_heap->page_size);
    4545  _HAssert(_Heap_Is_prev_used(the_block));
    4646
     
    7575   *  blocks' overhead though.
    7676   */
    77   the_info->Used.total += HEAP_OVERHEAD;
     77  the_info->Used.total += HEAP_LAST_BLOCK_OVERHEAD;
    7878
    7979  return HEAP_GET_INFORMATION_SUCCESSFUL;
  • cpukit/score/src/heapresizeblock.c

    r2bbfbf1 r371cea31  
    2020#include <rtems/score/heap.h>
    2121
    22 /*
    23  *  _Heap_Resize_block
    24  *
    25  *  DESCRIPTION:
    26  *
    27  *  This routine tries to resize in place the block that is pointed to by the
    28  *  'starting_address' to the new 'size'.
    29  *
    30  *  Input parameters:
    31  *    the_heap         - pointer to heap header
    32  *    starting_address - starting address of the memory block
    33  *    size             - new size
    34  *
    35  *  Output parameters:
    36  *    'old_mem_size'   - the size of the user memory area of the block before
    37  *                       resizing.
    38  *    'avail_mem_size' - the size of the user memory area of the free block
    39  *                       that has been enlarged or created due to resizing,
    40  *                       0 if none.
    41  *    Returns
    42  *      HEAP_RESIZE_SUCCESSFUL  - if success
    43  *      HEAP_RESIZE_UNSATISFIED - if the block can't be resized in place
    44  *      HEAP_RESIZE_FATAL_ERROR - if failure
    45  */
    46 
    4722Heap_Resize_status _Heap_Resize_block(
    48   Heap_Control *the_heap,
    49   void         *starting_address,
    50   intptr_t      size,
    51   intptr_t     *old_mem_size,
    52   intptr_t     *avail_mem_size
     23  Heap_Control *heap,
     24  void         *alloc_area_begin_ptr,
     25  uintptr_t      size,
     26  uintptr_t     *old_mem_size,
     27  uintptr_t     *avail_mem_size
    5328)
    5429{
    55   Heap_Block *the_block;
     30  uintptr_t alloc_area_begin = (uintptr_t) alloc_area_begin_ptr;
     31  Heap_Block *block;
    5632  Heap_Block *next_block;
    57   uint32_t   next_block_size;
     33  uintptr_t   next_block_size;
    5834  bool       next_is_used;
    5935  Heap_Block *next_next_block;
    60   uint32_t   old_block_size;
    61   uint32_t   old_user_size;
    62   uint32_t   prev_used_flag;
    63   Heap_Statistics *const stats = &the_heap->stats;
    64   uint32_t const min_block_size = the_heap->min_block_size;
    65   uint32_t const page_size = the_heap->page_size;
     36  uintptr_t   old_block_size;
     37  uintptr_t   old_user_size;
     38  uintptr_t   prev_used_flag;
     39  Heap_Statistics *const stats = &heap->stats;
     40  uintptr_t const min_block_size = heap->min_block_size;
     41  uintptr_t const page_size = heap->page_size;
    6642
    6743  *old_mem_size = 0;
    6844  *avail_mem_size = 0;
    6945
    70   _Heap_Start_of_block(the_heap, starting_address, &the_block);
    71   _HAssert(_Heap_Is_block_in(the_heap, the_block));
    72   if (!_Heap_Is_block_in(the_heap, the_block))
     46  block = _Heap_Block_of_alloc_area(alloc_area_begin, heap->page_size);
     47  _HAssert(_Heap_Is_block_in_heap(heap, block));
     48  if (!_Heap_Is_block_in_heap(heap, block))
    7349    return HEAP_RESIZE_FATAL_ERROR;
    7450
    75   prev_used_flag = the_block->size & HEAP_PREV_USED;
    76   old_block_size = _Heap_Block_size(the_block);
    77   next_block = _Heap_Block_at(the_block, old_block_size);
     51  prev_used_flag = block->size_and_flag & HEAP_PREV_BLOCK_USED;
     52  old_block_size = _Heap_Block_size(block);
     53  next_block = _Heap_Block_at(block, old_block_size);
    7854
    79   _HAssert(_Heap_Is_block_in(the_heap, next_block));
     55  _HAssert(_Heap_Is_block_in_heap(heap, next_block));
    8056  _HAssert(_Heap_Is_prev_used(next_block));
    81   if ( !_Heap_Is_block_in(the_heap, next_block) ||
     57  if ( !_Heap_Is_block_in_heap(heap, next_block) ||
    8258       !_Heap_Is_prev_used(next_block))
    8359    return HEAP_RESIZE_FATAL_ERROR;
     
    8561  next_block_size = _Heap_Block_size(next_block);
    8662  next_next_block = _Heap_Block_at(next_block, next_block_size);
    87   next_is_used    = (next_block == the_heap->final) ||
     63  next_is_used    = (next_block == heap->final) ||
    8864                     _Heap_Is_prev_used(next_next_block);
    8965
    90   /* See _Heap_Size_of_user_area() source for explanations */
    91   old_user_size = _Addresses_Subtract(next_block, starting_address)
    92     + HEAP_BLOCK_HEADER_OFFSET;
     66  /* See _Heap_Size_of_alloc_area() source for explanations */
     67  old_user_size = (uintptr_t) next_block - alloc_area_begin
     68    + HEAP_BLOCK_SIZE_OFFSET;
    9369
    9470  *old_mem_size = old_user_size;
     
    9672  if (size > old_user_size) {
    9773    /* Need to extend the block: allocate part of the next block and then
    98        merge 'the_block' and allocated block together. */
     74       merge 'block' and allocated block together. */
    9975    if (next_is_used)    /* Next block is in use, -- no way to extend */
    10076      return HEAP_RESIZE_UNSATISFIED;
    10177    else {
    102       uint32_t add_block_size = size - old_user_size;
    103       _Heap_Align_up(&add_block_size, page_size);
     78      uintptr_t add_block_size =
     79        _Heap_Align_up(size - old_user_size, page_size);
    10480      if (add_block_size < min_block_size)
    10581        add_block_size = min_block_size;
     
    10783        return HEAP_RESIZE_UNSATISFIED; /* Next block is too small or none. */
    10884      add_block_size =
    109         _Heap_Block_allocate(the_heap, next_block, add_block_size);
     85        _Heap_Block_allocate(heap, next_block, add_block_size);
    11086      /* Merge two subsequent blocks */
    111       the_block->size = (old_block_size + add_block_size) | prev_used_flag;
     87      block->size_and_flag = (old_block_size + add_block_size) | prev_used_flag;
    11288      --stats->used_blocks;
    11389    }
     
    11591
    11692    /* Calculate how much memory we could free */
    117     uint32_t free_block_size = old_user_size - size;
    118     _Heap_Align_down(&free_block_size, page_size);
     93    uintptr_t free_block_size =
     94      _Heap_Align_down(old_user_size - size, page_size);
    11995
    12096    if (free_block_size > 0) {
     
    124100         'min_block_size'. */
    125101
    126       uint32_t new_block_size = old_block_size - free_block_size;
     102      uintptr_t new_block_size = old_block_size - free_block_size;
    127103
    128104      if (new_block_size < min_block_size) {
    129         uint32_t delta = min_block_size - new_block_size;
     105        uintptr_t delta = min_block_size - new_block_size;
    130106        _HAssert(free_block_size >= delta);
    131107        free_block_size -= delta;
     
    145121        /* Extend the next block to the low addresses by 'free_block_size' */
    146122        Heap_Block *const new_next_block =
    147           _Heap_Block_at(the_block, new_block_size);
    148         uint32_t const new_next_block_size =
     123          _Heap_Block_at(block, new_block_size);
     124        uintptr_t const new_next_block_size =
    149125          next_block_size + free_block_size;
    150         _HAssert(_Heap_Is_block_in(the_heap, next_next_block));
    151         the_block->size = new_block_size | prev_used_flag;
    152         new_next_block->size = new_next_block_size | HEAP_PREV_USED;
     126        _HAssert(_Heap_Is_block_in_heap(heap, next_next_block));
     127        block->size_and_flag = new_block_size | prev_used_flag;
     128        new_next_block->size_and_flag = new_next_block_size | HEAP_PREV_BLOCK_USED;
    153129        next_next_block->prev_size = new_next_block_size;
    154         _Heap_Block_replace(next_block, new_next_block);
    155         the_heap->stats.free_size += free_block_size;
     130        _Heap_Block_replace_in_free_list(next_block, new_next_block);
     131        heap->stats.free_size += free_block_size;
    156132        *avail_mem_size = new_next_block_size - HEAP_BLOCK_USED_OVERHEAD;
    157133
    158134      } else if (free_block_size >= min_block_size) {
    159135        /* Split the block into 2 used  parts, then free the second one. */
    160         the_block->size = new_block_size | prev_used_flag;
    161         next_block = _Heap_Block_at(the_block, new_block_size);
    162         next_block->size = free_block_size | HEAP_PREV_USED;
     136        block->size_and_flag = new_block_size | prev_used_flag;
     137        next_block = _Heap_Block_at(block, new_block_size);
     138        next_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;
    163139        ++stats->used_blocks; /* We have created used block */
    164140        --stats->frees;       /* Don't count next call in stats */
    165         _Heap_Free(the_heap, _Heap_User_area(next_block));
     141        _Heap_Free(heap, (void *) _Heap_Alloc_area_of_block(next_block));
    166142        *avail_mem_size = free_block_size - HEAP_BLOCK_USED_OVERHEAD;
    167143      }
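
When growing in place, the shortfall is rounded up to a whole page and
clamped to the minimum block size before being carved out of the next
(free) block.  A sketch of that size computation (names are illustrative):

  #include <stdint.h>

  static uintptr_t align_up( uintptr_t value, uintptr_t alignment )
  {
    uintptr_t remainder = value % alignment;
    return remainder != 0 ? value - remainder + alignment : value;
  }

  /* Amount to claim from the next free block to grow an allocation from
     'old_user_size' to 'new_size' in place. */
  static uintptr_t grow_amount(
    uintptr_t new_size,
    uintptr_t old_user_size,
    uintptr_t page_size,
    uintptr_t min_block_size
  )
  {
    uintptr_t add = align_up( new_size - old_user_size, page_size );

    return add < min_block_size ? min_block_size : add;
  }
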
  • cpukit/score/src/heapsizeofuserarea.c

    r2bbfbf1 r371cea31  
    2020#include <rtems/score/heap.h>
    2121
    22 /*PAGE
    23  *
    24  *  _Heap_Size_of_user_area
    25  *
    26  *  This kernel routine sets '*size' to the size of the block of memory
    27  *  which begins at 'starting_address'.
    28  *  It returns true if the 'starting_address' is in the heap, and false
    29  *  otherwise.
    30  *
    31  *  Input parameters:
    32  *    the_heap         - pointer to heap header
    33  *    starting_address - starting address of the memory block
    34  *    size             - pointer to size of area
    35  *
    36  *  Output parameters:
    37  *    size  - size of area filled in
    38  *    true  - if starting_address is valid heap address
    39  *    false - if starting_address is invalid heap address
    40  */
    41 
    42 bool _Heap_Size_of_user_area(
    43   Heap_Control        *the_heap,
    44   void                *starting_address,
    45   intptr_t            *size
     22bool _Heap_Size_of_alloc_area(
     23  Heap_Control *heap,
     24  void *alloc_area_begin_ptr,
     25  uintptr_t *size
    4626)
    4727{
    48   Heap_Block        *the_block;
    49   Heap_Block        *next_block;
    50   uint32_t           the_size;
     28  uintptr_t alloc_area_begin = (uintptr_t) alloc_area_begin_ptr;
     29  Heap_Block *block =
     30    _Heap_Block_of_alloc_area( alloc_area_begin, heap->page_size );
     31  Heap_Block *next_block = NULL;
     32  uintptr_t block_size = 0;
    5133
    52   if ( !_Addresses_Is_in_range(
    53          starting_address, (void *)the_heap->start, (void *)the_heap->final ) )
    54     return( false );
     34  if (
     35    !_Addresses_Is_in_range( alloc_area_begin_ptr, heap->start, heap->final )
     36  ) {
     37    return false;
     38  }
    5539
    56   _Heap_Start_of_block( the_heap, starting_address, &the_block );
    5740
    58   _HAssert(_Heap_Is_block_in( the_heap, the_block ));
    59   if ( !_Heap_Is_block_in( the_heap, the_block ) )
    60     return( false );
     41  _HAssert(_Heap_Is_block_in_heap( heap, block ));
     42  if ( !_Heap_Is_block_in_heap( heap, block ) ) {
     43    return false;
     44  }
    6145
    62   the_size   = _Heap_Block_size( the_block );
    63   next_block = _Heap_Block_at( the_block, the_size );
     46  block_size = _Heap_Block_size( block );
     47  next_block = _Heap_Block_at( block, block_size );
    6448
    65   _HAssert(_Heap_Is_block_in( the_heap, next_block ));
    66   _HAssert(_Heap_Is_prev_used( next_block ));
     49  _HAssert( _Heap_Is_block_in_heap( heap, next_block ));
     50  _HAssert( _Heap_Is_prev_used( next_block ));
    6751  if (
    68     !_Heap_Is_block_in( the_heap, next_block ) ||
     52    !_Heap_Is_block_in_heap( heap, next_block ) ||
    6953    !_Heap_Is_prev_used( next_block )
    70   )
    71     return( false );
     54  ) {
     55    return false;
     56  }
    7257
    73   /* 'starting_address' could be greater than 'the_block' address plus
    74      HEAP_BLOCK_USER_OFFSET as _Heap_Allocate_aligned() may produce such user
    75      pointers. To get rid of this offset we calculate user size as difference
    76      between the end of 'the_block' (='next_block') and 'starting_address'
    77      and then add correction equal to the offset of the 'size' field of the
    78      'Heap_Block' structure. The correction is due to the fact that
    79      'prev_size' field of the next block is actually used as user accessible
    80      area of 'the_block'. */
     58  /*
     59   * 'alloc_area_begin' could be greater than 'block' address plus
     60   * HEAP_BLOCK_ALLOC_AREA_OFFSET as _Heap_Allocate_aligned() may produce such
     61   * user pointers. To get rid of this offset we calculate user size as
     62   * difference between the end of 'block' (='next_block') and
     63   * 'alloc_area_begin' and then add correction equal to the offset of the
     64   * 'size' field of the 'Heap_Block' structure. The correction is due to the
     65   * fact that 'prev_size' field of the next block is actually used as user
     66   * accessible area of 'block'.
     67   */
     68  *size = (uintptr_t) next_block - alloc_area_begin + HEAP_BLOCK_SIZE_OFFSET;
    8169
    82   *size = _Addresses_Subtract ( next_block, starting_address )
    83     + HEAP_BLOCK_HEADER_OFFSET;
    84 
    85   return( true );
     70  return true;
    8671}
    8772
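
The usable size is measured from the (possibly offset) user pointer to the
start of the next block, plus the offset of the size field, because the
next block's 'prev_size' field is user-accessible while this block is
allocated.  A sketch of the computation (the offset constant here is
illustrative):

  #include <stdint.h>

  #define BLOCK_SIZE_OFFSET ((uintptr_t) sizeof(uintptr_t)) /* illustrative */

  /* Usable size of an allocation: everything from the user pointer up to
     the next block header, plus the next block's 'prev_size' field, which
     doubles as user memory while this block is in use. */
  static uintptr_t alloc_area_size(
    uintptr_t alloc_area_begin,
    uintptr_t next_block_addr
  )
  {
    return next_block_addr - alloc_area_begin + BLOCK_SIZE_OFFSET;
  }
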
  • cpukit/score/src/heapwalk.c

    r2bbfbf1 r371cea31  
    6161  Heap_Block *the_block = the_heap->start;
    6262  Heap_Block *const end = the_heap->final;
    63   Heap_Block *const tail = _Heap_Tail(the_heap);
     63  Heap_Block *const tail = _Heap_Free_list_tail(the_heap);
    6464  int error = 0;
    6565  int passes = 0;
    6666
     67  /* FIXME: Why is this disabled? */
    6768  do_dump = false;
     69
     70  /* FIXME: Why is this disabled? */
    6871  /*
    6972   * We don't want to allow walking the heap until we have
     
    7780*/
    7881
     82  /* FIXME: Reason for this? */
    7983  if (source < 0)
    80     source = the_heap->stats.instance;
    81 
    82   if (do_dump == true)
     84    source = (int) the_heap->stats.instance;
     85
     86  if (do_dump)
    8387    printk("\nPASS: %d start %p final %p first %p last %p begin %p end %p\n",
    8488      source, the_block, end,
    85       _Heap_First(the_heap), _Heap_Last(the_heap),
     89      _Heap_First_free_block(the_heap), _Heap_Last_free_block(the_heap),
    8690      the_heap->begin, the_heap->end);
    8791
     
    9195
    9296  if (!_Heap_Is_prev_used(the_block)) {
    93     printk("PASS: %d !HEAP_PREV_USED flag of 1st block isn't set\n", source);
     97    printk("PASS: %d !HEAP_PREV_BLOCK_USED flag of 1st block isn't set\n", source);
    9498    error = 1;
    9599  }
     
    137141
    138142      { /* Check if 'the_block' is in the free block list */
    139         Heap_Block* block = _Heap_First(the_heap);
     143        Heap_Block* block = _Heap_First_free_block(the_heap);
    140144        if (!_Addresses_Is_aligned(block) ) {
    141145          printk(
     
    151155            break;
    152156          }
    153           if (!_Heap_Is_block_in(the_heap, block)) {
     157          if (!_Heap_Is_block_in_heap(the_heap, block)) {
    154158            printk("PASS: %d a free block %p is not in heap\n", source, block);
    155159            error = 1;
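
The walker cross-checks the free list against the heap bounds: every node
reached via 'next' must be an address inside the heap.  A stripped-down
sketch of that pass (illustrative types; the real walker also validates
alignment, flags, and sizes):

  #include <stdbool.h>
  #include <stdint.h>

  typedef struct node {
    struct node *next;
  } node_t;

  /* Verify that every free-list node lies inside [heap_begin, heap_end). */
  static bool free_list_in_heap(
    const node_t *first,
    const node_t *tail,
    uintptr_t heap_begin,
    uintptr_t heap_end
  )
  {
    const node_t *n;

    for ( n = first; n != tail; n = n->next ) {
      uintptr_t addr = (uintptr_t) n;

      if ( addr < heap_begin || addr >= heap_end ) {
        return false;
      }
    }

    return true;
  }
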
  • cpukit/score/src/pheapgetblocksize.c

    r2bbfbf1 r371cea31  
    2626
    2727  _RTEMS_Lock_allocator();
    28     status = _Heap_Size_of_user_area( the_heap, starting_address, size );
     28    status = _Heap_Size_of_alloc_area( the_heap, starting_address, size );
    2929  _RTEMS_Unlock_allocator();
    3030  return status;