Changeset 518c2aeb in rtems


Timestamp:
Sep 9, 2009, 2:58:37 PM
Author:
Joel Sherrill <joel.sherrill@…>
Branches:
4.10, 4.11, 5, master
Children:
809fb589
Parents:
1c6b228
Message:

2009-09-09 Sebastian Huber <Sebastian.Huber@…>

  • score/include/rtems/score/heap.h, score/inline/rtems/score/heap.inl, score/src/heapallocate.c, score/src/heap.c, score/src/heapextend.c, score/src/heapresizeblock.c, score/src/heapwalk.c: Documentation. Simplified block resize. Improved heap walk. Changed heap layout to avoid a special case for _Heap_Is_used() and _Heap_Is_free().
  • libmisc/stackchk/check.c: Update for heap API changes.
Location:
cpukit
Files:
9 edited

  • cpukit/ChangeLog

    r1c6b228 r518c2aeb  
     12009-09-09      Sebastian Huber <Sebastian.Huber@embedded-brains.de>
     2
     3        * score/include/rtems/score/heap.h, score/inline/rtems/score/heap.inl,
     4        score/src/heapallocate.c, score/src/heap.c, score/src/heapextend.c,
     5        score/src/heapresizeblock.c, score/src/heapwalk.c: Documentation.
     6        Simplified block resize. Improved heap walk. Changed heap layout to
     7        avoid a special case for _Heap_Is_used() and _Heap_Is_free().
     8        * libmisc/stackchk/check.c: Update for heap API changes.
     9
    1102009-09-06      Ralf Corsépius <ralf.corsepius@rtems.org>
    211
  • cpukit/libmisc/stackchk/check.c

    r1c6b228 r518c2aeb  
    9292
    9393#else
     94  /*
     95   * We need this magic offset because during a task delete the task stack will
     96   * be freed before we enter the task switch extension which checks the stack.
     97   * The task stack free operation will write the next and previous pointers
     98   * for the free list into this area.
     99   */
    94100  #define Stack_check_Get_pattern_area( _the_stack ) \
    95     ((Stack_check_Control *) ((char *)(_the_stack)->area + HEAP_BLOCK_HEADER_SIZE))
     101    ((Stack_check_Control *) ((char *)(_the_stack)->area \
     102      + sizeof(Heap_Block) - HEAP_BLOCK_HEADER_SIZE))
    96103
    97104  #define Stack_check_Calculate_used( _low, _size, _high_water) \
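
    The comment above documents why the pattern area needs the new offset: during a task delete the stack is returned to the heap before the stack checker runs in the task switch extension, and the heap free operation writes the free-list next and previous pointers into the start of the allocation area. A minimal stand-alone sketch of the arithmetic follows; the block layout and header size are redefined locally as assumptions for illustration only (the real definitions live in <rtems/score/heap.h>).

      #include <stdint.h>
      #include <stdio.h>

      /* Illustrative stand-in for the score heap block; the field layout is
       * an assumption made only for this sketch. */
      typedef struct Heap_Block {
        uintptr_t prev_size;          /* block header */
        uintptr_t size_and_flag;      /* block header */
        struct Heap_Block *next;      /* written only while the block is free */
        struct Heap_Block *prev;      /* written only while the block is free */
      } Heap_Block;

      /* Only prev_size and size_and_flag belong to the header; the
       * allocation area (here: the task stack) starts directly behind it. */
      #define HEAP_BLOCK_HEADER_SIZE ( 2 * sizeof( uintptr_t ) )

      int main( void )
      {
        /* The free-list pointers overlap the first bytes of the stack area,
         * so the check pattern is placed behind them. */
        size_t const pattern_offset =
          sizeof( Heap_Block ) - HEAP_BLOCK_HEADER_SIZE;

        printf( "pattern area offset: %zu bytes into the stack area\n",
          pattern_offset );
        return 0;
      }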
  • cpukit/score/include/rtems/score/heap.h

    r1c6b228 r518c2aeb  
    9292 * <table>
    9393 *   <tr><th>Label</th><th colspan=2>Content</th></tr>
    94  *   <tr><td>heap->begin</td><td colspan=2>heap area begin address</td></tr>
     94 *   <tr><td>heap->area_begin</td><td colspan=2>heap area begin address</td></tr>
    9595 *   <tr>
    9696 *     <td>first_block->prev_size</td>
     
    109109 *   <tr><td>...</td></tr>
    110110 *   <tr>
    111  *     <td>second_block->prev_size</td><td colspan=2>size of first block</td>
    112  *   </tr>
    113  *   <tr>
    114  *     <td>second_block->size</td>
    115  *     <td colspan=2>page size (the value is arbitrary)</td>
    116  *   </tr>
    117  *   <tr><td>heap->end</td><td colspan=2>heap area end address</td></tr>
     111 *     <td>last_block->prev_size</td><td colspan=2>size of first block</td>
     112 *   </tr>
     113 *   <tr>
     114 *     <td>last_block->size</td>
     115 *     <td colspan=2>first block begin address - last block begin address</td>
     116 *   </tr>
     117 *   <tr><td>heap->area_end</td><td colspan=2>heap area end address</td></tr>
    118118 * </table>
     119 * The next block of the last block is the first block.  Since the first
     120 * block indicates that the previous block is used, this ensures that the
     121 * last block appears as used for the _Heap_Is_used() and _Heap_Is_free()
     122 * functions.
    119123 *
    120124 * @{
     
    206210
    207211  /**
    208    * @brief The size of the memory for heap.
     212   * @brief Size of the allocatable area in bytes.
     213   *
     214   * This value is an integral multiple of the page size.
    209215   */
    210216  uintptr_t size;
    211217
    212218  /**
    213    * @brief Current free size.
     219   * @brief Current free size in bytes.
     220   *
     221   * This value is an integral multiple of the page size.
    214222   */
    215223  uintptr_t free_size;
    216224
    217225  /**
    218    * @brief Minimum free size ever.
     226   * @brief Minimum free size ever in bytes.
     227   *
     228   * This value is an integral multiple of the page size.
    219229   */
    220230  uintptr_t min_free_size;
     
    241251
    242252  /**
    243    * @brief Total number of successful calls to alloc.
     253   * @brief Total number of successful allocations.
    244254   */
    245255  uint32_t allocs;
     
    355365
    356366/**
    357  * @brief Allocates a memory area of size @a size bytes.
     367 * @brief Allocates a memory area of size @a size bytes from the heap @a heap.
    358368 *
    359369 * If the alignment parameter @a alignment is not equal to zero, the allocated
     
    379389);
    380390
    381 #define _Heap_Allocate_aligned( heap, size, alignment ) \
    382   _Heap_Allocate_aligned_with_boundary( heap, size, alignment, 0 )
    383 
    384 #define _Heap_Allocate( heap, size ) \
    385   _Heap_Allocate_aligned_with_boundary( heap, size, 0, 0 )
     391/**
     392 * @brief See _Heap_Allocate_aligned_with_boundary() with boundary equals zero.
     393 */
     394RTEMS_INLINE_ROUTINE void *_Heap_Allocate_aligned(
     395  Heap_Control *heap,
     396  uintptr_t size,
     397  uintptr_t alignment
     398)
     399{
     400  return _Heap_Allocate_aligned_with_boundary( heap, size, alignment, 0 );
     401}
     402
     403/**
     404 * @brief See _Heap_Allocate_aligned_with_boundary() with alignment and
     405 * boundary equals zero.
     406 */
     407RTEMS_INLINE_ROUTINE void *_Heap_Allocate( Heap_Control *heap, uintptr_t size )
     408{
     409  return _Heap_Allocate_aligned_with_boundary( heap, size, 0, 0 );
     410}
    386411
    387412/**
     
    474499 * @a alloc_size bytes in the block @a block.
    475500 *
    476  * The block may be split up into multiple blocks.
     501 * The block may be split up into multiple blocks.  The previous and next block
      502 * may be used or free.  Free block parts which form a valid new block will be
     503 * inserted into the free list or merged with an adjacent free block.  If the
     504 * block is used, they will be inserted after the free list head.  If the block
     505 * is free, they will be inserted after the previous block in the free list.
    477506 *
    478507 * Inappropriate values for @a alloc_begin or @a alloc_size may corrupt the
  • cpukit/score/inline/rtems/score/heap.inl

    r1c6b228 r518c2aeb  
    124124 */
    125125RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Block_at(
    126   Heap_Block *block,
     126  const Heap_Block *block,
    127127  uintptr_t offset
    128128)
    129129{
    130130  return (Heap_Block *) ((uintptr_t) block + offset);
     131}
     132
     133RTEMS_INLINE_ROUTINE Heap_Block *_Heap_Prev_block(
     134  const Heap_Block *block
     135)
     136{
     137  return (Heap_Block *) ((uintptr_t) block - block->prev_size);
    131138}
    132139
     
    147154}
    148155
     156RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
     157{
     158  return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
     159}
     160
     161RTEMS_INLINE_ROUTINE void _Heap_Block_set_size(
     162  Heap_Block *block,
     163  uintptr_t size
     164)
     165{
     166  uintptr_t flag = block->size_and_flag & HEAP_PREV_BLOCK_USED;
     167
     168  block->size_and_flag = size | flag;
     169}
     170
    149171RTEMS_INLINE_ROUTINE bool _Heap_Is_prev_used( const Heap_Block *block )
    150172{
     
    152174}
    153175
    154 RTEMS_INLINE_ROUTINE uintptr_t _Heap_Block_size( const Heap_Block *block )
    155 {
    156   return block->size_and_flag & ~HEAP_PREV_BLOCK_USED;
     176RTEMS_INLINE_ROUTINE bool _Heap_Is_used(
     177  const Heap_Block *block
     178)
     179{
     180  const Heap_Block *const next_block =
     181    _Heap_Block_at( block, _Heap_Block_size( block ) );
     182
     183  return _Heap_Is_prev_used( next_block );
     184}
     185
     186RTEMS_INLINE_ROUTINE bool _Heap_Is_free(
     187  const Heap_Block *block
     188)
     189{
     190  return !_Heap_Is_used( block );
    157191}
    158192
     
    167201
    168202/**
    169  * @brief Returns the heap area size.
     203 * @brief Returns the size of the allocatable area in bytes.
     204 *
     205 * This value is an integral multiple of the page size.
    170206 */
    171207RTEMS_INLINE_ROUTINE uintptr_t _Heap_Get_size( const Heap_Control *heap )
    172208{
    173   return heap->area_end - heap->area_begin;
     209  return heap->stats.size;
    174210}
    175211
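
    The new _Heap_Is_used() and _Heap_Is_free() inlines above rely on the changed heap layout described in heap.h: the size stored in the last block is chosen so that its notional next block wraps around to the first block, whose HEAP_PREV_BLOCK_USED bit is always set. A small stand-alone sketch of that trick, with the structure and constant redefined locally for illustration only:

      #include <stdint.h>
      #include <stdio.h>

      /* Local stand-ins for this sketch; the real types are declared in
       * <rtems/score/heap.h>. */
      typedef struct {
        uintptr_t prev_size;
        uintptr_t size_and_flag;
      } Heap_Block;

      #define HEAP_PREV_BLOCK_USED ( (uintptr_t) 1 )

      static Heap_Block *block_at( const Heap_Block *block, uintptr_t offset )
      {
        return (Heap_Block *) ( (uintptr_t) block + offset );
      }

      int main( void )
      {
        Heap_Block blocks[ 2 ];
        Heap_Block *const first_block = &blocks[ 0 ];
        Heap_Block *const last_block = &blocks[ 1 ];

        /* The first block always reports its (non-existing) previous block
         * as used. */
        first_block->size_and_flag =
          sizeof( Heap_Block ) | HEAP_PREV_BLOCK_USED;

        /* The last block's size is "first block begin - last block begin",
         * so stepping to its next block wraps around to the first block. */
        last_block->size_and_flag =
          (uintptr_t) first_block - (uintptr_t) last_block;

        Heap_Block *const next_block = block_at(
          last_block,
          last_block->size_and_flag & ~HEAP_PREV_BLOCK_USED
        );

        /* The next block of the last block is the first block, whose
         * prev-used flag is set, so the last block always appears as used:
         * no special case in _Heap_Is_used() or _Heap_Is_free(). */
        printf( "next of last == first: %d\n", next_block == first_block );
        printf(
          "last block appears used: %d\n",
          (int) ( next_block->size_and_flag & HEAP_PREV_BLOCK_USED )
        );
        return 0;
      }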
  • cpukit/score/src/heap.c

    r1c6b228 r518c2aeb  
    141141  uintptr_t overhead = 0;
    142142  Heap_Block *first_block = NULL;
    143   Heap_Block *second_block = NULL;
     143  Heap_Block *last_block = NULL;
    144144
    145145  if ( page_size == 0 ) {
     
    147147  } else {
    148148    page_size = _Heap_Align_up( page_size, CPU_ALIGNMENT );
     149
     150    if ( page_size < CPU_ALIGNMENT ) {
     151      /* Integer overflow */
     152      return 0;
     153    }
    149154  }
    150155  min_block_size = _Heap_Align_up( sizeof( Heap_Block ), page_size );
     
    173178  first_block->prev = _Heap_Free_list_head( heap );
    174179
    175   /* Second and last block */
    176   second_block = _Heap_Block_at( first_block, first_block_size );
    177   second_block->prev_size = first_block_size;
    178   second_block->size_and_flag = page_size;
     180  /*
     181   * Last block.
     182   *
     183   * The next block of the last block is the first block.  Since the first
     184   * block indicates that the previous block is used, this ensures that the
     185   * last block appears as used for the _Heap_Is_used() and _Heap_Is_free()
     186   * functions.
     187   */
     188  last_block = _Heap_Block_at( first_block, first_block_size );
     189  last_block->prev_size = first_block_size;
     190  last_block->size_and_flag = first_block_begin - (uintptr_t) last_block;
    179191
    180192  /* Heap control */
     
    184196  heap->area_end = heap_area_end;
    185197  heap->first_block = first_block;
    186   heap->last_block = second_block;
     198  heap->last_block = last_block;
    187199  _Heap_Free_list_head( heap )->next = first_block;
    188200  _Heap_Free_list_tail( heap )->prev = first_block;
    189201
    190202  /* Statistics */
    191   stats->size = heap_area_size;
     203  stats->size = first_block_size;
    192204  stats->free_size = first_block_size;
    193205  stats->min_free_size = first_block_size;
     
    209221  );
    210222  _HAssert(
    211     _Heap_Is_aligned( _Heap_Alloc_area_of_block( second_block ), page_size )
     223    _Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
    212224  );
    213 
    214   if ( !_Heap_Walk( heap, 0, false ) ) {
    215     _Heap_Walk( heap, 0, true );
    216   }
    217225
    218226  return alloc_area_size;
    219227}
    220228
    221 static Heap_Block *_Heap_Block_split(
     229void _Heap_Block_split(
    222230  Heap_Control *heap,
    223231  Heap_Block *block,
     232  Heap_Block *free_list_anchor,
    224233  uintptr_t alloc_size
    225234)
    226235{
     236  Heap_Statistics *const stats = &heap->stats;
     237
    227238  uintptr_t const page_size = heap->page_size;
    228239  uintptr_t const min_block_size = heap->min_block_size;
     
    238249  uintptr_t const free_size_limit = min_block_size + HEAP_BLOCK_SIZE_OFFSET;
    239250
    240   Heap_Block *const next_block = _Heap_Block_at( block, block_size );
     251  Heap_Block *next_block = _Heap_Block_at( block, block_size );
    241252
    242253  _HAssert( used_size <= block_size + HEAP_BLOCK_SIZE_OFFSET );
     
    244255
    245256  if ( free_size >= free_size_limit ) {
    246     uintptr_t const free_block_size = block_size - used_block_size;
    247257    Heap_Block *const free_block = _Heap_Block_at( block, used_block_size );
     258    uintptr_t free_block_size = block_size - used_block_size;
    248259
    249260    _HAssert( used_block_size + free_block_size == block_size );
    250261
    251     block->size_and_flag = used_block_size
    252       | (block->size_and_flag & HEAP_PREV_BLOCK_USED);
     262    _Heap_Block_set_size( block, used_block_size );
     263
     264    /* Statistics */
     265    stats->free_size += free_block_size;
     266
     267    if ( _Heap_Is_used( next_block ) ) {
     268      _Heap_Free_list_insert_after( free_list_anchor, free_block );
     269
     270      /* Statistics */
     271      ++stats->free_blocks;
     272    } else {
     273      uintptr_t const next_block_size = _Heap_Block_size( next_block );
     274
     275      _Heap_Free_list_replace( next_block, free_block );
     276
     277      free_block_size += next_block_size;
     278
     279      next_block = _Heap_Block_at( free_block, free_block_size );
     280    }
     281
    253282    free_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;
     283
    254284    next_block->prev_size = free_block_size;
    255 
    256     return free_block;
     285    next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
    257286  } else {
    258287    next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
    259 
    260     return NULL;
    261288  }
    262289}
     
    265292  Heap_Control *heap,
    266293  Heap_Block *block,
     294  Heap_Block *free_list_anchor,
    267295  uintptr_t alloc_size
    268296)
    269297{
    270   Heap_Block *const free_block = _Heap_Block_split( heap, block, alloc_size );
    271 
    272   if ( free_block != NULL ) {
    273     _Heap_Free_list_replace( block, free_block );
    274   } else {
    275     Heap_Statistics *const stats = &heap->stats;
    276 
    277     _Heap_Free_list_remove( block );
    278 
    279     /* Statistics */
    280     --stats->free_blocks;
    281   }
     298  _Heap_Block_split( heap, block, free_list_anchor, alloc_size );
    282299
    283300  return block;
     
    287304  Heap_Control *heap,
    288305  Heap_Block *block,
     306  Heap_Block *free_list_anchor,
    289307  uintptr_t alloc_begin,
    290308  uintptr_t alloc_size
    291309)
    292310{
    293   uintptr_t const block_begin = (uintptr_t) block;
     311  Heap_Statistics *const stats = &heap->stats;
     312
     313  uintptr_t block_begin = (uintptr_t) block;
    294314  uintptr_t block_size = _Heap_Block_size( block );
    295315  uintptr_t block_end = block_begin + block_size;
     
    300320  uintptr_t const new_block_size = block_end - new_block_begin;
    301321
    302   Heap_Block *free_block = NULL;
    303 
    304322  block_end = new_block_begin;
    305323  block_size = block_end - block_begin;
     
    308326  _HAssert( new_block_size >= heap->min_block_size );
    309327
     328  /* Statistics */
     329  stats->free_size += block_size;
     330
     331  if ( _Heap_Is_prev_used( block ) ) {
     332    _Heap_Free_list_insert_after( free_list_anchor, block );
     333
     334    free_list_anchor = block;
     335
     336    /* Statistics */
     337    ++stats->free_blocks;
     338  } else {
     339    Heap_Block *const prev_block = _Heap_Prev_block( block );
     340    uintptr_t const prev_block_size = _Heap_Block_size( prev_block );
     341
     342    block = prev_block;
     343    block_begin = (uintptr_t) block;
     344    block_size += prev_block_size;
     345  }
     346
    310347  block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
     348
    311349  new_block->prev_size = block_size;
    312350  new_block->size_and_flag = new_block_size;
    313351
    314   free_block = _Heap_Block_split( heap, new_block, alloc_size );
    315   if ( free_block != NULL ) {
    316     _Heap_Free_list_insert_after( block, free_block );
    317   }
     352  _Heap_Block_split( heap, new_block, free_list_anchor, alloc_size );
    318353
    319354  return new_block;
     
    328363{
    329364  Heap_Statistics *const stats = &heap->stats;
     365
    330366  uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
    331367  uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;
    332368
    333   _HAssert( _Heap_Is_prev_used( block ) );
     369  Heap_Block *free_list_anchor = NULL;
     370
    334371  _HAssert( alloc_area_begin <= alloc_begin );
     372
     373  if ( _Heap_Is_free( block ) ) {
     374    free_list_anchor = block->prev;
     375
     376    _Heap_Free_list_remove( block );
     377
     378    /* Statistics */
     379    --stats->free_blocks;
     380    ++stats->used_blocks;
     381    stats->free_size -= _Heap_Block_size( block );
     382  } else {
     383    free_list_anchor = _Heap_Free_list_head( heap );
     384  }
    335385
    336386  if ( alloc_area_offset < heap->page_size ) {
    337387    alloc_size += alloc_area_offset;
    338388
    339     block = _Heap_Block_allocate_from_begin( heap, block, alloc_size );
     389    block = _Heap_Block_allocate_from_begin(
     390      heap,
     391      block,
     392      free_list_anchor,
     393      alloc_size
     394    );
    340395  } else {
    341     block = _Heap_Block_allocate_from_end( heap, block, alloc_begin, alloc_size );
     396    block = _Heap_Block_allocate_from_end(
     397      heap,
     398      block,
     399      free_list_anchor,
     400      alloc_begin,
     401      alloc_size
     402    );
    342403  }
    343404
    344405  /* Statistics */
    345   ++stats->used_blocks;
    346   stats->free_size -= _Heap_Block_size( block );
    347406  if ( stats->min_free_size > stats->free_size ) {
    348407    stats->min_free_size = stats->free_size;
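
    The simplified _Heap_Block_split() above either carves a free remainder out of the block and threads it into the free list (or merges it with a free next block), or leaves the whole block used when the remainder would be too small. The following worked example of the size check uses plain numbers; the constants and the used/free size formulas are assumptions for illustration, not taken verbatim from the score sources.

      #include <stdint.h>
      #include <stdio.h>

      /* Illustrative constant; on 32-bit targets it is smaller. */
      #define HEAP_BLOCK_SIZE_OFFSET ( (uintptr_t) 8 )

      static uintptr_t align_up( uintptr_t value, uintptr_t alignment )
      {
        uintptr_t const excess = value % alignment;

        return excess != 0 ? value + alignment - excess : value;
      }

      int main( void )
      {
        uintptr_t const page_size = 16;
        uintptr_t const min_block_size = 32;
        uintptr_t const block_size = 128;  /* size of the block being split */
        uintptr_t const alloc_size = 40;   /* bytes requested by the caller */

        /* Assumed formulas: the used part must cover the request plus the
         * size offset, rounded up to the page size; the rest may become a
         * new free block if it passes the limit check shown in the diff. */
        uintptr_t const used_size = alloc_size + HEAP_BLOCK_SIZE_OFFSET;
        uintptr_t const used_block_size = align_up( used_size, page_size );
        uintptr_t const free_size =
          block_size + HEAP_BLOCK_SIZE_OFFSET - used_size;
        uintptr_t const free_size_limit =
          min_block_size + HEAP_BLOCK_SIZE_OFFSET;

        if ( free_size >= free_size_limit ) {
          printf( "split: %ju bytes used, %ju bytes become a free block\n",
            (uintmax_t) used_block_size,
            (uintmax_t) ( block_size - used_block_size ) );
        } else {
          printf( "no split: the whole block stays used\n" );
        }
        return 0;
      }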
  • cpukit/score/src/heapallocate.c

    r1c6b228 r518c2aeb  
    4949    uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
    5050    uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;
    51     uintptr_t const alloc_area_size = alloc_area_offset + alloc_size;
    5251
    5352    _HAssert( block_size >= min_block_size );
     
    207206
    208207  if ( alloc_begin != 0 ) {
     208    /* Statistics */
     209    stats->searches += search_count;
     210
    209211    block = _Heap_Block_allocate( heap, block, alloc_begin, alloc_size );
    210212
     
    217219      boundary
    218220    );
    219 
    220     /* Statistics */
    221     stats->searches += search_count;
    222221  }
    223222
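
    For reference, a hedged usage sketch of the allocation interface after this change: _Heap_Allocate() and _Heap_Allocate_aligned() are now inline functions that forward to _Heap_Allocate_aligned_with_boundary() (see the heap.h hunk above). The _Heap_Initialize() call and its exact signature are not part of this changeset and are assumed here; a page size of zero selects the default alignment, as handled by the initialization code in heap.c.

      #include <rtems/score/heap.h>

      static char heap_area[ 4096 ];
      static Heap_Control heap;

      void heap_example( void )
      {
        /* Assumed signature: returns the usable space, zero on failure. */
        uintptr_t const space =
          _Heap_Initialize( &heap, heap_area, sizeof( heap_area ), 0 );

        if ( space > 0 ) {
          /* Alignment and boundary default to zero. */
          void *a = _Heap_Allocate( &heap, 128 );

          /* Boundary defaults to zero. */
          void *b = _Heap_Allocate_aligned( &heap, 128, 32 );

          (void) a;
          (void) b;
        }
      }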
  • cpukit/score/src/heapextend.c

    r1c6b228 r518c2aeb  
    3939  uintptr_t const new_heap_area_end = heap_area_end + area_size;
    4040  uintptr_t extend_size = 0;
    41   Heap_Block *const old_final = heap->last_block;
    42   Heap_Block *new_final = NULL;
     41  Heap_Block *const last_block = heap->last_block;
    4342
    4443  /*
     
    7069
    7170  extend_size = new_heap_area_end
    72     - (uintptr_t) old_final - HEAP_BLOCK_HEADER_SIZE;
     71    - (uintptr_t) last_block - HEAP_BLOCK_HEADER_SIZE;
    7372  extend_size = _Heap_Align_down( extend_size, heap->page_size );
    7473
     
    7675
    7776  if( extend_size >= heap->min_block_size ) {
    78     old_final->size_and_flag = extend_size
    79       | (old_final->size_and_flag & HEAP_PREV_BLOCK_USED);
    80     new_final = _Heap_Block_at( old_final, extend_size );
    81     new_final->size_and_flag = heap->page_size | HEAP_PREV_BLOCK_USED;
     77    Heap_Block *const new_last_block = _Heap_Block_at( last_block, extend_size );
    8278
    83     heap->last_block = new_final;
     79    _Heap_Block_set_size( last_block, extend_size );
    8480
    85     stats->size += area_size;
     81    new_last_block->size_and_flag =
     82      ((uintptr_t) heap->first_block - (uintptr_t) new_last_block)
     83        | HEAP_PREV_BLOCK_USED;
     84
     85    heap->last_block = new_last_block;
     86
     87    /* Statistics */
     88    stats->size += extend_size;
    8689    ++stats->used_blocks;
    8790    --stats->frees; /* Do not count subsequent call as actual free() */
    8891
    89     _Heap_Free( heap, (void *) _Heap_Alloc_area_of_block( old_final ));
     92    _Heap_Free( heap, (void *) _Heap_Alloc_area_of_block( last_block ));
    9093  }
    9194
  • cpukit/score/src/heapresizeblock.c

    r1c6b228 r518c2aeb  
    1010 *  COPYRIGHT (c) 1989-1999.
    1111 *  On-Line Applications Research Corporation (OAR).
     12 *
     13 *  Copyright (c) 2009 embedded brains GmbH.
    1214 *
    1315 *  The license and distribution terms for this file may be
     
    2628#include <rtems/score/heap.h>
    2729
     30static Heap_Resize_status _Heap_Resize_block_checked(
     31  Heap_Control *heap,
     32  Heap_Block *block,
     33  uintptr_t alloc_begin,
     34  uintptr_t new_alloc_size,
     35  uintptr_t *old_size,
     36  uintptr_t *new_size
     37)
     38{
     39  Heap_Statistics *const stats = &heap->stats;
     40
     41  uintptr_t const block_begin = (uintptr_t) block;
     42  uintptr_t block_size = _Heap_Block_size( block );
     43  uintptr_t block_end = block_begin + block_size;
     44
     45  uintptr_t alloc_size = block_end - alloc_begin + HEAP_BLOCK_SIZE_OFFSET;
     46
     47  Heap_Block *next_block = _Heap_Block_at( block, block_size );
     48  uintptr_t next_block_size = _Heap_Block_size( next_block );
      49  bool next_block_is_free = _Heap_Is_free( next_block );
     50
     51  _HAssert( _Heap_Is_block_in_heap( heap, next_block ) );
     52  _HAssert( _Heap_Is_prev_used( next_block ) );
     53
     54  *old_size = alloc_size;
     55
     56  if ( next_block_is_free ) {
     57    block_size += next_block_size;
     58    alloc_size += next_block_size;
     59  }
     60
     61  if ( new_alloc_size > alloc_size ) {
     62    return HEAP_RESIZE_UNSATISFIED;
     63  }
     64
     65  if ( next_block_is_free ) {
     66    _Heap_Block_set_size( block, block_size );
     67
     68    _Heap_Free_list_remove( next_block );
     69
     70    next_block = _Heap_Block_at( block, block_size );
     71    next_block->size_and_flag |= HEAP_PREV_BLOCK_USED;
     72
     73    /* Statistics */
     74    --stats->free_blocks;
     75    stats->free_size -= next_block_size;
     76  }
     77
     78  block = _Heap_Block_allocate( heap, block, alloc_begin, new_alloc_size );
     79
     80  block_size = _Heap_Block_size( block );
     81  next_block = _Heap_Block_at( block, block_size );
     82  *new_size = (uintptr_t) next_block - alloc_begin + HEAP_BLOCK_SIZE_OFFSET;
     83
     84  /* Statistics */
     85  ++stats->resizes;
     86
     87  return HEAP_RESIZE_SUCCESSFUL;
     88}
     89
    2890Heap_Resize_status _Heap_Resize_block(
    2991  Heap_Control *heap,
     
    3496)
    3597{
    36   Heap_Statistics *const stats = &heap->stats;
    37   uintptr_t const min_block_size = heap->min_block_size;
    3898  uintptr_t const page_size = heap->page_size;
     99
    39100  uintptr_t const alloc_begin = (uintptr_t) alloc_begin_ptr;
     101
    40102  Heap_Block *const block = _Heap_Block_of_alloc_area( alloc_begin, page_size );
    41   Heap_Block *next_block = NULL;
    42   Heap_Block *next_next_block = NULL;
    43   uintptr_t block_size = 0;
    44   uintptr_t block_end = 0;
    45   uintptr_t next_block_size = 0;
    46   bool next_block_is_used = false;;
    47   uintptr_t alloc_size = 0;
    48   uintptr_t prev_block_used_flag = 0;
    49103
    50104  *old_size = 0;
    51105  *new_size = 0;
    52106
    53   if ( !_Heap_Is_block_in_heap( heap, block ) ) {
     107  if ( _Heap_Is_block_in_heap( heap, block ) ) {
     108    return _Heap_Resize_block_checked(
     109      heap,
     110      block,
     111      alloc_begin,
     112      new_alloc_size,
     113      old_size,
     114      new_size
     115    );
     116  } else {
    54117    return HEAP_RESIZE_FATAL_ERROR;
    55118  }
    56 
    57   block_size = _Heap_Block_size( block );
    58   block_end = (uintptr_t) block + block_size;
    59   prev_block_used_flag = block->size_and_flag & HEAP_PREV_BLOCK_USED;
    60   next_block = _Heap_Block_at( block, block_size );
    61 
    62   _HAssert( _Heap_Is_block_in_heap( heap, next_block ) );
    63   _HAssert( _Heap_Is_prev_used( next_block ) );
    64 
    65   next_block_size = _Heap_Block_size( next_block );
    66   next_next_block = _Heap_Block_at( next_block, next_block_size );
    67 
    68   _HAssert(
    69     next_block == heap->last_block
    70       || _Heap_Is_block_in_heap( heap, next_next_block )
    71   );
    72 
    73   next_block_is_used = next_block == heap->last_block
    74     || _Heap_Is_prev_used( next_next_block );
    75 
    76   alloc_size = block_end - alloc_begin + HEAP_BLOCK_SIZE_OFFSET;
    77 
    78   *old_size = alloc_size;
    79 
    80   if ( new_alloc_size > alloc_size ) {
    81     /*
    82      * Need to extend the block: allocate part of the next block and then
    83      * merge the blocks.
    84      */
    85     if ( next_block_is_used ) {
    86       return HEAP_RESIZE_UNSATISFIED;
    87     } else {
    88       uintptr_t add_block_size =
    89         _Heap_Align_up( new_alloc_size - alloc_size, page_size );
    90 
    91       if ( add_block_size < min_block_size ) {
    92         add_block_size = min_block_size;
    93       }
    94 
    95       if ( add_block_size > next_block_size ) {
    96         return HEAP_RESIZE_UNSATISFIED;
    97       }
    98 
    99       next_block = _Heap_Block_allocate(
    100         heap,
    101         next_block,
    102         _Heap_Alloc_area_of_block( next_block ),
    103         add_block_size - HEAP_BLOCK_HEADER_SIZE
    104       );
    105 
    106       /* Merge the blocks */
    107       block->size_and_flag = ( block_size + _Heap_Block_size( next_block ) )
    108         | prev_block_used_flag;
    109 
    110       /* Statistics */
    111       --stats->used_blocks;
    112     }
    113   } else {
    114     /* Calculate how much memory we could free */
    115     uintptr_t free_block_size =
    116       _Heap_Align_down( alloc_size - new_alloc_size, page_size );
    117 
    118     if ( free_block_size > 0 ) {
    119       /*
    120        * To free some memory the block should be shortened so that it can can
    121        * hold 'new_alloc_size' user bytes and still remain not shorter than
    122        * 'min_block_size'.
    123        */
    124       uintptr_t new_block_size = block_size - free_block_size;
    125 
    126       if ( new_block_size < min_block_size ) {
    127         uintptr_t const delta = min_block_size - new_block_size;
    128 
    129         _HAssert( free_block_size >= delta );
    130 
    131         free_block_size -= delta;
    132 
    133         if ( free_block_size == 0 ) {
    134           /* Statistics */
    135           ++stats->resizes;
    136 
    137           return HEAP_RESIZE_SUCCESSFUL;
    138         }
    139 
    140         new_block_size += delta;
    141       }
    142 
    143       _HAssert( new_block_size >= min_block_size );
    144       _HAssert( new_block_size + free_block_size == block_size );
    145       _HAssert( _Heap_Is_aligned( new_block_size, page_size ) );
    146       _HAssert( _Heap_Is_aligned( free_block_size, page_size ) );
    147 
    148       if ( !next_block_is_used ) {
    149         /* Extend the next block */
    150         Heap_Block *const new_next_block =
    151           _Heap_Block_at( block, new_block_size );
    152         uintptr_t const new_next_block_size =
    153           next_block_size + free_block_size;
    154 
    155         _HAssert( _Heap_Is_block_in_heap( heap, next_next_block ) );
    156 
    157         block->size_and_flag = new_block_size | prev_block_used_flag;
    158         new_next_block->size_and_flag =
    159           new_next_block_size | HEAP_PREV_BLOCK_USED;
    160         next_next_block->prev_size = new_next_block_size;
    161 
    162         _Heap_Free_list_replace( next_block, new_next_block );
    163 
    164         *new_size = new_next_block_size - HEAP_BLOCK_SIZE_OFFSET;
    165 
    166         /* Statistics */
    167         stats->free_size += free_block_size;
    168       } else if ( free_block_size >= min_block_size ) {
    169         /* Split the block into two used parts, then free the second one */
    170         block->size_and_flag = new_block_size | prev_block_used_flag;
    171         next_block = _Heap_Block_at( block, new_block_size );
    172         next_block->size_and_flag = free_block_size | HEAP_PREV_BLOCK_USED;
    173 
    174         _Heap_Free( heap, (void *) _Heap_Alloc_area_of_block( next_block ) );
    175 
    176         *new_size = free_block_size - HEAP_BLOCK_SIZE_OFFSET;
    177 
    178         /* Statistics */
    179         ++stats->used_blocks; /* We have created used block */
    180         --stats->frees; /* Do not count next call in stats */
    181       }
    182     }
    183   }
    184 
    185   /* Statistics */
    186   ++stats->resizes;
    187 
    188   return HEAP_RESIZE_SUCCESSFUL;
    189119}
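
    The refactoring above moves the actual work into the static helper _Heap_Resize_block_checked(), while the exported _Heap_Resize_block() keeps its parameters (heap, allocation begin pointer, new allocation size, and the two output sizes). A hedged usage sketch based on the parameter order visible above; ptr must come from a previous allocation from this heap.

      #include <rtems/score/heap.h>

      bool resize_in_place(
        Heap_Control *heap,
        void *ptr,
        uintptr_t new_alloc_size
      )
      {
        uintptr_t old_size = 0;
        uintptr_t new_size = 0;
        Heap_Resize_status const status = _Heap_Resize_block(
          heap,
          ptr,
          new_alloc_size,
          &old_size,
          &new_size
        );

        /* HEAP_RESIZE_UNSATISFIED: the block cannot grow in place.
         * HEAP_RESIZE_FATAL_ERROR: ptr does not belong to this heap. */
        return status == HEAP_RESIZE_SUCCESSFUL;
      }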
  • cpukit/score/src/heapwalk.c

    r1c6b228 r518c2aeb  
    5252)
    5353{
     54  uintptr_t const page_size = heap->page_size;
    5455  const Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
    5556  const Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
     57  const Heap_Block *prev_block = free_list_tail;
    5658  const Heap_Block *free_block = first_free_block;
    57   uintptr_t const page_size = heap->page_size;
    58   uintptr_t const loop_limit =
    59     ((uintptr_t) heap->last_block - (uintptr_t) heap->first_block)
    60       / heap->min_block_size;
    61   uintptr_t loop_counter = 0;
    62 
    63   while ( free_block != free_list_tail && loop_counter < loop_limit ) {
     59
     60  while ( free_block != free_list_tail ) {
    6461    if ( !_Heap_Is_block_in_heap( heap, free_block ) ) {
    6562      _Heap_Walk_printk(
     
    8885    }
    8986
    90     ++loop_counter;
    91 
     87    if ( _Heap_Is_used( free_block ) ) {
     88      _Heap_Walk_printk(
     89        source,
     90        dump,
     91        true,
     92        "free block 0x%08x: is used\n",
     93        free_block
     94      );
     95
     96      return false;
     97    }
     98
     99    if ( free_block->prev != prev_block ) {
     100      _Heap_Walk_printk(
     101        source,
     102        dump,
     103        true,
     104        "free block 0x%08x: invalid previous block 0x%08x\n",
     105        free_block,
     106        free_block->prev
     107      );
     108
     109      return false;
     110    }
     111
     112    prev_block = free_block;
    92113    free_block = free_block->next;
    93   }
    94 
    95   if ( loop_counter >= loop_limit ) {
    96     _Heap_Walk_printk(
    97       source,
    98       dump,
    99       true,
    100       "free list contains a loop\n"
    101     );
    102 
    103     return false;
    104114  }
    105115
     
    133143  uintptr_t const page_size = heap->page_size;
    134144  uintptr_t const min_block_size = heap->min_block_size;
    135   Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
    136   Heap_Block *const free_list_head = _Heap_Free_list_head( heap );
    137145  Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
    138146  Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
     
    185193
    186194  if (
    187     first_free_block != free_list_head
    188       && !_Addresses_Is_aligned( first_free_block )
    189   ) {
    190     _Heap_Walk_printk(
    191       source,
    192       dump,
    193       true,
    194       "first free block: 0x%08x not CPU aligned\n",
    195       first_free_block
    196     );
    197 
    198     return false;
    199   }
    200 
    201   if (
    202     last_free_block != free_list_tail
    203       && !_Addresses_Is_aligned( last_free_block )
    204   ) {
    205     _Heap_Walk_printk(
    206       source,
    207       dump,
    208       true,
    209       "last free block: 0x%08x not CPU aligned\n",
    210       last_free_block
    211     );
    212    
    213     return false;
    214   }
    215 
    216   if (
    217195    !_Heap_Is_aligned( _Heap_Alloc_area_of_block( first_block ), page_size )
    218196  ) {
     
    221199      dump,
    222200      true,
    223       "first block: 0x%08x not page aligned\n",
     201      "first block 0x%08x: alloc area not page aligned\n",
    224202      first_block
    225203    );
     
    228206  }
    229207
    230   if (
    231     !_Heap_Is_aligned( _Heap_Alloc_area_of_block( last_block ), page_size )
    232   ) {
    233     _Heap_Walk_printk(
    234       source,
    235       dump,
    236       true,
    237       "last block: 0x%08x not page aligned\n",
    238       last_block
    239     );
    240 
    241     return false;
    242   }
    243 
    244208  if ( !_Heap_Is_prev_used( first_block ) ) {
    245209    _Heap_Walk_printk(
     
    249213      "first block: HEAP_PREV_BLOCK_USED is cleared\n"
    250214    );
     215
     216    return false;
    251217  }
    252218
     
    260226      page_size
    261227    );
     228
     229    return false;
     230  }
     231
     232  if ( _Heap_Is_free( last_block ) ) {
     233    _Heap_Walk_printk(
     234      source,
     235      dump,
     236      true,
     237      "last block: is free\n"
     238    );
     239
     240    return false;
    262241  }
    263242
    264243  return _Heap_Walk_check_free_list( source, dump, heap );
     244}
     245
     246static bool _Heap_Walk_check_free_block(
     247  int source,
     248  bool dump,
     249  Heap_Control *heap,
     250  Heap_Block *block
     251)
     252{
     253  Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
     254  Heap_Block *const free_list_head = _Heap_Free_list_head( heap );
     255  Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
     256  Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
     257  bool const prev_used = _Heap_Is_prev_used( block );
     258  uintptr_t const block_size = _Heap_Block_size( block );
     259  Heap_Block *const next_block = _Heap_Block_at( block, block_size );
     260
     261  _Heap_Walk_printk(
     262    source,
     263    dump,
     264    false,
     265    "block 0x%08x: prev 0x%08x%s, next 0x%08x%s\n",
     266    block,
     267    block->prev,
     268    block->prev == first_free_block ?
     269      " (= first)"
     270        : (block->prev == free_list_head ? " (= head)" : ""),
     271    block->next,
     272    block->next == last_free_block ?
     273      " (= last)"
     274        : (block->next == free_list_tail ? " (= tail)" : "")
     275  );
     276
     277  if ( block_size != next_block->prev_size ) {
     278    _Heap_Walk_printk(
     279      source,
     280      dump,
     281      true,
     282      "block 0x%08x: size %u != size %u (in next block 0x%08x)\n",
     283      block,
     284      block_size,
     285      next_block->prev_size,
     286      next_block
     287    );
     288
     289    return false;
     290  }
     291
     292  if ( !prev_used ) {
     293    _Heap_Walk_printk(
     294      source,
     295      dump,
     296      true,
     297      "block 0x%08x: two consecutive blocks are free\n",
     298      block
     299    );
     300
     301    return false;
     302  }
     303
     304  if ( !_Heap_Walk_is_in_free_list( heap, block ) ) {
     305    _Heap_Walk_printk(
     306      source,
     307      dump,
     308      true,
     309      "block 0x%08x: free block not in free list\n",
     310      block
     311    );
     312
     313    return false;
     314  }
     315
     316  return true;
    265317}
    266318
     
    273325  uintptr_t const page_size = heap->page_size;
    274326  uintptr_t const min_block_size = heap->min_block_size;
    275   Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );
    276   Heap_Block *const free_list_head = _Heap_Free_list_head( heap );
    277   Heap_Block *const first_free_block = _Heap_Free_list_first( heap );
    278   Heap_Block *const last_free_block = _Heap_Free_list_last( heap );
    279327  Heap_Block *const last_block = heap->last_block;
    280328  Heap_Block *block = heap->first_block;
    281   bool error = false;
    282329
    283330  if ( !_System_state_Is_up( _System_state_Get() ) ) {
     
    289336  }
    290337
    291   while ( block != last_block && _Addresses_Is_aligned( block ) ) {
     338  while ( block != last_block ) {
    292339    uintptr_t const block_begin = (uintptr_t) block;
    293340    uintptr_t const block_size = _Heap_Block_size( block );
     
    300347        source,
    301348        dump,
    302         error,
     349        false,
    303350        "block 0x%08x: size %u\n",
    304351        block,
     
    309356        source,
    310357        dump,
    311         error,
     358        false,
    312359        "block 0x%08x: size %u, prev_size %u\n",
    313360        block,
     
    317364    }
    318365
    319     if (
    320       !_Heap_Is_aligned( block_begin + HEAP_BLOCK_HEADER_SIZE, page_size )
    321     ) {
    322       error = true;
    323       _Heap_Walk_printk(
    324         source,
    325         dump,
    326         error,
    327         "block 0x%08x: not page (%u) aligned\n",
    328         block,
    329         page_size
    330       );
    331       break;
     366    if ( !_Heap_Is_block_in_heap( heap, next_block ) ) {
     367      _Heap_Walk_printk(
     368        source,
     369        dump,
     370        true,
     371        "block 0x%08x: next block 0x%08x not in heap\n",
     372        block,
     373        next_block
     374      );
     375     
     376      return false;
    332377    }
    333378
    334379    if ( !_Heap_Is_aligned( block_size, page_size ) ) {
    335       error = true;
    336       _Heap_Walk_printk(
    337         source,
    338         dump,
    339         error,
    340         "block 0x%08x: block size %u not page (%u) aligned\n",
    341         block,
    342         block_size,
    343         page_size
    344       );
    345       break;
     380      _Heap_Walk_printk(
     381        source,
     382        dump,
     383        true,
     384        "block 0x%08x: block size %u not page aligned\n",
     385        block,
     386        block_size
     387      );
     388     
     389      return false;
    346390    }
    347391
    348392    if ( block_size < min_block_size ) {
    349       error = true;
    350       _Heap_Walk_printk(
    351         source,
    352         dump,
    353         error,
     393      _Heap_Walk_printk(
     394        source,
     395        dump,
     396        true,
    354397        "block 0x%08x: size %u < min block size %u\n",
    355398        block,
     
    357400        min_block_size
    358401      );
    359       break;
     402     
     403      return false;
    360404    }
    361405
    362406    if ( next_block_begin <= block_begin ) {
    363       error = true;
    364       _Heap_Walk_printk(
    365         source,
    366         dump,
    367         error,
     407      _Heap_Walk_printk(
     408        source,
     409        dump,
     410        true,
    368411        "block 0x%08x: next block 0x%08x is not a successor\n",
    369412        block,
    370413        next_block
    371414      );
    372       break;
     415     
     416      return false;
    373417    }
    374418   
    375419    if ( !_Heap_Is_prev_used( next_block ) ) {
    376       _Heap_Walk_printk(
    377         source,
    378         dump,
    379         error,
    380         "block 0x%08x: prev 0x%08x%s, next 0x%08x%s\n",
    381         block,
    382         block->prev,
    383         block->prev == first_free_block ?
    384           " (= first)"
    385             : (block->prev == free_list_head ? " (= head)" : ""),
    386         block->next,
    387         block->next == last_free_block ?
    388           " (= last)"
    389             : (block->next == free_list_tail ? " (= tail)" : "")
    390       );
    391 
    392       if ( block_size != next_block->prev_size ) {
    393         error = true;
    394         _Heap_Walk_printk(
    395           source,
    396           dump,
    397           error,
    398           "block 0x%08x: size %u != size %u (in next block 0x%08x)\n",
    399           block,
    400           block_size,
    401           next_block->prev_size,
    402           next_block
    403         );
     420      if ( !_Heap_Walk_check_free_block( source, dump, heap, block ) ) {
     421        return false;
    404422      }
    405 
    406       if ( !prev_used ) {
    407         error = true;
    408         _Heap_Walk_printk(
    409           source,
    410           dump,
    411           error,
    412           "block 0x%08x: two consecutive blocks are free\n",
    413           block
    414         );
    415       }
    416 
    417       if ( !_Heap_Walk_is_in_free_list( heap, block ) ) {
    418         error = true;
    419         _Heap_Walk_printk(
    420           source,
    421           dump,
    422           error,
    423           "block 0x%08x: free block not in free list\n",
    424           block
    425         );
    426       }
    427423    }
    428424
     
    430426  }
    431427
    432   if ( !_Addresses_Is_aligned( block ) ) {
    433     error = true;
    434     _Heap_Walk_printk(
    435       source,
    436       dump,
    437       error,
    438       "block 0x%08x: not CPU aligned\n",
    439       block
    440     );
    441 
    442     return false;
    443   }
    444 
    445   if ( block == last_block ) {
    446     uintptr_t const block_size = _Heap_Block_size( block );
    447 
    448     if ( block_size != page_size ) {
    449       error = true;
    450       _Heap_Walk_printk(
    451         source,
    452         dump,
    453         error,
    454         "last block 0x%08x: size %u != page size %u\n",
    455         block,
    456         block_size,
    457         page_size
    458       );
    459     }
    460   } else {
    461     error = true;
    462     _Heap_Walk_printk(
    463       source,
    464       dump,
    465       error,
    466       "last block 0x%08x != last block 0x%08x\n",
    467       block,
    468       last_block
    469     );
    470   }
    471 
    472   return !error;
    473 }
     428  return true;
     429}
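
    A short usage sketch of the improved walker; the parameter order follows the calls removed from heap.c above (_Heap_Walk( heap, 0, false )). The source value is only an identifier echoed in the diagnostic output, dump selects verbose output, and no checks are performed before the system is up (see the _System_state_Is_up() guard in the hunk above).

      #include <rtems/score/heap.h>

      /* Returns true if the heap data structures are consistent. */
      bool heap_is_consistent( Heap_Control *heap, bool verbose )
      {
        return _Heap_Walk( heap, 0, verbose );
      }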