source: rtems/cpukit/score/src/heapfree.c @ fce900b5

Last change to this file (as of fce900b5) was commit d006b46d, checked in by Sebastian Huber <sebastian.huber@…> on 11/28/14 at 10:53:55

score: Add heap statistics

Add lifetime bytes allocated and freed since they were present in the
malloc statistics. Add number of failed allocations.

/**
 * @file
 *
 * @ingroup ScoreHeap
 *
 * @brief Heap Handler implementation.
 */

/*
 *  COPYRIGHT (c) 1989-2007.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/score/heapimpl.h>
#include <rtems/score/threaddispatch.h>

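/*
 * Without HEAP_PROTECTION the protection hook below collapses to a constant,
 * so every freed block is returned to the free list immediately.  With
 * HEAP_PROTECTION enabled, the helpers in the #else branch implement a
 * delayed-free scheme that fills freed blocks with a known pattern so later
 * corruption can be detected.
 */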
#ifndef HEAP_PROTECTION
  #define _Heap_Protection_determine_block_free( heap, block ) true
#else
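  /*
   * Defer the release of the block: record the executing task, append the
   * block to the heap's delayed free list, and fill its allocation area with
   * HEAP_FREE_PATTERN so a later write through a stale pointer can be
   * detected.
   */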
  static void _Heap_Protection_delay_block_free(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    uintptr_t *const pattern_begin = (uintptr_t *)
      _Heap_Alloc_area_of_block( block );
    uintptr_t *const pattern_end = (uintptr_t *)
      ((uintptr_t) block + _Heap_Block_size( block ) + HEAP_ALLOC_BONUS);
    uintptr_t const delayed_free_block_count =
      heap->Protection.delayed_free_block_count;
    uintptr_t *current = NULL;

    block->Protection_begin.next_delayed_free_block = block;
    block->Protection_begin.task = _Thread_Get_executing();

    if ( delayed_free_block_count > 0 ) {
      Heap_Block *const last = heap->Protection.last_delayed_free_block;

      last->Protection_begin.next_delayed_free_block = block;
    } else {
      heap->Protection.first_delayed_free_block = block;
    }
    heap->Protection.last_delayed_free_block = block;
    heap->Protection.delayed_free_block_count = delayed_free_block_count + 1;

    for ( current = pattern_begin; current != pattern_end; ++current ) {
      *current = HEAP_FREE_PATTERN;
    }
  }

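  /*
   * Verify that the allocation area of a delayed-free block still contains
   * the HEAP_FREE_PATTERN fill; the first mismatch is reported as a block
   * error.
   */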
  static void _Heap_Protection_check_free_block(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    uintptr_t *const pattern_begin = (uintptr_t *)
      _Heap_Alloc_area_of_block( block );
    uintptr_t *const pattern_end = (uintptr_t *)
      ((uintptr_t) block + _Heap_Block_size( block ) + HEAP_ALLOC_BONUS);
    uintptr_t *current = NULL;

    for ( current = pattern_begin; current != pattern_end; ++current ) {
      if ( *current != HEAP_FREE_PATTERN ) {
        _Heap_Protection_block_error( heap, block );
        break;
      }
    }
  }

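  /*
   * Decide whether the block may really be freed now.  A NULL link means the
   * block has not been delayed yet, so its release is deferred.  The
   * HEAP_PROTECTION_OBOLUS marker identifies a block whose deferred free is
   * now being completed, so only its fill pattern is checked.  Any other
   * value indicates a block error (for example a double free).
   */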
  static bool _Heap_Protection_determine_block_free(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    bool do_free = true;
    Heap_Block *const next = block->Protection_begin.next_delayed_free_block;

    if ( next == NULL ) {
      _Heap_Protection_delay_block_free( heap, block );
      do_free = false;
    } else if ( next == HEAP_PROTECTION_OBOLUS ) {
      _Heap_Protection_check_free_block( heap, block );
    } else {
      _Heap_Protection_block_error( heap, block );
    }

    return do_free;
  }
#endif

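/*
 * Release the allocated area back to the heap: locate the block for the
 * allocation area, validate it and its successor, let the heap protection
 * decide whether the free must be delayed, coalesce with free neighbours
 * where possible, and update the statistics.
 */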
bool _Heap_Free( Heap_Control *heap, void *alloc_begin_ptr )
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t alloc_begin;
  Heap_Block *block;
  Heap_Block *next_block = NULL;
  uintptr_t block_size = 0;
  uintptr_t next_block_size = 0;
  bool next_is_free = false;

  /*
   * If NULL, return true so a free on NULL is considered a valid release.
   * This is a special case that could be handled by the in-heap check;
   * however, that would result in false being returned, which is wrong.
   */
  if ( alloc_begin_ptr == NULL ) {
    return true;
  }

  alloc_begin = (uintptr_t) alloc_begin_ptr;
  block = _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );

  if ( !_Heap_Is_block_in_heap( heap, block ) ) {
    return false;
  }

  _Heap_Protection_block_check( heap, block );

  block_size = _Heap_Block_size( block );
  next_block = _Heap_Block_at( block, block_size );

  if ( !_Heap_Is_block_in_heap( heap, next_block ) ) {
    return false;
  }

  _Heap_Protection_block_check( heap, next_block );

  if ( !_Heap_Is_prev_used( next_block ) ) {
    _Heap_Protection_block_error( heap, block );
    return false;
  }

  if ( !_Heap_Protection_determine_block_free( heap, block ) ) {
    return true;
  }

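  /*
   * The next block is free if it is not the heap's last block and the block
   * after it has its HEAP_PREV_BLOCK_USED flag cleared.
   */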
  next_block_size = _Heap_Block_size( next_block );
  next_is_free = next_block != heap->last_block
    && !_Heap_Is_prev_used( _Heap_Block_at( next_block, next_block_size ) );

  if ( !_Heap_Is_prev_used( block ) ) {
    uintptr_t const prev_size = block->prev_size;
    Heap_Block *const prev_block = _Heap_Block_at( block, -prev_size );

    if ( !_Heap_Is_block_in_heap( heap, prev_block ) ) {
      _HAssert( false );
      return false;
    }

    /* As we always coalesce free blocks, the block that precedes prev_block
       must have been used. */
    if ( !_Heap_Is_prev_used( prev_block ) ) {
      _HAssert( false );
      return false;
    }

    if ( next_is_free ) {       /* coalesce both */
      uintptr_t const size = block_size + prev_size + next_block_size;
      _Heap_Free_list_remove( next_block );
      stats->free_blocks -= 1;
      prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
      next_block = _Heap_Block_at( prev_block, size );
      _HAssert( !_Heap_Is_prev_used( next_block ) );
      next_block->prev_size = size;
    } else {                      /* coalesce prev */
      uintptr_t const size = block_size + prev_size;
      prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
      next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
      next_block->prev_size = size;
    }
  } else if ( next_is_free ) {    /* coalesce next */
    uintptr_t const size = block_size + next_block_size;
    _Heap_Free_list_replace( next_block, block );
    block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
    next_block = _Heap_Block_at( block, size );
    next_block->prev_size = size;
  } else {                        /* no coalesce */
    /* Add 'block' to the head of the free blocks list as it tends to
       produce less fragmentation than adding to the tail. */
    _Heap_Free_list_insert_after( _Heap_Free_list_head( heap ), block );
    block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
    next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
    next_block->prev_size = block_size;

    /* Statistics */
    ++stats->free_blocks;
    if ( stats->max_free_blocks < stats->free_blocks ) {
      stats->max_free_blocks = stats->free_blocks;
    }
  }

  /* Statistics */
  --stats->used_blocks;
  ++stats->frees;
  stats->free_size += block_size;
  stats->lifetime_freed += block_size;

  return true;
}
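
For orientation, here is a minimal usage sketch of the free path above, kept outside the file itself. It is an illustration only: it assumes the _Heap_Initialize() and _Heap_Allocate() declarations from <rtems/score/heapimpl.h> keep their usual signatures, that a page size of 0 selects the default, and that the heap area size is an arbitrary choice for the example.

#include <rtems/score/heapimpl.h>

static char example_area[ 4096 ];
static Heap_Control example_heap;

void example_free_path( void )
{
  /* Assumption of this sketch: page size 0 selects the default page size. */
  uintptr_t max_available =
    _Heap_Initialize( &example_heap, example_area, sizeof( example_area ), 0 );

  if ( max_available > 0 ) {
    void *p = _Heap_Allocate( &example_heap, 32 );

    if ( p != NULL ) {
      /* _Heap_Free() returns true on success (and also for a NULL pointer). */
      bool ok = _Heap_Free( &example_heap, p );

      /* The statistics updated by _Heap_Free() are visible via heap->stats. */
      (void) ok;
      (void) example_heap.stats.frees;
      (void) example_heap.stats.lifetime_freed;
    }
  }
}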