source: rtems/cpukit/score/src/heapallocate.c @ d006b46d

4.115
Last change on this file since d006b46d was d006b46d, checked in by Sebastian Huber <sebastian.huber@…>, on 11/28/14 at 10:53:55

score: Add heap statistics

Add lifetime bytes allocated and freed since they were present in the
malloc statistics. Add number of failed allocations.

  • Property mode set to 100644
File size: 7.6 KB
Line 
1/**
2 * @file
3 *
4 * @ingroup ScoreHeap
5 *
6 * @brief Heap Handler implementation.
7 */
8
9/*
10 *  COPYRIGHT (c) 1989-1999.
11 *  On-Line Applications Research Corporation (OAR).
12 *
13 *  Copyright (c) 2009 embedded brains GmbH.
14 *
15 *  The license and distribution terms for this file may be
16 *  found in the file LICENSE in this distribution or at
17 *  http://www.rtems.org/license/LICENSE.
18 */
19
20#if HAVE_CONFIG_H
21#include "config.h"
22#endif
23
24#include <rtems/system.h>
25#include <rtems/score/heapimpl.h>
26
#ifndef HEAP_PROTECTION
  /*
   * Without heap protection there is no delayed free list, thus there is
   * nothing to free and never a reason to search the free list again.
   */
  #define _Heap_Protection_free_delayed_blocks( heap, alloc_begin ) false
#else
  /*
   * Frees a batch of blocks from the delayed free list of the heap.
   *
   * Blocks are released only if the preceding allocation search failed
   * (alloc_begin == 0) and blocks are actually queued.  Only a fraction
   * (1 / delayed_free_fraction, rounded up) of the queued blocks is freed
   * per call to bound the work done in one allocation attempt.
   *
   * @param heap The heap control.
   * @param alloc_begin Allocation begin address found by the search, or 0 if
   *   the search was unsuccessful.
   *
   * @retval true Blocks were returned to the free list; the allocation
   *   search should be repeated.
   * @retval false Nothing was freed.
   */
  static bool _Heap_Protection_free_delayed_blocks(
    Heap_Control *heap,
    uintptr_t alloc_begin
  )
  {
    bool search_again = false;
    /* Ceiling division: free at least one block if any are queued */
    uintptr_t const blocks_to_free_count =
      (heap->Protection.delayed_free_block_count
         + heap->Protection.delayed_free_fraction - 1)
      / heap->Protection.delayed_free_fraction;

    if ( alloc_begin == 0 && blocks_to_free_count > 0 ) {
      Heap_Block *block_to_free = heap->Protection.first_delayed_free_block;
      uintptr_t count = 0;

      for ( count = 0; count < blocks_to_free_count; ++count ) {
        /* Save the successor before the link field is overwritten below */
        Heap_Block *next_block_to_free =
          block_to_free->Protection_begin.next_delayed_free_block;

        /*
         * NOTE(review): HEAP_PROTECTION_OBOLUS presumably marks the block as
         * no longer being a member of the delayed free list -- confirm in
         * <rtems/score/heapimpl.h>.
         */
        block_to_free->Protection_begin.next_delayed_free_block =
          HEAP_PROTECTION_OBOLUS;

        _Heap_Free(
          heap,
          (void *) _Heap_Alloc_area_of_block( block_to_free )
        );

        block_to_free = next_block_to_free;
      }

      heap->Protection.delayed_free_block_count -= blocks_to_free_count;
      heap->Protection.first_delayed_free_block = block_to_free;

      search_again = true;
    }

    return search_again;
  }
#endif
69
#ifdef RTEMS_HEAP_DEBUG
  /*
   * Verifies via assertions that a completed allocation is consistent: the
   * block geometry respects the heap parameters, the allocation area lies
   * within the block, and the requested alignment and boundary constraints
   * hold.  Compiled only in heap debug configurations; otherwise this is a
   * no-op macro.
   */
  static void _Heap_Check_allocation(
    const Heap_Control *heap,
    const Heap_Block *block,
    uintptr_t alloc_begin,
    uintptr_t alloc_size,
    uintptr_t alignment,
    uintptr_t boundary
  )
  {
    uintptr_t const page_size = heap->page_size;
    uintptr_t const block_start = (uintptr_t) block;
    uintptr_t const block_area_size = _Heap_Block_size( block );
    uintptr_t const block_limit = block_start + block_area_size;
    uintptr_t const alloc_limit = alloc_begin + alloc_size;
    uintptr_t const area_start = _Heap_Alloc_area_of_block( block );
    uintptr_t const area_offset = alloc_begin - area_start;

    /* Block geometry */
    _HAssert( block_area_size >= heap->min_block_size );
    _HAssert( block_start < block_limit );
    _HAssert(
      _Heap_Is_aligned( block_start + HEAP_BLOCK_HEADER_SIZE, page_size )
    );
    _HAssert(
      _Heap_Is_aligned( block_area_size, page_size )
    );

    /* Allocation area containment */
    _HAssert( alloc_limit <= block_limit + HEAP_ALLOC_BONUS );
    _HAssert( area_start == block_start + HEAP_BLOCK_HEADER_SIZE );
    _HAssert( area_offset < page_size );

    /* Alignment */
    _HAssert( _Heap_Is_aligned( area_start, page_size ) );
    if ( alignment != 0 ) {
      _HAssert( _Heap_Is_aligned( alloc_begin, alignment ) );
    } else {
      _HAssert( alloc_begin == area_start );
    }

    /* Boundary: no boundary line may lie strictly inside the area */
    if ( boundary != 0 ) {
      uintptr_t const boundary_line = _Heap_Align_down( alloc_limit, boundary );

      _HAssert( alloc_size <= boundary );
      _HAssert( boundary_line <= alloc_begin || alloc_limit <= boundary_line );
    }
  }
#else
  #define _Heap_Check_allocation( h, b, ab, as, ag, bd ) ((void) 0)
#endif
122
/*
 * Checks if the free block can satisfy the allocation request and returns
 * the begin address of the corresponding allocation area, or 0 if the block
 * is unsuitable.
 *
 * The allocation area is placed as high as possible inside the block (so
 * that the remaining space at the block begin may form a new free block) and
 * is then moved downwards to honour the alignment and boundary constraints.
 *
 * @param heap The heap providing the page size and minimum block size.
 * @param block The candidate free block.
 * @param alloc_size Size of the requested allocation area in bytes.
 * @param alignment Requested alignment of the allocation begin address.
 *   Must not be 0 (the caller uses this function only when an alignment is
 *   requested).
 * @param boundary The allocation area must not cross an integral multiple of
 *   this value, or 0 for no boundary constraint.
 *
 * @return A valid, properly aligned allocation begin address inside this
 *   block, or 0 on failure.
 */
static uintptr_t _Heap_Check_block(
  const Heap_Control *heap,
  const Heap_Block *block,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;

  uintptr_t const block_begin = (uintptr_t) block;
  uintptr_t const block_size = _Heap_Block_size( block );
  uintptr_t const block_end = block_begin + block_size;

  uintptr_t const alloc_begin_floor = _Heap_Alloc_area_of_block( block );
  /*
   * Uppermost begin address which still leaves room for a valid free block
   * of at least min_block_size at the block end; the page_size - 1 slack is
   * removed by the align down below.
   */
  uintptr_t const alloc_begin_ceiling = block_end - min_block_size
    + HEAP_BLOCK_HEADER_SIZE + page_size - 1;

  /* Start with the highest possible placement inside the block */
  uintptr_t alloc_end = block_end + HEAP_ALLOC_BONUS;
  uintptr_t alloc_begin = alloc_end - alloc_size;

  alloc_begin = _Heap_Align_down( alloc_begin, alignment );

  /* Ensure that we have a valid new block at the end */
  if ( alloc_begin > alloc_begin_ceiling ) {
    alloc_begin = _Heap_Align_down( alloc_begin_ceiling, alignment );
  }

  alloc_end = alloc_begin + alloc_size;

  /* Ensure boundary constraint */
  if ( boundary != 0 ) {
    /* Lowest begin address usable without colliding with the block begin */
    uintptr_t const boundary_floor = alloc_begin_floor + alloc_size;
    uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );

    /*
     * While a boundary line lies strictly inside the allocation area, move
     * the area below that line and re-align; give up if it would sink below
     * the usable area of the block.
     */
    while ( alloc_begin < boundary_line && boundary_line < alloc_end ) {
      if ( boundary_line < boundary_floor ) {
        return 0;
      }
      alloc_begin = boundary_line - alloc_size;
      alloc_begin = _Heap_Align_down( alloc_begin, alignment );
      alloc_end = alloc_begin + alloc_size;
      boundary_line = _Heap_Align_down( alloc_end, boundary );
    }
  }

  /* Ensure that we have a valid new block at the beginning */
  if ( alloc_begin >= alloc_begin_floor ) {
    uintptr_t const alloc_block_begin =
      (uintptr_t) _Heap_Block_of_alloc_area( alloc_begin, page_size );
    uintptr_t const free_size = alloc_block_begin - block_begin;

    /*
     * The space at the block begin must either vanish or be large enough to
     * form a valid free block on its own.
     */
    if ( free_size >= min_block_size || free_size == 0 ) {
      return alloc_begin;
    }
  }

  return 0;
}
183
/*
 * Allocates a memory area of at least alloc_size bytes from the heap using a
 * first-fit search of the free list.  An optional alignment of the begin
 * address and an optional boundary which the allocation area must not cross
 * can be requested.  Returns the begin address of the allocation area, or
 * NULL on failure.  Allocation statistics of the heap are updated.
 */
void *_Heap_Allocate_aligned_with_boundary(
  Heap_Control *heap,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  Heap_Statistics *const stats = &heap->stats;
  /*
   * Smallest block size able to satisfy the request; compared against the
   * size_and_flag field in the search loop below.
   */
  uintptr_t const block_size_floor = alloc_size + HEAP_BLOCK_HEADER_SIZE
    - HEAP_ALLOC_BONUS;
  uintptr_t const page_size = heap->page_size;
  Heap_Block *block = NULL;
  uintptr_t alloc_begin = 0;
  uint32_t search_count = 0;
  bool search_again = false;

  if ( block_size_floor < alloc_size ) {
    /* Integer overflow occurred */
    return NULL;
  }

  if ( boundary != 0 ) {
    /* An area of alloc_size bytes cannot fit between two boundary lines
     * which are less than alloc_size apart. */
    if ( boundary < alloc_size ) {
      return NULL;
    }

    /* A boundary constraint needs an alignment; default to the page size */
    if ( alignment == 0 ) {
      alignment = page_size;
    }
  }

  /*
   * Search the free list.  With heap protection enabled the search may be
   * repeated once delayed free blocks were returned to the free list.
   */
  do {
    Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );

    block = _Heap_Free_list_first( heap );
    while ( block != free_list_tail ) {
      _HAssert( _Heap_Is_prev_used( block ) );

      _Heap_Protection_block_check( heap, block );

      /*
       * The HEAP_PREV_BLOCK_USED flag is always set in the block size_and_flag
       * field.  Thus the value is about one unit larger than the real block
       * size.  The greater than operator takes this into account.
       */
      if ( block->size_and_flag > block_size_floor ) {
        if ( alignment == 0 ) {
          alloc_begin = _Heap_Alloc_area_of_block( block );
        } else {
          alloc_begin = _Heap_Check_block(
            heap,
            block,
            alloc_size,
            alignment,
            boundary
          );
        }
      }

      /* Statistics */
      ++search_count;

      if ( alloc_begin != 0 ) {
        break;
      }

      block = block->next;
    }

    search_again = _Heap_Protection_free_delayed_blocks( heap, alloc_begin );
  } while ( search_again );

  if ( alloc_begin != 0 ) {
    /* Split the block and mark the allocation area as used */
    block = _Heap_Block_allocate( heap, block, alloc_begin, alloc_size );

    _Heap_Check_allocation(
      heap,
      block,
      alloc_begin,
      alloc_size,
      alignment,
      boundary
    );

    /* Statistics */
    ++stats->allocs;
    stats->searches += search_count;
    stats->lifetime_allocated += _Heap_Block_size( block );
  } else {
    /* Statistics */
    ++stats->failed_allocs;
  }

  /* Statistics */
  if ( stats->max_search < search_count ) {
    stats->max_search = search_count;
  }

  /* alloc_begin is 0 (NULL) if no suitable block was found */
  return (void *) alloc_begin;
}
Note: See TracBrowser for help on using the repository browser.