source: rtems/cpukit/score/src/heapallocate.c @ 3859cd63

5
Last change on this file since 3859cd63 was 3859cd63, checked in by sebastian.huber <sebastian.huber@…>, on 10/25/19 at 13:19:01

rtems-5: Improve heap fatal error information

Update #3806.

  • Property mode set to 100644
File size: 7.8 KB
Line 
1/**
2 * @file
3 *
4 * @ingroup RTEMSScoreHeap
5 *
6 * @brief Heap Handler implementation.
7 */
8
9/*
10 *  COPYRIGHT (c) 1989-1999.
11 *  On-Line Applications Research Corporation (OAR).
12 *
13 *  Copyright (c) 2009 embedded brains GmbH.
14 *
15 *  The license and distribution terms for this file may be
16 *  found in the file LICENSE in this distribution or at
17 *  http://www.rtems.org/license/LICENSE.
18 */
19
20#if HAVE_CONFIG_H
21#include "config.h"
22#endif
23
24#include <rtems/score/heapimpl.h>
25
#ifndef HEAP_PROTECTION
  #define _Heap_Protection_free_delayed_blocks( heap, alloc_begin ) false
#else
  /*
   * Frees a fraction of the delayed free blocks after a failed allocation
   * search (alloc_begin == 0).  Returns true if and only if blocks were
   * actually freed, in which case the allocation search should be retried.
   */
  static bool _Heap_Protection_free_delayed_blocks(
    Heap_Control *heap,
    uintptr_t alloc_begin
  )
  {
    uintptr_t const fraction = heap->Protection.delayed_free_fraction;
    /* Round up so that at least one block is freed when any are pending */
    uintptr_t const victim_count =
      ( heap->Protection.delayed_free_block_count + fraction - 1 ) / fraction;
    Heap_Block *victim;
    uintptr_t i;

    /* Only act when the search failed and there is something to free */
    if ( alloc_begin != 0 || victim_count == 0 ) {
      return false;
    }

    victim = heap->Protection.first_delayed_free_block;

    for ( i = 0; i < victim_count; ++i ) {
      Heap_Block *next_victim;

      /* A delayed free block outside the heap indicates corruption */
      if ( !_Heap_Is_block_in_heap( heap, victim ) ) {
        _Heap_Protection_block_error(
          heap,
          victim,
          HEAP_ERROR_BAD_FREE_BLOCK
        );
      }

      next_victim = victim->Protection_begin.next_delayed_free_block;
      /* Mark the block as no longer being on the delayed free list */
      victim->Protection_begin.next_delayed_free_block =
        HEAP_PROTECTION_OBOLUS;

      _Heap_Free( heap, (void *) _Heap_Alloc_area_of_block( victim ) );

      victim = next_victim;
    }

    heap->Protection.delayed_free_block_count -= victim_count;
    heap->Protection.first_delayed_free_block = victim;

    return true;
  }
#endif
77
#ifdef RTEMS_HEAP_DEBUG
  /*
   * Sanity checks a completed allocation: the block must be properly sized
   * and aligned, and the allocated area must lie within the block and honor
   * the requested alignment and boundary constraints.
   */
  static void _Heap_Check_allocation(
    const Heap_Control *heap,
    const Heap_Block *block,
    uintptr_t alloc_begin,
    uintptr_t alloc_size,
    uintptr_t alignment,
    uintptr_t boundary
  )
  {
    uintptr_t const min_block_size = heap->min_block_size;
    uintptr_t const page_size = heap->page_size;

    uintptr_t const blk_begin = (uintptr_t) block;
    uintptr_t const blk_size = _Heap_Block_size( block );
    uintptr_t const blk_end = blk_begin + blk_size;

    uintptr_t const alloc_end = alloc_begin + alloc_size;

    uintptr_t const area_begin = _Heap_Alloc_area_of_block( block );
    uintptr_t const area_offset = alloc_begin - area_begin;

    /* Block geometry checks */
    _HAssert( blk_size >= min_block_size );
    _HAssert( blk_begin < blk_end );
    _HAssert(
      _Heap_Is_aligned( blk_begin + HEAP_BLOCK_HEADER_SIZE, page_size )
    );
    _HAssert(
      _Heap_Is_aligned( blk_size, page_size )
    );

    /* Allocated area must fit in the block (plus the allocation bonus) */
    _HAssert( alloc_end <= blk_end + HEAP_ALLOC_BONUS );
    _HAssert( area_begin == blk_begin + HEAP_BLOCK_HEADER_SIZE );
    _HAssert( area_offset < page_size );

    _HAssert( _Heap_Is_aligned( area_begin, page_size ) );
    if ( alignment != 0 ) {
      _HAssert( _Heap_Is_aligned( alloc_begin, alignment ) );
    } else {
      /* Without an alignment request the area starts at the area begin */
      _HAssert( alloc_begin == area_begin );
    }

    if ( boundary != 0 ) {
      uintptr_t const boundary_line = _Heap_Align_down( alloc_end, boundary );

      _HAssert( alloc_size <= boundary );
      /* The allocated area must not cross a boundary line */
      _HAssert( boundary_line <= alloc_begin || alloc_end <= boundary_line );
    }
  }
#else
  #define _Heap_Check_allocation( h, b, ab, as, ag, bd ) ((void) 0)
#endif
130
/*
 * Checks whether a free block can satisfy an allocation with the given
 * size, alignment and boundary constraints.  Returns the aligned begin of
 * the allocated area inside the block, or 0 if the block cannot satisfy
 * the request.
 */
static uintptr_t _Heap_Check_block(
  const Heap_Control *heap,
  const Heap_Block *block,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;

  uintptr_t const block_begin = (uintptr_t) block;
  uintptr_t const block_size = _Heap_Block_size( block );
  uintptr_t const block_end = block_begin + block_size;

  /* Lowest possible begin of the allocated area within this block */
  uintptr_t const alloc_begin_floor = _Heap_Alloc_area_of_block( block );
  /*
   * Highest admissible begin of the allocated area such that a remainder
   * of at least min_block_size can form a valid block at the block end;
   * the "+ page_size - 1" slack accounts for the align down applied below.
   */
  uintptr_t const alloc_begin_ceiling = block_end - min_block_size
    + HEAP_BLOCK_HEADER_SIZE + page_size - 1;

  /* Start as high as possible; alloc_end may use the allocation bonus */
  uintptr_t alloc_end = block_end + HEAP_ALLOC_BONUS;
  uintptr_t alloc_begin = alloc_end - alloc_size;

  alloc_begin = _Heap_Align_down( alloc_begin, alignment );

  /* Ensure that we have a valid new block at the end */
  if ( alloc_begin > alloc_begin_ceiling ) {
    alloc_begin = _Heap_Align_down( alloc_begin_ceiling, alignment );
  }

  alloc_end = alloc_begin + alloc_size;

  /* Ensure boundary constraint */
  if ( boundary != 0 ) {
    /* Going below this line would push the area before the block's area */
    uintptr_t const boundary_floor = alloc_begin_floor + alloc_size;
    uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );

    /* Move the area downwards until it no longer crosses a boundary line */
    while ( alloc_begin < boundary_line && boundary_line < alloc_end ) {
      if ( boundary_line < boundary_floor ) {
        return 0;
      }
      alloc_begin = boundary_line - alloc_size;
      alloc_begin = _Heap_Align_down( alloc_begin, alignment );
      alloc_end = alloc_begin + alloc_size;
      boundary_line = _Heap_Align_down( alloc_end, boundary );
    }
  }

  /* Ensure that we have a valid new block at the beginning */
  if ( alloc_begin >= alloc_begin_floor ) {
    uintptr_t const alloc_block_begin =
      (uintptr_t) _Heap_Block_of_alloc_area( alloc_begin, page_size );
    /* Space left at the block begin: must be zero or a whole free block */
    uintptr_t const free_size = alloc_block_begin - block_begin;

    if ( free_size >= min_block_size || free_size == 0 ) {
      return alloc_begin;
    }
  }

  return 0;
}
191
/*
 * Allocates a memory area of at least alloc_size bytes from the heap.
 *
 * If alignment is non-zero, the begin of the allocated area is aligned to
 * it.  If boundary is non-zero, the allocated area will not cross an
 * address that is a multiple of boundary; this requires boundary >=
 * alloc_size and forces a non-zero alignment (defaulted to the page size).
 *
 * Returns the begin of the allocated area, or NULL if the request cannot
 * be satisfied.
 */
void *_Heap_Allocate_aligned_with_boundary(
  Heap_Control *heap,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  Heap_Statistics *const stats = &heap->stats;
  /* Minimum block size capable of holding the requested allocation */
  uintptr_t const block_size_floor = alloc_size + HEAP_BLOCK_HEADER_SIZE
    - HEAP_ALLOC_BONUS;
  uintptr_t const page_size = heap->page_size;
  Heap_Block *block = NULL;
  uintptr_t alloc_begin = 0;
  uint32_t search_count = 0;
  bool search_again = false;

  if ( block_size_floor < alloc_size ) {
    /* Integer overflow occurred */
    return NULL;
  }

  if ( boundary != 0 ) {
    if ( boundary < alloc_size ) {
      /* The area could never fit between two boundary lines */
      return NULL;
    }

    if ( alignment == 0 ) {
      alignment = page_size;
    }
  }

  do {
    Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );

    /* First-fit search through the free list */
    block = _Heap_Free_list_first( heap );
    while ( block != free_list_tail ) {
      _HAssert( _Heap_Is_prev_used( block ) );

      _Heap_Protection_block_check( heap, block );

      /*
       * The HEAP_PREV_BLOCK_USED flag is always set in the block size_and_flag
       * field.  Thus the value is about one unit larger than the real block
       * size.  The greater than operator takes this into account.
       */
      if ( block->size_and_flag > block_size_floor ) {
        if ( alignment == 0 ) {
          alloc_begin = _Heap_Alloc_area_of_block( block );
        } else {
          alloc_begin = _Heap_Check_block(
            heap,
            block,
            alloc_size,
            alignment,
            boundary
          );
        }
      }

      /* Statistics */
      ++search_count;

      if ( alloc_begin != 0 ) {
        break;
      }

      block = block->next;
    }

    /*
     * With HEAP_PROTECTION enabled a failed search may free delayed blocks
     * and request another pass; otherwise this macro is simply false.
     */
    search_again = _Heap_Protection_free_delayed_blocks( heap, alloc_begin );
  } while ( search_again );

  if ( alloc_begin != 0 ) {
    /* Split the block and mark the allocated part as used */
    block = _Heap_Block_allocate( heap, block, alloc_begin, alloc_size );

    _Heap_Check_allocation(
      heap,
      block,
      alloc_begin,
      alloc_size,
      alignment,
      boundary
    );

    /* Statistics */
    ++stats->allocs;
    stats->searches += search_count;
    stats->lifetime_allocated += _Heap_Block_size( block );
  } else {
    /* Statistics */
    ++stats->failed_allocs;
  }

  /* Statistics */
  if ( stats->max_search < search_count ) {
    stats->max_search = search_count;
  }

  return (void *) alloc_begin;
}
Note: See TracBrowser for help on using the repository browser.