source: rtems/cpukit/score/src/heapallocate.c @ 8a8b95aa

5
Last change on this file since 8a8b95aa was 4c20da4b, checked in by Sebastian Huber <sebastian.huber@…>, on 04/04/19 at 07:18:11

doxygen: Rename Score* groups in RTEMSScore*

Update #3706

  • Property mode set to 100644
File size: 7.7 KB
Line 
1/**
2 * @file
3 *
4 * @ingroup RTEMSScoreHeap
5 *
6 * @brief Heap Handler implementation.
7 */
8
9/*
10 *  COPYRIGHT (c) 1989-1999.
11 *  On-Line Applications Research Corporation (OAR).
12 *
13 *  Copyright (c) 2009 embedded brains GmbH.
14 *
15 *  The license and distribution terms for this file may be
16 *  found in the file LICENSE in this distribution or at
17 *  http://www.rtems.org/license/LICENSE.
18 */
19
20#if HAVE_CONFIG_H
21#include "config.h"
22#endif
23
24#include <rtems/score/heapimpl.h>
25
#ifndef HEAP_PROTECTION
  #define _Heap_Protection_free_delayed_blocks( heap, alloc_begin ) false
#else
  /*
   * Frees a portion of the delayed free blocks if the previous search failed
   * (alloc_begin == 0).  The count to free is the delayed block count divided
   * by the configured fraction, rounded up.  Returns true if at least one
   * block was freed, in which case the caller should search the free list
   * again.
   */
  static bool _Heap_Protection_free_delayed_blocks(
    Heap_Control *heap,
    uintptr_t alloc_begin
  )
  {
    uintptr_t const fraction = heap->Protection.delayed_free_fraction;
    uintptr_t const blocks_to_free =
      ( heap->Protection.delayed_free_block_count + fraction - 1 ) / fraction;
    Heap_Block *current;
    uintptr_t i;

    if ( alloc_begin != 0 || blocks_to_free == 0 ) {
      return false;
    }

    current = heap->Protection.first_delayed_free_block;

    for ( i = 0; i < blocks_to_free; ++i ) {
      Heap_Block *next;

      if ( !_Heap_Is_block_in_heap( heap, current ) ) {
        _Heap_Protection_block_error( heap, current );
      }

      /* Unlink the block and mark it as no longer delayed */
      next = current->Protection_begin.next_delayed_free_block;
      current->Protection_begin.next_delayed_free_block =
        HEAP_PROTECTION_OBOLUS;

      _Heap_Free( heap, (void *) _Heap_Alloc_area_of_block( current ) );

      current = next;
    }

    heap->Protection.delayed_free_block_count -= blocks_to_free;
    heap->Protection.first_delayed_free_block = current;

    return true;
  }
#endif
73
#ifdef RTEMS_HEAP_DEBUG
  /*
   * Verifies via _HAssert() that a completed allocation satisfies the heap
   * invariants: block size and alignment, placement of the allocation area
   * inside the block, the requested alignment, and the boundary constraint.
   */
  static void _Heap_Check_allocation(
    const Heap_Control *heap,
    const Heap_Block *block,
    uintptr_t alloc_begin,
    uintptr_t alloc_size,
    uintptr_t alignment,
    uintptr_t boundary
  )
  {
    uintptr_t const page_size = heap->page_size;
    uintptr_t const begin = (uintptr_t) block;
    uintptr_t const size = _Heap_Block_size( block );
    uintptr_t const end = begin + size;
    uintptr_t const alloc_end = alloc_begin + alloc_size;
    uintptr_t const area_begin = _Heap_Alloc_area_of_block( block );
    uintptr_t const area_offset = alloc_begin - area_begin;

    _HAssert( size >= heap->min_block_size );
    _HAssert( begin < end );
    _HAssert(
      _Heap_Is_aligned( begin + HEAP_BLOCK_HEADER_SIZE, page_size )
    );
    _HAssert( _Heap_Is_aligned( size, page_size ) );

    _HAssert( alloc_end <= end + HEAP_ALLOC_BONUS );
    _HAssert( area_begin == begin + HEAP_BLOCK_HEADER_SIZE );
    _HAssert( area_offset < page_size );

    _HAssert( _Heap_Is_aligned( area_begin, page_size ) );

    /* Without an explicit alignment, the allocation must start exactly at
       the allocation area, otherwise it must honour the alignment. */
    _HAssert(
      alignment == 0 ?
        alloc_begin == area_begin :
        _Heap_Is_aligned( alloc_begin, alignment )
    );

    if ( boundary != 0 ) {
      uintptr_t const boundary_line = _Heap_Align_down( alloc_end, boundary );

      _HAssert( alloc_size <= boundary );
      _HAssert( boundary_line <= alloc_begin || alloc_end <= boundary_line );
    }
  }
#else
  #define _Heap_Check_allocation( h, b, ab, as, ag, bd ) ((void) 0)
#endif
126
/*
 * Checks whether an allocation of alloc_size bytes with the given alignment
 * and boundary constraints fits into the free block.  Returns the allocation
 * begin address on success, and 0 otherwise.  A boundary of 0 means no
 * boundary constraint; the caller ensures alignment != 0 whenever
 * boundary != 0.
 */
static uintptr_t _Heap_Check_block(
  const Heap_Control *heap,
  const Heap_Block *block,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;

  uintptr_t const block_begin = (uintptr_t) block;
  uintptr_t const block_size = _Heap_Block_size( block );
  uintptr_t const block_end = block_begin + block_size;

  uintptr_t const alloc_begin_floor = _Heap_Alloc_area_of_block( block );
  uintptr_t const alloc_begin_ceiling = block_end - min_block_size
    + HEAP_BLOCK_HEADER_SIZE + page_size - 1;

  /* Start with the highest possible allocation end/begin and move down */
  uintptr_t alloc_end = block_end + HEAP_ALLOC_BONUS;
  uintptr_t alloc_begin = alloc_end - alloc_size;

  alloc_begin = _Heap_Align_down( alloc_begin, alignment );

  /* Ensure that we have a valid new block at the end */
  if ( alloc_begin > alloc_begin_ceiling ) {
    alloc_begin = _Heap_Align_down( alloc_begin_ceiling, alignment );
  }

  alloc_end = alloc_begin + alloc_size;

  /* Ensure boundary constraint */
  if ( boundary != 0 ) {
    uintptr_t const boundary_floor = alloc_begin_floor + alloc_size;
    uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );

    /* Move the allocation area down until it no longer crosses a boundary */
    while ( alloc_begin < boundary_line && boundary_line < alloc_end ) {
      if ( boundary_line < boundary_floor ) {
        /* The allocation area cannot be moved down any further */
        return 0;
      }
      alloc_begin = boundary_line - alloc_size;
      alloc_begin = _Heap_Align_down( alloc_begin, alignment );
      alloc_end = alloc_begin + alloc_size;
      boundary_line = _Heap_Align_down( alloc_end, boundary );
    }
  }

  /* Ensure that we have a valid new block at the beginning */
  if ( alloc_begin >= alloc_begin_floor ) {
    uintptr_t const alloc_block_begin =
      (uintptr_t) _Heap_Block_of_alloc_area( alloc_begin, page_size );
    uintptr_t const free_size = alloc_block_begin - block_begin;

    /* Either no space remains at the beginning, or there is enough of it to
       form a valid free block */
    if ( free_size >= min_block_size || free_size == 0 ) {
      return alloc_begin;
    }
  }

  return 0;
}
187
/*
 * Allocates alloc_size bytes from the heap with the given alignment, such
 * that the allocated area does not cross any boundary-aligned address.
 * Returns the begin of the allocated area, or NULL on failure.  An alignment
 * of 0 means no alignment constraint; a boundary of 0 means no boundary
 * constraint.
 */
void *_Heap_Allocate_aligned_with_boundary(
  Heap_Control *heap,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  Heap_Statistics *const stats = &heap->stats;
  /* Minimum block size (exclusive) able to satisfy this request */
  uintptr_t const block_size_floor = alloc_size + HEAP_BLOCK_HEADER_SIZE
    - HEAP_ALLOC_BONUS;
  uintptr_t const page_size = heap->page_size;
  Heap_Block *block = NULL;
  uintptr_t alloc_begin = 0;
  uint32_t search_count = 0;
  bool search_again = false;

  if ( block_size_floor < alloc_size ) {
    /* Integer overflow occurred */
    return NULL;
  }

  if ( boundary != 0 ) {
    /* An area larger than the boundary would always cross it */
    if ( boundary < alloc_size ) {
      return NULL;
    }

    /* _Heap_Check_block() relies on a non-zero alignment here */
    if ( alignment == 0 ) {
      alignment = page_size;
    }
  }

  do {
    Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );

    /* First-fit search over the free list */
    block = _Heap_Free_list_first( heap );
    while ( block != free_list_tail ) {
      _HAssert( _Heap_Is_prev_used( block ) );

      _Heap_Protection_block_check( heap, block );

      /*
       * The HEAP_PREV_BLOCK_USED flag is always set in the block size_and_flag
       * field.  Thus the value is about one unit larger than the real block
       * size.  The greater than operator takes this into account.
       */
      if ( block->size_and_flag > block_size_floor ) {
        if ( alignment == 0 ) {
          alloc_begin = _Heap_Alloc_area_of_block( block );
        } else {
          alloc_begin = _Heap_Check_block(
            heap,
            block,
            alloc_size,
            alignment,
            boundary
          );
        }
      }

      /* Statistics */
      ++search_count;

      if ( alloc_begin != 0 ) {
        break;
      }

      block = block->next;
    }

    /* On failure, heap protection may release delayed blocks and request a
       retry; without HEAP_PROTECTION this is always false. */
    search_again = _Heap_Protection_free_delayed_blocks( heap, alloc_begin );
  } while ( search_again );

  if ( alloc_begin != 0 ) {
    /* Split/mark the block; statistics count the successful allocation */
    block = _Heap_Block_allocate( heap, block, alloc_begin, alloc_size );

    _Heap_Check_allocation(
      heap,
      block,
      alloc_begin,
      alloc_size,
      alignment,
      boundary
    );

    /* Statistics */
    ++stats->allocs;
    stats->searches += search_count;
    stats->lifetime_allocated += _Heap_Block_size( block );
  } else {
    /* Statistics */
    ++stats->failed_allocs;
  }

  /* Statistics */
  if ( stats->max_search < search_count ) {
    stats->max_search = search_count;
  }

  return (void *) alloc_begin;
}
Note: See TracBrowser for help on using the repository browser.