source: rtems/cpukit/score/src/heapallocate.c @ 9278f3d

Last change on this file since 9278f3d was 9278f3d, checked in by Sebastian Huber <sebastian.huber@…>, on 11/27/20 at 16:21:23

score: Canonicalize Doxygen @file comments

Use common phrases for the file brief descriptions.

Update #3706.

  • Property mode set to 100644
File size: 7.9 KB
Line 
1/**
2 * @file
3 *
4 * @ingroup RTEMSScoreHeap
5 *
6 * @brief This source file contains the implementation of
7 *   _Heap_Allocate_aligned_with_boundary().
8 */
9
10/*
11 *  COPYRIGHT (c) 1989-1999.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  Copyright (c) 2009 embedded brains GmbH.
15 *
16 *  The license and distribution terms for this file may be
17 *  found in the file LICENSE in this distribution or at
18 *  http://www.rtems.org/license/LICENSE.
19 */
20
21#ifdef HAVE_CONFIG_H
22#include "config.h"
23#endif
24
25#include <rtems/score/heapimpl.h>
26
#ifndef HEAP_PROTECTION
  /* Without heap protection there are no delayed free blocks, so there is
   * never a reason to search the free list again. */
  #define _Heap_Protection_free_delayed_blocks( heap, alloc_begin ) false
#else
  /*
   * Frees a batch of the heap's delayed free blocks and reports whether the
   * caller should retry its free-list search.
   *
   * Nothing is done unless the allocation attempt failed (alloc_begin == 0)
   * and delayed free blocks are pending.  The batch size is the pending
   * block count divided by delayed_free_fraction, rounded up, so at least
   * one block is freed per call when any are pending.
   *
   * Returns true exactly when blocks were freed, i.e. when the allocator may
   * find a suitable block on a renewed search.
   */
  static bool _Heap_Protection_free_delayed_blocks(
    Heap_Control *heap,
    uintptr_t alloc_begin
  )
  {
    bool search_again = false;
    /* Ceiling division: ceil(count / fraction) */
    uintptr_t const blocks_to_free_count =
      (heap->Protection.delayed_free_block_count
         + heap->Protection.delayed_free_fraction - 1)
      / heap->Protection.delayed_free_fraction;

    if ( alloc_begin == 0 && blocks_to_free_count > 0 ) {
      Heap_Block *block_to_free = heap->Protection.first_delayed_free_block;
      uintptr_t count = 0;

      for ( count = 0; count < blocks_to_free_count; ++count ) {
        Heap_Block *next_block_to_free;

        /* A delayed block outside the heap area indicates corruption */
        if ( !_Heap_Is_block_in_heap( heap, block_to_free ) ) {
          _Heap_Protection_block_error(
            heap,
            block_to_free,
            HEAP_ERROR_BAD_FREE_BLOCK
          );
        }

        /*
         * Unlink the block from the delayed free list and replace its link
         * with the OBOLUS sentinel (presumably marking it as handed over to
         * _Heap_Free() — see heapimpl.h for the sentinel's exact semantics).
         */
        next_block_to_free =
          block_to_free->Protection_begin.next_delayed_free_block;
        block_to_free->Protection_begin.next_delayed_free_block =
          HEAP_PROTECTION_OBOLUS;

        _Heap_Free(
          heap,
          (void *) _Heap_Alloc_area_of_block( block_to_free )
        );

        block_to_free = next_block_to_free;
      }

      /* Account for the freed batch; the list head is now the first block
       * that was not freed. */
      heap->Protection.delayed_free_block_count -= blocks_to_free_count;
      heap->Protection.first_delayed_free_block = block_to_free;

      search_again = true;
    }

    return search_again;
  }
#endif
78
#ifdef RTEMS_HEAP_DEBUG
  /*
   * Verifies by assertion that a completed allocation is consistent with its
   * containing block: the block is well-formed, the allocation lies in the
   * block's allocation area, and the requested alignment and boundary
   * constraints hold.  Compiled only in heap debug builds; otherwise a
   * no-op macro.
   */
  static void _Heap_Check_allocation(
    const Heap_Control *heap,
    const Heap_Block *block,
    uintptr_t alloc_begin,
    uintptr_t alloc_size,
    uintptr_t alignment,
    uintptr_t boundary
  )
  {
    uintptr_t const min_block_size = heap->min_block_size;
    uintptr_t const page_size = heap->page_size;

    uintptr_t const block_begin = (uintptr_t) block;
    uintptr_t const block_size = _Heap_Block_size( block );
    uintptr_t const block_end = block_begin + block_size;

    uintptr_t const alloc_end = alloc_begin + alloc_size;

    uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
    uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;

    /* Block invariants: minimum size, no wrap-around, page-size alignment
     * of the allocation area and of the block size itself. */
    _HAssert( block_size >= min_block_size );
    _HAssert( block_begin < block_end );
    _HAssert(
      _Heap_Is_aligned( block_begin + HEAP_BLOCK_HEADER_SIZE, page_size )
    );
    _HAssert(
      _Heap_Is_aligned( block_size, page_size )
    );

    /* The allocation must fit in the block (the bonus extends the usable
     * area past the nominal block end) and start within one page of the
     * allocation area. */
    _HAssert( alloc_end <= block_end + HEAP_ALLOC_BONUS );
    _HAssert( alloc_area_begin == block_begin + HEAP_BLOCK_HEADER_SIZE);
    _HAssert( alloc_area_offset < page_size );

    /* With no explicit alignment the allocation starts exactly at the
     * allocation area; otherwise it honors the requested alignment. */
    _HAssert( _Heap_Is_aligned( alloc_area_begin, page_size ) );
    if ( alignment == 0 ) {
      _HAssert( alloc_begin == alloc_area_begin );
    } else {
      _HAssert( _Heap_Is_aligned( alloc_begin, alignment ) );
    }

    /* A boundary request forbids the allocation from crossing any multiple
     * of the boundary. */
    if ( boundary != 0 ) {
      uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );

      _HAssert( alloc_size <= boundary );
      _HAssert( boundary_line <= alloc_begin || alloc_end <= boundary_line );
    }
  }
#else
  #define _Heap_Check_allocation( h, b, ab, as, ag, bd ) ((void) 0)
#endif
131
/*
 * Checks whether a free block can satisfy an allocation of alloc_size bytes
 * with the given alignment (must be non-zero here) and boundary constraints.
 *
 * Returns the aligned allocation begin address inside the block, or 0 if the
 * block cannot satisfy the request.  The candidate address is chosen as high
 * as possible in the block, then adjusted downwards so that the remainders at
 * both ends of the block can form valid blocks (either empty or at least
 * min_block_size).
 */
static uintptr_t _Heap_Check_block(
  const Heap_Control *heap,
  const Heap_Block *block,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;

  uintptr_t const block_begin = (uintptr_t) block;
  uintptr_t const block_size = _Heap_Block_size( block );
  uintptr_t const block_end = block_begin + block_size;

  /* Lowest usable allocation address and the highest address that still
   * leaves room for a valid trailing block. */
  uintptr_t const alloc_begin_floor = _Heap_Alloc_area_of_block( block );
  uintptr_t const alloc_begin_ceiling = block_end - min_block_size
    + HEAP_BLOCK_HEADER_SIZE + page_size - 1;

  /* Start with the highest possible allocation end (the bonus extends the
   * usable area past the nominal block end) and work backwards. */
  uintptr_t alloc_end = block_end + HEAP_ALLOC_BONUS;
  uintptr_t alloc_begin = alloc_end - alloc_size;

  alloc_begin = _Heap_Align_down( alloc_begin, alignment );

  /* Ensure that we have a valid new block at the end */
  if ( alloc_begin > alloc_begin_ceiling ) {
    alloc_begin = _Heap_Align_down( alloc_begin_ceiling, alignment );
  }

  alloc_end = alloc_begin + alloc_size;

  /* Ensure boundary constraint */
  if ( boundary != 0 ) {
    uintptr_t const boundary_floor = alloc_begin_floor + alloc_size;
    uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );

    /* While the allocation straddles a boundary multiple, move it down so
     * it ends exactly on that line; give up once it would start below the
     * lowest usable address. */
    while ( alloc_begin < boundary_line && boundary_line < alloc_end ) {
      if ( boundary_line < boundary_floor ) {
        return 0;
      }
      alloc_begin = boundary_line - alloc_size;
      alloc_begin = _Heap_Align_down( alloc_begin, alignment );
      alloc_end = alloc_begin + alloc_size;
      boundary_line = _Heap_Align_down( alloc_end, boundary );
    }
  }

  /* Ensure that we have a valid new block at the beginning */
  if ( alloc_begin >= alloc_begin_floor ) {
    uintptr_t const alloc_block_begin =
      (uintptr_t) _Heap_Block_of_alloc_area( alloc_begin, page_size );
    uintptr_t const free_size = alloc_block_begin - block_begin;

    /* The leading remainder must be empty or big enough to be a block */
    if ( free_size >= min_block_size || free_size == 0 ) {
      return alloc_begin;
    }
  }

  return 0;
}
192
/*
 * Allocates alloc_size bytes from the heap, optionally aligned to the given
 * alignment and not crossing any multiple of the given boundary (see the
 * declaration in heapimpl.h for the full contract).
 *
 * Performs a first-fit search of the free list.  On failure with heap
 * protection enabled, delayed free blocks are released and the search is
 * retried.  Returns the allocation begin address, or NULL if no suitable
 * block exists, the size would overflow, or the boundary is smaller than the
 * requested size.  Allocation statistics are updated on both paths.
 */
void *_Heap_Allocate_aligned_with_boundary(
  Heap_Control *heap,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  Heap_Statistics *const stats = &heap->stats;
  /* Minimum block size able to hold the allocation: header overhead minus
   * the bonus area that extends past the nominal block end. */
  uintptr_t const block_size_floor = alloc_size + HEAP_BLOCK_HEADER_SIZE
    - HEAP_ALLOC_BONUS;
  uintptr_t const page_size = heap->page_size;
  Heap_Block *block = NULL;
  uintptr_t alloc_begin = 0;
  uint32_t search_count = 0;
  bool search_again = false;

  if ( block_size_floor < alloc_size ) {
    /* Integer overflow occurred */
    return NULL;
  }

  if ( boundary != 0 ) {
    /* An allocation larger than the boundary must cross it */
    if ( boundary < alloc_size ) {
      return NULL;
    }

    /* A boundary request needs an explicit alignment for the block check */
    if ( alignment == 0 ) {
      alignment = page_size;
    }
  }

  do {
    Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );

    /* First-fit scan of the free list */
    block = _Heap_Free_list_first( heap );
    while ( block != free_list_tail ) {
      _HAssert( _Heap_Is_prev_used( block ) );

      _Heap_Protection_block_check( heap, block );

      /*
       * The HEAP_PREV_BLOCK_USED flag is always set in the block size_and_flag
       * field.  Thus the value is about one unit larger than the real block
       * size.  The greater than operator takes this into account.
       */
      if ( block->size_and_flag > block_size_floor ) {
        if ( alignment == 0 ) {
          /* No constraints: allocate at the start of the block's area */
          alloc_begin = _Heap_Alloc_area_of_block( block );
        } else {
          /* Returns 0 if the block cannot satisfy the constraints */
          alloc_begin = _Heap_Check_block(
            heap,
            block,
            alloc_size,
            alignment,
            boundary
          );
        }
      }

      /* Statistics */
      ++search_count;

      if ( alloc_begin != 0 ) {
        break;
      }

      block = block->next;
    }

    /* With heap protection, a failed search may free delayed blocks and
     * request another pass; otherwise this is always false. */
    search_again = _Heap_Protection_free_delayed_blocks( heap, alloc_begin );
  } while ( search_again );

  if ( alloc_begin != 0 ) {
    /* Carve the allocation out of the found block */
    block = _Heap_Block_allocate( heap, block, alloc_begin, alloc_size );

    _Heap_Check_allocation(
      heap,
      block,
      alloc_begin,
      alloc_size,
      alignment,
      boundary
    );

    /* Statistics */
    ++stats->allocs;
    stats->searches += search_count;
    stats->lifetime_allocated += _Heap_Block_size( block );
  } else {
    /* Statistics */
    ++stats->failed_allocs;
  }

  /* Statistics */
  if ( stats->max_search < search_count ) {
    stats->max_search = search_count;
  }

  return (void *) alloc_begin;
}
Note: See TracBrowser for help on using the repository browser.