source: rtems/cpukit/score/src/heapallocate.c

Last change on this file was bcef89f2, checked in by Sebastian Huber <sebastian.huber@…>, on 05/19/23 at 06:18:25

Update company name

The embedded brains GmbH & Co. KG is the legal successor of embedded
brains GmbH.

  • Property mode set to 100644
File size: 9.3 KB
Line 
1/* SPDX-License-Identifier: BSD-2-Clause */
2
3/**
4 * @file
5 *
6 * @ingroup RTEMSScoreHeap
7 *
8 * @brief This source file contains the implementation of
9 *   _Heap_Allocate_aligned_with_boundary().
10 */
11
12/*
13 *  COPYRIGHT (c) 1989-1999.
14 *  On-Line Applications Research Corporation (OAR).
15 *
16 *  Copyright (c) 2009 embedded brains GmbH & Co. KG
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions
20 * are met:
21 * 1. Redistributions of source code must retain the above copyright
22 *    notice, this list of conditions and the following disclaimer.
23 * 2. Redistributions in binary form must reproduce the above copyright
24 *    notice, this list of conditions and the following disclaimer in the
25 *    documentation and/or other materials provided with the distribution.
26 *
27 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
28 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
29 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
30 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
31 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
32 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
33 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
34 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
35 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
36 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
37 * POSSIBILITY OF SUCH DAMAGE.
38 */
39
40#ifdef HAVE_CONFIG_H
41#include "config.h"
42#endif
43
44#include <rtems/score/heapimpl.h>
45
#ifndef HEAP_PROTECTION
  /*
   * Without heap protection there are never any delayed free blocks, so a
   * failed allocation search never needs to be retried.
   */
  #define _Heap_Protection_free_delayed_blocks( heap, alloc_begin ) false
#else
  /*
   * Frees a batch of delayed free blocks and indicates whether the
   * allocation search should be retried.
   *
   * Blocks are only released when the previous free-list search failed
   * (alloc_begin == 0).  In that case roughly
   * 1 / heap->Protection.delayed_free_fraction of the currently delayed
   * blocks (rounded up, so at least one when any are pending) are handed
   * to _Heap_Free(), walking the singly-linked list rooted at
   * heap->Protection.first_delayed_free_block.  Each released block gets
   * HEAP_PROTECTION_OBOLUS stored in its next pointer to mark it as no
   * longer delayed.
   *
   * Returns true if any blocks were freed (the caller should search the
   * free list again), and false otherwise.
   */
  static bool _Heap_Protection_free_delayed_blocks(
    Heap_Control *heap,
    uintptr_t alloc_begin
  )
  {
    bool search_again = false;
    /* Ceiling division: free delayed_free_block_count / fraction blocks */
    uintptr_t const blocks_to_free_count =
      (heap->Protection.delayed_free_block_count
         + heap->Protection.delayed_free_fraction - 1)
      / heap->Protection.delayed_free_fraction;

    if ( alloc_begin == 0 && blocks_to_free_count > 0 ) {
      Heap_Block *block_to_free = heap->Protection.first_delayed_free_block;
      uintptr_t count = 0;

      for ( count = 0; count < blocks_to_free_count; ++count ) {
        Heap_Block *next_block_to_free;

        /* A delayed free block outside the heap indicates corruption */
        if ( !_Heap_Is_block_in_heap( heap, block_to_free ) ) {
          _Heap_Protection_block_error(
            heap,
            block_to_free,
            HEAP_ERROR_BAD_FREE_BLOCK
          );
        }

        /* Unlink the block and mark it as actually freed */
        next_block_to_free =
          block_to_free->Protection_begin.next_delayed_free_block;
        block_to_free->Protection_begin.next_delayed_free_block =
          HEAP_PROTECTION_OBOLUS;

        _Heap_Free(
          heap,
          (void *) _Heap_Alloc_area_of_block( block_to_free )
        );

        block_to_free = next_block_to_free;
      }

      heap->Protection.delayed_free_block_count -= blocks_to_free_count;
      heap->Protection.first_delayed_free_block = block_to_free;

      search_again = true;
    }

    return search_again;
  }

  /*
   * Frees all delayed free blocks by repeatedly releasing batches (with
   * alloc_begin == 0 to force the release path) until none remain.
   */
  void _Heap_Protection_free_all_delayed_blocks( Heap_Control *heap )
  {
    bool search_again;

    do {
      search_again = _Heap_Protection_free_delayed_blocks( heap, 0 );
    } while ( search_again );
  }
#endif
106
#ifdef RTEMS_HEAP_DEBUG
  /*
   * Verifies (via _HAssert) that a completed allocation satisfies the heap
   * invariants: the carved block is well-formed and page aligned, the
   * allocation lies within the block (allowing for HEAP_ALLOC_BONUS), and
   * the requested alignment and boundary constraints hold.  Debug builds
   * only; compiled out otherwise.
   */
  static void _Heap_Check_allocation(
    const Heap_Control *heap,
    const Heap_Block *block,
    uintptr_t alloc_begin,
    uintptr_t alloc_size,
    uintptr_t alignment,
    uintptr_t boundary
  )
  {
    uintptr_t const min_block_size = heap->min_block_size;
    uintptr_t const page_size = heap->page_size;

    uintptr_t const block_begin = (uintptr_t) block;
    uintptr_t const block_size = _Heap_Block_size( block );
    uintptr_t const block_end = block_begin + block_size;

    uintptr_t const alloc_end = alloc_begin + alloc_size;

    uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
    uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;

    /* Block size and placement invariants */
    _HAssert( block_size >= min_block_size );
    _HAssert( block_begin < block_end );
    _HAssert(
      _Heap_Is_aligned( block_begin + HEAP_BLOCK_HEADER_SIZE, page_size )
    );
    _HAssert(
      _Heap_Is_aligned( block_size, page_size )
    );

    /* The allocation must fit in the block (plus the alloc bonus) and may
       only be offset from the alloc area start by less than a page */
    _HAssert( alloc_end <= block_end + HEAP_ALLOC_BONUS );
    _HAssert( alloc_area_begin == block_begin + HEAP_BLOCK_HEADER_SIZE);
    _HAssert( alloc_area_offset < page_size );

    _HAssert( _Heap_Is_aligned( alloc_area_begin, page_size ) );
    if ( alignment == 0 ) {
      /* No alignment requested: the allocation starts at the alloc area */
      _HAssert( alloc_begin == alloc_area_begin );
    } else {
      _HAssert( _Heap_Is_aligned( alloc_begin, alignment ) );
    }

    if ( boundary != 0 ) {
      /* No boundary line may fall strictly inside the allocated area */
      uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );

      _HAssert( alloc_size <= boundary );
      _HAssert( boundary_line <= alloc_begin || alloc_end <= boundary_line );
    }
  }
#else
  #define _Heap_Check_allocation( h, b, ab, as, ag, bd ) ((void) 0)
#endif
159
/*
 * Checks if a free block can satisfy an allocation request with the given
 * size, alignment and boundary constraints.
 *
 * Starting from the highest possible allocation position in the block
 * (using the HEAP_ALLOC_BONUS slack at the block end), the candidate
 * begin address is aligned down and then adjusted until it neither
 * crosses a boundary line nor leaves an invalid fragment at either end of
 * the block.
 *
 * Returns the usable allocation begin address within the block, or 0 if
 * the block cannot satisfy the request.  The caller must pass a non-zero
 * alignment (the allocate path substitutes the page size beforehand).
 */
static uintptr_t _Heap_Check_block(
  const Heap_Control *heap,
  const Heap_Block *block,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;

  uintptr_t const block_begin = (uintptr_t) block;
  uintptr_t const block_size = _Heap_Block_size( block );
  uintptr_t const block_end = block_begin + block_size;

  /* Lowest usable begin address: the block's allocation area start */
  uintptr_t const alloc_begin_floor = _Heap_Alloc_area_of_block( block );
  /* Highest begin address that still leaves room for a valid free block
     at the end of this one (the "+ page_size - 1" accounts for the
     subsequent down-alignment) */
  uintptr_t const alloc_begin_ceiling = block_end - min_block_size
    + HEAP_BLOCK_HEADER_SIZE + page_size - 1;

  /* Start from the top of the block, including the alloc bonus slack */
  uintptr_t alloc_end = block_end + HEAP_ALLOC_BONUS;
  uintptr_t alloc_begin = alloc_end - alloc_size;

  alloc_begin = _Heap_Align_down( alloc_begin, alignment );

  /* Ensure that we have a valid new block at the end */
  if ( alloc_begin > alloc_begin_ceiling ) {
    alloc_begin = _Heap_Align_down( alloc_begin_ceiling, alignment );
  }

  alloc_end = alloc_begin + alloc_size;

  /* Ensure boundary constraint */
  if ( boundary != 0 ) {
    /* Lowest alloc_end for which the allocation still fits in the block */
    uintptr_t const boundary_floor = alloc_begin_floor + alloc_size;
    uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );

    /* Move the allocation below each boundary line it straddles; each
       iteration lowers alloc_begin, so the loop terminates or bails out */
    while ( alloc_begin < boundary_line && boundary_line < alloc_end ) {
      if ( boundary_line < boundary_floor ) {
        /* No position within the block avoids the boundary */
        return 0;
      }
      alloc_begin = boundary_line - alloc_size;
      alloc_begin = _Heap_Align_down( alloc_begin, alignment );
      alloc_end = alloc_begin + alloc_size;
      boundary_line = _Heap_Align_down( alloc_end, boundary );
    }
  }

  /* Ensure that we have a valid new block at the beginning */
  if ( alloc_begin >= alloc_begin_floor ) {
    uintptr_t const alloc_block_begin =
      (uintptr_t) _Heap_Block_of_alloc_area( alloc_begin, page_size );
    uintptr_t const free_size = alloc_block_begin - block_begin;

    /* The leading fragment must either be absent or big enough to form a
       free block of its own */
    if ( free_size >= min_block_size || free_size == 0 ) {
      return alloc_begin;
    }
  }

  return 0;
}
220
/*
 * Allocates a memory area of at least alloc_size bytes from the heap.
 *
 * The returned area begins at an address aligned to the given alignment
 * (0 means no alignment constraint) and, if boundary is non-zero, does
 * not cross an address that is a multiple of boundary.  A non-zero
 * boundary must be at least alloc_size, and implies page-size alignment
 * when none was requested.
 *
 * The free list is searched first fit.  When heap protection is enabled
 * and the search fails, delayed free blocks are released and the search
 * is repeated; without protection the loop runs exactly once (the macro
 * variant of _Heap_Protection_free_delayed_blocks is always false).
 * Allocation statistics are updated on both success and failure.
 *
 * Returns the begin address of the allocated area, or NULL if the request
 * is invalid (size overflow, boundary < alloc_size) or cannot be
 * satisfied.
 */
void *_Heap_Allocate_aligned_with_boundary(
  Heap_Control *heap,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  Heap_Statistics *const stats = &heap->stats;
  /* Minimum block size able to hold the request; the alloc bonus may be
     reclaimed from the next block's header area */
  uintptr_t const block_size_floor = alloc_size + HEAP_BLOCK_HEADER_SIZE
    - HEAP_ALLOC_BONUS;
  uintptr_t const page_size = heap->page_size;
  Heap_Block *block = NULL;
  uintptr_t alloc_begin = 0;
  uint32_t search_count = 0;
  bool search_again = false;

  if ( block_size_floor < alloc_size ) {
    /* Integer overflow occurred */
    return NULL;
  }

  if ( boundary != 0 ) {
    /* An area larger than the boundary would necessarily cross it */
    if ( boundary < alloc_size ) {
      return NULL;
    }

    /* Boundary handling in _Heap_Check_block needs a non-zero alignment */
    if ( alignment == 0 ) {
      alignment = page_size;
    }
  }

  do {
    Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );

    /* First-fit scan of the free list */
    block = _Heap_Free_list_first( heap );
    while ( block != free_list_tail ) {
      _HAssert( _Heap_Is_prev_used( block ) );

      _Heap_Protection_block_check( heap, block );

      /*
       * The HEAP_PREV_BLOCK_USED flag is always set in the block size_and_flag
       * field.  Thus the value is about one unit larger than the real block
       * size.  The greater than operator takes this into account.
       */
      if ( block->size_and_flag > block_size_floor ) {
        if ( alignment == 0 ) {
          /* Unconstrained: use the block's allocation area directly */
          alloc_begin = _Heap_Alloc_area_of_block( block );
        } else {
          /* Returns 0 if the block cannot satisfy the constraints */
          alloc_begin = _Heap_Check_block(
            heap,
            block,
            alloc_size,
            alignment,
            boundary
          );
        }
      }

      /* Statistics */
      ++search_count;

      if ( alloc_begin != 0 ) {
        break;
      }

      block = block->next;
    }

    /* On failure, releasing delayed free blocks may make room; retry then */
    search_again = _Heap_Protection_free_delayed_blocks( heap, alloc_begin );
  } while ( search_again );

  if ( alloc_begin != 0 ) {
    /* Carve the allocation out of the found block */
    block = _Heap_Block_allocate( heap, block, alloc_begin, alloc_size );

    _Heap_Check_allocation(
      heap,
      block,
      alloc_begin,
      alloc_size,
      alignment,
      boundary
    );

    /* Statistics */
    ++stats->allocs;
    stats->searches += search_count;
    stats->lifetime_allocated += _Heap_Block_size( block );
  } else {
    /* Statistics */
    ++stats->failed_allocs;
  }

  /* Statistics */
  if ( stats->max_search < search_count ) {
    stats->max_search = search_count;
  }

  return (void *) alloc_begin;
}
Note: See TracBrowser for help on using the repository browser.