source: rtems/cpukit/score/src/heapallocate.c @ 0daa8ab

Last change on this file since 0daa8ab was 006af6ac, checked in by Alexander Krutwig <alexander.krutwig@…>, on 06/14/16 at 07:26:12

score: Improve heap protection

Check block pointers of deferred free list before use.

  • Property mode set to 100644
File size: 7.8 KB
/**
 * @file
 *
 * @ingroup ScoreHeap
 *
 * @brief Heap Handler implementation.
 */

/*
 *  COPYRIGHT (c) 1989-1999.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  Copyright (c) 2009 embedded brains GmbH.
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
#include "config.h"
#endif

#include <rtems/system.h>
#include <rtems/score/heapimpl.h>

#ifndef HEAP_PROTECTION
  #define _Heap_Protection_free_delayed_blocks( heap, alloc_begin ) false
#else
  static bool _Heap_Protection_free_delayed_blocks(
    Heap_Control *heap,
    uintptr_t alloc_begin
  )
  {
    bool search_again = false;
    uintptr_t const blocks_to_free_count =
      (heap->Protection.delayed_free_block_count
         + heap->Protection.delayed_free_fraction - 1)
      / heap->Protection.delayed_free_fraction;

    if ( alloc_begin == 0 && blocks_to_free_count > 0 ) {
      Heap_Block *block_to_free = heap->Protection.first_delayed_free_block;
      uintptr_t count = 0;

      for ( count = 0; count < blocks_to_free_count; ++count ) {
        Heap_Block *next_block_to_free;

        if ( !_Heap_Is_block_in_heap( heap, block_to_free ) ) {
          _Heap_Protection_block_error( heap, block_to_free );
        }

        next_block_to_free =
          block_to_free->Protection_begin.next_delayed_free_block;
        block_to_free->Protection_begin.next_delayed_free_block =
          HEAP_PROTECTION_OBOLUS;

        _Heap_Free(
          heap,
          (void *) _Heap_Alloc_area_of_block( block_to_free )
        );

        block_to_free = next_block_to_free;
      }

      heap->Protection.delayed_free_block_count -= blocks_to_free_count;
      heap->Protection.first_delayed_free_block = block_to_free;

      search_again = true;
    }

    return search_again;
  }
#endif

#ifdef RTEMS_HEAP_DEBUG
  static void _Heap_Check_allocation(
    const Heap_Control *heap,
    const Heap_Block *block,
    uintptr_t alloc_begin,
    uintptr_t alloc_size,
    uintptr_t alignment,
    uintptr_t boundary
  )
  {
    uintptr_t const min_block_size = heap->min_block_size;
    uintptr_t const page_size = heap->page_size;

    uintptr_t const block_begin = (uintptr_t) block;
    uintptr_t const block_size = _Heap_Block_size( block );
    uintptr_t const block_end = block_begin + block_size;

    uintptr_t const alloc_end = alloc_begin + alloc_size;

    uintptr_t const alloc_area_begin = _Heap_Alloc_area_of_block( block );
    uintptr_t const alloc_area_offset = alloc_begin - alloc_area_begin;

    _HAssert( block_size >= min_block_size );
    _HAssert( block_begin < block_end );
    _HAssert(
      _Heap_Is_aligned( block_begin + HEAP_BLOCK_HEADER_SIZE, page_size )
    );
    _HAssert(
      _Heap_Is_aligned( block_size, page_size )
    );

    _HAssert( alloc_end <= block_end + HEAP_ALLOC_BONUS );
    _HAssert( alloc_area_begin == block_begin + HEAP_BLOCK_HEADER_SIZE );
    _HAssert( alloc_area_offset < page_size );

    _HAssert( _Heap_Is_aligned( alloc_area_begin, page_size ) );
    if ( alignment == 0 ) {
      _HAssert( alloc_begin == alloc_area_begin );
    } else {
      _HAssert( _Heap_Is_aligned( alloc_begin, alignment ) );
    }

    if ( boundary != 0 ) {
      uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );

      _HAssert( alloc_size <= boundary );
      _HAssert( boundary_line <= alloc_begin || alloc_end <= boundary_line );
    }
  }
#else
  #define _Heap_Check_allocation( h, b, ab, as, ag, bd ) ((void) 0)
#endif

static uintptr_t _Heap_Check_block(
  const Heap_Control *heap,
  const Heap_Block *block,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  uintptr_t const page_size = heap->page_size;
  uintptr_t const min_block_size = heap->min_block_size;

  uintptr_t const block_begin = (uintptr_t) block;
  uintptr_t const block_size = _Heap_Block_size( block );
  uintptr_t const block_end = block_begin + block_size;

  uintptr_t const alloc_begin_floor = _Heap_Alloc_area_of_block( block );
  uintptr_t const alloc_begin_ceiling = block_end - min_block_size
    + HEAP_BLOCK_HEADER_SIZE + page_size - 1;

  uintptr_t alloc_end = block_end + HEAP_ALLOC_BONUS;
  uintptr_t alloc_begin = alloc_end - alloc_size;

  alloc_begin = _Heap_Align_down( alloc_begin, alignment );

  /* Ensure that we have a valid new block at the end */
  if ( alloc_begin > alloc_begin_ceiling ) {
    alloc_begin = _Heap_Align_down( alloc_begin_ceiling, alignment );
  }

  alloc_end = alloc_begin + alloc_size;

  /* Ensure boundary constraint */
  if ( boundary != 0 ) {
    uintptr_t const boundary_floor = alloc_begin_floor + alloc_size;
    uintptr_t boundary_line = _Heap_Align_down( alloc_end, boundary );

    while ( alloc_begin < boundary_line && boundary_line < alloc_end ) {
      if ( boundary_line < boundary_floor ) {
        return 0;
      }
      alloc_begin = boundary_line - alloc_size;
      alloc_begin = _Heap_Align_down( alloc_begin, alignment );
      alloc_end = alloc_begin + alloc_size;
      boundary_line = _Heap_Align_down( alloc_end, boundary );
    }
  }

  /* Ensure that we have a valid new block at the beginning */
  if ( alloc_begin >= alloc_begin_floor ) {
    uintptr_t const alloc_block_begin =
      (uintptr_t) _Heap_Block_of_alloc_area( alloc_begin, page_size );
    uintptr_t const free_size = alloc_block_begin - block_begin;

    if ( free_size >= min_block_size || free_size == 0 ) {
      return alloc_begin;
    }
  }

  return 0;
}

void *_Heap_Allocate_aligned_with_boundary(
  Heap_Control *heap,
  uintptr_t alloc_size,
  uintptr_t alignment,
  uintptr_t boundary
)
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t const block_size_floor = alloc_size + HEAP_BLOCK_HEADER_SIZE
    - HEAP_ALLOC_BONUS;
  uintptr_t const page_size = heap->page_size;
  Heap_Block *block = NULL;
  uintptr_t alloc_begin = 0;
  uint32_t search_count = 0;
  bool search_again = false;

  if ( block_size_floor < alloc_size ) {
    /* Integer overflow occurred */
    return NULL;
  }

  if ( boundary != 0 ) {
    if ( boundary < alloc_size ) {
      return NULL;
    }

    if ( alignment == 0 ) {
      alignment = page_size;
    }
  }

  do {
    Heap_Block *const free_list_tail = _Heap_Free_list_tail( heap );

    block = _Heap_Free_list_first( heap );
    while ( block != free_list_tail ) {
      _HAssert( _Heap_Is_prev_used( block ) );

      _Heap_Protection_block_check( heap, block );

      /*
       * The HEAP_PREV_BLOCK_USED flag is always set in the block size_and_flag
       * field.  Thus the value is about one unit larger than the real block
       * size.  The greater than operator takes this into account.
       */
      if ( block->size_and_flag > block_size_floor ) {
        if ( alignment == 0 ) {
          alloc_begin = _Heap_Alloc_area_of_block( block );
        } else {
          alloc_begin = _Heap_Check_block(
            heap,
            block,
            alloc_size,
            alignment,
            boundary
          );
        }
      }

      /* Statistics */
      ++search_count;

      if ( alloc_begin != 0 ) {
        break;
      }

      block = block->next;
    }

    search_again = _Heap_Protection_free_delayed_blocks( heap, alloc_begin );
  } while ( search_again );

  if ( alloc_begin != 0 ) {
    block = _Heap_Block_allocate( heap, block, alloc_begin, alloc_size );

    _Heap_Check_allocation(
      heap,
      block,
      alloc_begin,
      alloc_size,
      alignment,
      boundary
    );

    /* Statistics */
    ++stats->allocs;
    stats->searches += search_count;
    stats->lifetime_allocated += _Heap_Block_size( block );
  } else {
    /* Statistics */
    ++stats->failed_allocs;
  }

  /* Statistics */
  if ( stats->max_search < search_count ) {
    stats->max_search = search_count;
  }

  return (void *) alloc_begin;
}
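
For orientation, the sketch below shows how the entry point in this file might be exercised from score-internal code. It is not part of heapallocate.c: it assumes the _Heap_Initialize and _Heap_Free companions declared in <rtems/score/heapimpl.h>, and the memory area, sizes, alignment, and boundary values are illustrative only.

#include <rtems/score/heapimpl.h>

static uint8_t example_area[ 4096 ];
static Heap_Control example_heap;

static void example_use( void )
{
  void *p;

  /* Carve a heap out of a caller-supplied memory area; a page size of 0
     lets _Heap_Initialize pick its default based on CPU_ALIGNMENT.  A
     return value of 0 means the area was too small. */
  if ( _Heap_Initialize( &example_heap, example_area, sizeof( example_area ), 0 ) == 0 ) {
    return;
  }

  /* 128 bytes, 64-byte aligned, not crossing a 256-byte boundary.  Per the
     code above, alignment 0 means no extra alignment, boundary 0 means no
     boundary, and a non-zero boundary must be at least the allocation size. */
  p = _Heap_Allocate_aligned_with_boundary( &example_heap, 128, 64, 256 );

  if ( p != NULL ) {
    _Heap_Free( &example_heap, p );
  }
}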