source: rtems/cpukit/score/src/heapfree.c @ eea7c937

4.115
Last change on this file since eea7c937 was 5618c37a, checked in by Sebastian Huber <sebastian.huber@…>, on 07/24/13 at 13:14:48

score: Create thread implementation header

Move implementation specific parts of thread.h and thread.inl into new
header file threadimpl.h. The thread.h contains now only the
application visible API.

Remove superfluous header file includes from various files.

  • Property mode set to 100644
File size: 6.6 KB
Line 
1/**
2 * @file
3 *
4 * @ingroup ScoreHeap
5 *
6 * @brief Heap Handler implementation.
7 */
8
9/*
10 *  COPYRIGHT (c) 1989-2007.
11 *  On-Line Applications Research Corporation (OAR).
12 *
13 *  The license and distribution terms for this file may be
14 *  found in the file LICENSE in this distribution or at
15 *  http://www.rtems.com/license/LICENSE.
16 */
17
18#if HAVE_CONFIG_H
19#include "config.h"
20#endif
21
22#include <rtems/score/heapimpl.h>
23#include <rtems/score/threadimpl.h>
24
25#ifndef HEAP_PROTECTION
26  #define _Heap_Protection_determine_block_free( heap, block ) true
27#else
  /*
   * Puts the block on the heap's delayed free list instead of releasing it
   * immediately.  The freeing task is recorded for diagnostics and the whole
   * allocation area is filled with HEAP_FREE_PATTERN so that a later
   * use-after-free write can be detected by
   * _Heap_Protection_check_free_block().
   */
  static void _Heap_Protection_delay_block_free(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    /* Pattern region spans the allocation area of the block. */
    uintptr_t *const pattern_begin = (uintptr_t *)
      _Heap_Alloc_area_of_block( block );
    uintptr_t *const pattern_end = (uintptr_t *)
      ((uintptr_t) block + _Heap_Block_size( block ) + HEAP_ALLOC_BONUS);
    uintptr_t const delayed_free_block_count =
      heap->Protection.delayed_free_block_count;
    uintptr_t *current = NULL;

    /*
     * A self-referencing next pointer marks the block as "delayed free
     * pending"; _Heap_Protection_determine_block_free() treats any non-NULL,
     * non-OBOLUS value as an error (double free detection).
     */
    block->Protection_begin.next_delayed_free_block = block;
    block->Protection_begin.task = _Thread_Get_executing();

    /* Append to the singly-linked delayed free list (FIFO order). */
    if ( delayed_free_block_count > 0 ) {
      Heap_Block *const last = heap->Protection.last_delayed_free_block;

      last->Protection_begin.next_delayed_free_block = block;
    } else {
      heap->Protection.first_delayed_free_block = block;
    }
    heap->Protection.last_delayed_free_block = block;
    heap->Protection.delayed_free_block_count = delayed_free_block_count + 1;

    /* Poison the allocation area so later writes to it are detectable. */
    for ( current = pattern_begin; current != pattern_end; ++current ) {
      *current = HEAP_FREE_PATTERN;
    }
  }
58
59  static void _Heap_Protection_check_free_block(
60    Heap_Control *heap,
61    Heap_Block *block
62  )
63  {
64    uintptr_t *const pattern_begin = (uintptr_t *)
65      _Heap_Alloc_area_of_block( block );
66    uintptr_t *const pattern_end = (uintptr_t *)
67      ((uintptr_t) block + _Heap_Block_size( block ) + HEAP_ALLOC_BONUS);
68    uintptr_t *current = NULL;
69
70    for ( current = pattern_begin; current != pattern_end; ++current ) {
71      if ( *current != HEAP_FREE_PATTERN ) {
72        _Heap_Protection_block_error( heap, block );
73        break;
74      }
75    }
76  }
77
  /*
   * Decides whether the block may actually be freed now.
   *
   * Returns true if _Heap_Free() shall proceed with the real free, false if
   * the free is delayed.  The next_delayed_free_block field encodes the
   * block's protection state:
   *   - NULL: first free of this block,
   *   - HEAP_PROTECTION_OBOLUS: block was on the delayed free list and is now
   *     being released for real (its free pattern must still be intact),
   *   - anything else: the block is still pending on the delayed free list,
   *     so this is a double free.
   */
  static bool _Heap_Protection_determine_block_free(
    Heap_Control *heap,
    Heap_Block *block
  )
  {
    bool do_free = true;
    Heap_Block *const next = block->Protection_begin.next_delayed_free_block;

    /*
     * Sometimes after a free the allocated area is still in use.  An example
     * is the task stack of a thread that deletes itself.  The thread dispatch
     * disable level is a way to detect this use case.
     */
    if ( _Thread_Dispatch_is_enabled() ) {
      if ( next == NULL ) {
        /* First free: defer the release and poison the block. */
        _Heap_Protection_delay_block_free( heap, block );
        do_free = false;
      } else if ( next == HEAP_PROTECTION_OBOLUS ) {
        /* Delayed block finally released: verify its free pattern. */
        _Heap_Protection_check_free_block( heap, block );
      } else {
        /* Block is still on the delayed free list: double free. */
        _Heap_Protection_block_error( heap, block );
      }
    } else if ( next == NULL ) {
      /*
       * This is a hack to prevent heavy workspace fragmentation which would
       * lead to test suite failures.
       */
      _Heap_Protection_free_all_delayed_blocks( heap );
    }

    return do_free;
  }
110#endif
111
/*
 * Frees the allocated area starting at alloc_begin_ptr and coalesces the
 * resulting free block with adjacent free neighbors (boundary tag scheme).
 *
 * Returns true on success (including a free of NULL and a free delayed by
 * heap protection), false if the pointer does not designate a valid used
 * block of this heap.
 */
bool _Heap_Free( Heap_Control *heap, void *alloc_begin_ptr )
{
  Heap_Statistics *const stats = &heap->stats;
  uintptr_t alloc_begin;
  Heap_Block *block;
  Heap_Block *next_block = NULL;
  uintptr_t block_size = 0;
  uintptr_t next_block_size = 0;
  bool next_is_free = false;

  /*
   * If NULL return true so a free on NULL is considered a valid release. This
   * is a special case that could be handled by the in heap check how-ever that
   * would result in false being returned which is wrong.
   */
  if ( alloc_begin_ptr == NULL ) {
    return true;
  }

  /* Recover the block header from the allocation area address. */
  alloc_begin = (uintptr_t) alloc_begin_ptr;
  block = _Heap_Block_of_alloc_area( alloc_begin, heap->page_size );

  if ( !_Heap_Is_block_in_heap( heap, block ) ) {
    return false;
  }

  _Heap_Protection_block_check( heap, block );

  block_size = _Heap_Block_size( block );
  next_block = _Heap_Block_at( block, block_size );

  /* A corrupt size field can put next_block outside the heap. */
  if ( !_Heap_Is_block_in_heap( heap, next_block ) ) {
    return false;
  }

  _Heap_Protection_block_check( heap, next_block );

  /*
   * The successor's HEAP_PREV_BLOCK_USED flag mirrors this block's state;
   * if it is clear, this block was already free (double free or corruption).
   */
  if ( !_Heap_Is_prev_used( next_block ) ) {
    _Heap_Protection_block_error( heap, block );
    return false;
  }

  /* With heap protection enabled the free may be delayed; report success. */
  if ( !_Heap_Protection_determine_block_free( heap, block ) ) {
    return true;
  }

  next_block_size = _Heap_Block_size( next_block );
  /* The last block has no successor; otherwise check the successor's flag. */
  next_is_free = next_block != heap->last_block
    && !_Heap_Is_prev_used( _Heap_Block_at( next_block, next_block_size ));

  if ( !_Heap_Is_prev_used( block ) ) {
    /* Previous block is free: prev_size is valid only in this case. */
    uintptr_t const prev_size = block->prev_size;
    Heap_Block * const prev_block = _Heap_Block_at( block, -prev_size );

    if ( !_Heap_Is_block_in_heap( heap, prev_block ) ) {
      _HAssert( false );
      return( false );
    }

    /* As we always coalesce free blocks, the block that precedes prev_block
       must have been used. */
    if ( !_Heap_Is_prev_used ( prev_block) ) {
      _HAssert( false );
      return( false );
    }

    if ( next_is_free ) {       /* coalesce both */
      uintptr_t const size = block_size + prev_size + next_block_size;
      /* prev_block stays on the free list; next_block is absorbed. */
      _Heap_Free_list_remove( next_block );
      stats->free_blocks -= 1;
      prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
      next_block = _Heap_Block_at( prev_block, size );
      _HAssert(!_Heap_Is_prev_used( next_block));
      next_block->prev_size = size;
    } else {                      /* coalesce prev */
      uintptr_t const size = block_size + prev_size;
      prev_block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
      /* Clear the flag in the successor: its predecessor is now free. */
      next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
      next_block->prev_size = size;
    }
  } else if ( next_is_free ) {    /* coalesce next */
    uintptr_t const size = block_size + next_block_size;
    /* Reuse next_block's free list position for the merged block. */
    _Heap_Free_list_replace( next_block, block );
    block->size_and_flag = size | HEAP_PREV_BLOCK_USED;
    next_block  = _Heap_Block_at( block, size );
    next_block->prev_size = size;
  } else {                        /* no coalesce */
    /* Add 'block' to the head of the free blocks list as it tends to
       produce less fragmentation than adding to the tail. */
    _Heap_Free_list_insert_after( _Heap_Free_list_head( heap), block );
    block->size_and_flag = block_size | HEAP_PREV_BLOCK_USED;
    next_block->size_and_flag &= ~HEAP_PREV_BLOCK_USED;
    next_block->prev_size = block_size;

    /* Statistics */
    ++stats->free_blocks;
    if ( stats->max_free_blocks < stats->free_blocks ) {
      stats->max_free_blocks = stats->free_blocks;
    }
  }

  /* Statistics */
  --stats->used_blocks;
  ++stats->frees;
  stats->free_size += block_size;

  return( true );
}
Note: See TracBrowser for help on using the repository browser.