source: rtems/cpukit/score/src/heap.c @ af690b85

4.104.115
Last change on this file since af690b85 was af690b85, checked in by Joel Sherrill <joel.sherrill@…>, on 07/09/09 at 17:39:51

2009-07-09 Joel Sherrill <joel.sherrill@…>

  • score/src/heap.c: Remove unneeded include.
  • Property mode set to 100644
File size: 10.1 KB
Line 
1/*
2 *  Heap Handler
3 *
4 *  COPYRIGHT (c) 1989-2009.
5 *  On-Line Applications Research Corporation (OAR).
6 *
7 *  The license and distribution terms for this file may be
8 *  found in the file LICENSE in this distribution or at
9 *  http://www.rtems.com/license/LICENSE.
10 *
11 *  $Id$
12 */
13
14#if HAVE_CONFIG_H
15#include "config.h"
16#endif
17
18#include <rtems/system.h>
19#include <rtems/score/heap.h>
20
/* Monotonically increasing id handed to each heap at initialization time;
   stored in the heap's statistics to tell instances apart when debugging. */
static uint32_t instance = 0;
22
23/*PAGE
24 *
25 *  _Heap_Initialize
26 *
27 *  This kernel routine initializes a heap.
28 *
29 *  Input parameters:
30 *    the_heap         - pointer to heap header
31 *    starting_address - starting address of heap
32 *    size             - size of heap
33 *    page_size        - allocatable unit of memory
34 *
35 *  Output parameters:
36 *    returns - maximum memory available if RTEMS_SUCCESSFUL
37 *    0       - otherwise
38 *
39 *  This is what a heap looks like in memory immediately after initialization:
40 *
41 *
42 *            +--------------------------------+ <- begin = starting_address
43 *            |  unused space due to alignment |
44 *            |       size < page_size         |
45 *         0  +--------------------------------+ <- first block
46 *            |  prev_size = page_size         |
47 *         4  +--------------------------------+
48 *            |  size = size0              | 1 |
49 *         8  +---------------------+----------+ <- aligned on page_size
50 *            |  next = HEAP_TAIL   |          |
51 *        12  +---------------------+          |
52 *            |  prev = HEAP_HEAD   |  memory  |
53 *            +---------------------+          |
54 *            |                     available  |
55 *            |                                |
56 *            |                for allocation  |
57 *            |                                |
58 *     size0  +--------------------------------+ <- last dummy block
59 *            |  prev_size = size0             |
60 *        +4  +--------------------------------+
61 *            |  size = page_size          | 0 | <- prev block is free
62 *        +8  +--------------------------------+ <- aligned on page_size
63 *            |  unused space due to alignment |
64 *            |       size < page_size         |
65 *            +--------------------------------+ <- end = begin + size
66 *
67 *  Below is what a heap looks like after first allocation of SIZE bytes using
68 *  _Heap_allocate(). BSIZE stands for SIZE + 4 aligned up on 'page_size'
69 *  boundary.
70 *  [NOTE: If allocation were performed by _Heap_Allocate_aligned(), the
71 *  block size BSIZE is defined differently, and previously free block will
72 *  be split so that upper part of it will become used block (see
73 *  'heapallocatealigned.c' for details).]
74 *
75 *            +--------------------------------+ <- begin = starting_address
76 *            |  unused space due to alignment |
77 *            |       size < page_size         |
78 *         0  +--------------------------------+ <- used block
79 *            |  prev_size = page_size         |
80 *         4  +--------------------------------+
81 *            |  size = BSIZE              | 1 | <- prev block is used
82 *         8  +--------------------------------+ <- aligned on page_size
83 *            |              .                 | Pointer returned to the user
84 *            |              .                 | is 8 for _Heap_Allocate()
85 *            |              .                 | and is in range
86 * 8 +        |         user-accessible        | [8,8+page_size) for
87 *  page_size +- - -                      - - -+ _Heap_Allocate_aligned()
88 *            |             area               |
89 *            |              .                 |
90 *     BSIZE  +- - - - -     .        - - - - -+ <- free block
91 *            |              .                 |
92 * BSIZE  +4  +--------------------------------+
93 *            |  size = S = size0 - BSIZE  | 1 | <- prev block is used
94 * BSIZE  +8  +-------------------+------------+ <- aligned on page_size
95 *            |  next = HEAP_TAIL |            |
96 * BSIZE +12  +-------------------+            |
97 *            |  prev = HEAP_HEAD |     memory |
98 *            +-------------------+            |
99 *            |                   .  available |
100 *            |                   .            |
101 *            |                   .        for |
102 *            |                   .            |
103 * BSIZE +S+0 +-------------------+ allocation + <- last dummy block
104 *            |  prev_size = S    |            |
105 *       +S+4 +-------------------+------------+
106 *            |  size = page_size          | 0 | <- prev block is free
107 *       +S+8 +--------------------------------+ <- aligned on page_size
108 *            |  unused space due to alignment |
109 *            |       size < page_size         |
110 *            +--------------------------------+ <- end = begin + size
111 *
112 */
113
114uint32_t   _Heap_Initialize(
115  Heap_Control        *the_heap,
116  void                *starting_address,
117  intptr_t             size,
118  uint32_t             page_size
119)
120{
121  Heap_Block            *the_block;
122  uint32_t               the_size;
123  _H_uptr_t              start;
124  _H_uptr_t              aligned_start;
125  uint32_t               overhead;
126  Heap_Statistics *const stats = &the_heap->stats;
127
128  if (page_size == 0)
129    page_size = CPU_ALIGNMENT;
130  else
131    _Heap_Align_up( &page_size, CPU_ALIGNMENT );
132
133  /* Calculate aligned_start so that aligned_start + HEAP_BLOCK_USER_OFFSET
134     (value of user pointer) is aligned on 'page_size' boundary. Make sure
135     resulting 'aligned_start' is not below 'starting_address'. */
136  start = _H_p2u(starting_address);
137  aligned_start = start + HEAP_BLOCK_USER_OFFSET;
138  _Heap_Align_up_uptr ( &aligned_start, page_size );
139  aligned_start -= HEAP_BLOCK_USER_OFFSET;
140
141  /* Calculate 'min_block_size'. It's HEAP_MIN_BLOCK_SIZE aligned up to the
142     nearest multiple of 'page_size'. */
143  the_heap->min_block_size = HEAP_MIN_BLOCK_SIZE;
144  _Heap_Align_up ( &the_heap->min_block_size, page_size );
145
146  /* Calculate 'the_size' -- size of the first block so that there is enough
147     space at the end for the permanent last block. It is equal to 'size'
148     minus total overhead aligned down to the nearest multiple of
149     'page_size'. */
150  overhead = HEAP_OVERHEAD + (aligned_start - start);
151  if ( size < overhead )
152    return 0;   /* Too small area for the heap */
153  the_size = size - overhead;
154  _Heap_Align_down ( &the_size, page_size );
155  if ( the_size == 0 )
156    return 0;   /* Too small area for the heap */
157
158  the_heap->page_size = page_size;
159  the_heap->begin = starting_address;
160  the_heap->end = starting_address + size;
161
162  the_block = (Heap_Block *) aligned_start;
163
164  the_block->prev_size = page_size;
165  the_block->size = the_size | HEAP_PREV_USED;
166  the_block->next = _Heap_Tail( the_heap );
167  the_block->prev = _Heap_Head( the_heap );
168  _Heap_Head(the_heap)->next = the_block;
169  _Heap_Tail(the_heap)->prev = the_block;
170  the_heap->start = the_block;
171
172  _HAssert(_Heap_Is_aligned(the_heap->page_size, CPU_ALIGNMENT));
173  _HAssert(_Heap_Is_aligned(the_heap->min_block_size, page_size));
174  _HAssert(_Heap_Is_aligned_ptr(_Heap_User_area(the_block), page_size));
175
176  the_block = _Heap_Block_at( the_block, the_size );
177  the_heap->final = the_block;       /* Permanent final block of the heap */
178  the_block->prev_size = the_size;   /* Previous block is free */
179  the_block->size = page_size;
180
181  stats->size = size;
182  stats->free_size = the_size;
183  stats->min_free_size = the_size;
184  stats->free_blocks = 1;
185  stats->max_free_blocks = 1;
186  stats->used_blocks = 0;
187  stats->max_search = 0;
188  stats->allocs = 0;
189  stats->searches = 0;
190  stats->frees = 0;
191  stats->resizes = 0;
192  stats->instance = instance++;
193
194  return ( the_size - HEAP_BLOCK_USED_OVERHEAD );
195}
196
197/*PAGE
198 *
199 * Internal routines shared by _Heap_Allocate() and _Heap_Allocate_aligned().
200 *
201 * Note: there is no reason to put them into a separate file(s) as they are
202 * always required for heap to be useful.
203 */
204
/*
 * Convert user requested 'size' of memory block to the block size.
 * Return block size on success, 0 if overflow occurred.
 */
209size_t _Heap_Calc_block_size(
210  size_t   size,
211  uint32_t page_size,
212  uint32_t min_size)
213{
214  uint32_t block_size = size + HEAP_BLOCK_USED_OVERHEAD;
215  _Heap_Align_up(&block_size, page_size);
216  if (block_size < min_size) block_size = min_size;
217  /* 'block_size' becomes <= 'size' if and only if overflow occured. */
218  return (block_size > size) ? block_size : 0;
219}
220
221/*
222 * Allocate block of size 'alloc_size' from 'the_block' belonging to
223 * 'the_heap'. Split 'the_block' if possible, otherwise allocate it entirely.
224 * When split, make the lower part used, and leave the upper part free.
225 * Return the size of allocated block.
226 */
227uint32_t _Heap_Block_allocate(
228  Heap_Control* the_heap,
229  Heap_Block*   the_block,
230  uint32_t      alloc_size
231)
232{
233  Heap_Statistics *const stats = &the_heap->stats;
234  uint32_t const block_size = _Heap_Block_size(the_block);
235  uint32_t const the_rest = block_size - alloc_size;
236
237  _HAssert(_Heap_Is_aligned(block_size, the_heap->page_size));
238  _HAssert(_Heap_Is_aligned(alloc_size, the_heap->page_size));
239  _HAssert(alloc_size <= block_size);
240  _HAssert(_Heap_Is_prev_used(the_block));
241
242  if(the_rest >= the_heap->min_block_size) {
243    /* Split the block so that upper part is still free, and lower part
244       becomes used. This is slightly less optimal than leaving lower part
245       free as it requires replacing block in the free blocks list, but it
246       makes it possible to reuse this code in the _Heap_Resize_block(). */
247    Heap_Block *next_block = _Heap_Block_at(the_block, alloc_size);
248    _Heap_Block_replace(the_block, next_block);
249    the_block->size = alloc_size | HEAP_PREV_USED;
250    next_block->size = the_rest | HEAP_PREV_USED;
251    _Heap_Block_at(next_block, the_rest)->prev_size = the_rest;
252  }
253  else {
254    /* Don't split the block as remainder is either zero or too small to be
255       used as a separate free block. Change 'alloc_size' to the size of the
256       block and remove the block from the list of free blocks. */
257    _Heap_Block_remove(the_block);
258    alloc_size = block_size;
259    _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED;
260    stats->free_blocks -= 1;
261  }
262  /* Update statistics */
263  stats->free_size -= alloc_size;
264  if(stats->min_free_size > stats->free_size)
265    stats->min_free_size = stats->free_size;
266  stats->used_blocks += 1;
267  return alloc_size;
268}
Note: See TracBrowser for help on using the repository browser.