1 | /* |
---|
2 | * Heap Handler |
---|
3 | * |
---|
4 | * COPYRIGHT (c) 1989-2006. |
---|
5 | * On-Line Applications Research Corporation (OAR). |
---|
6 | * |
---|
7 | * The license and distribution terms for this file may be |
---|
8 | * found in the file LICENSE in this distribution or at |
---|
9 | * http://www.rtems.com/license/LICENSE. |
---|
10 | * |
---|
11 | * $Id$ |
---|
12 | */ |
---|
13 | |
---|
14 | #if HAVE_CONFIG_H |
---|
15 | #include "config.h" |
---|
16 | #endif |
---|
17 | |
---|
18 | #include <rtems/system.h> |
---|
19 | #include <rtems/score/sysstate.h> |
---|
20 | #include <rtems/score/heap.h> |
---|
21 | |
---|
/* Counter used to stamp each initialized heap with a unique, monotonically
   increasing instance number; recorded in the heap's statistics by
   _Heap_Initialize(). */
static uint32_t instance = 0;
23 | |
---|
24 | /*PAGE |
---|
25 | * |
---|
26 | * _Heap_Initialize |
---|
27 | * |
---|
28 | * This kernel routine initializes a heap. |
---|
29 | * |
---|
30 | * Input parameters: |
---|
31 | * the_heap - pointer to heap header |
---|
32 | * starting_address - starting address of heap |
---|
33 | * size - size of heap |
---|
34 | * page_size - allocatable unit of memory |
---|
35 | * |
---|
36 | * Output parameters: |
---|
37 | * returns - maximum memory available if RTEMS_SUCCESSFUL |
---|
38 | * 0 - otherwise |
---|
39 | * |
---|
40 | * This is what a heap looks like in memory immediately after initialization: |
---|
41 | * |
---|
42 | * |
---|
43 | * +--------------------------------+ <- begin = starting_address |
---|
44 | * | unused space due to alignment | |
---|
45 | * | size < page_size | |
---|
46 | * 0 +--------------------------------+ <- first block |
---|
47 | * | prev_size = page_size | |
---|
48 | * 4 +--------------------------------+ |
---|
49 | * | size = size0 | 1 | |
---|
50 | * 8 +---------------------+----------+ <- aligned on page_size |
---|
51 | * | next = HEAP_TAIL | | |
---|
52 | * 12 +---------------------+ | |
---|
53 | * | prev = HEAP_HEAD | memory | |
---|
54 | * +---------------------+ | |
---|
55 | * | available | |
---|
56 | * | | |
---|
57 | * | for allocation | |
---|
58 | * | | |
---|
59 | * size0 +--------------------------------+ <- last dummy block |
---|
60 | * | prev_size = size0 | |
---|
61 | * +4 +--------------------------------+ |
---|
62 | * | size = page_size | 0 | <- prev block is free |
---|
63 | * +8 +--------------------------------+ <- aligned on page_size |
---|
64 | * | unused space due to alignment | |
---|
65 | * | size < page_size | |
---|
66 | * +--------------------------------+ <- end = begin + size |
---|
67 | * |
---|
68 | * This is what a heap looks like after first allocation of SIZE bytes. |
---|
69 | * BSIZE stands for SIZE + 4 aligned up on 'page_size' boundary if allocation |
---|
70 | * has been performed by _Heap_Allocate(). If allocation has been performed |
---|
71 | * by _Heap_Allocate_aligned(), the block size BSIZE is defined differently |
---|
72 | * (see 'heapallocatealigned.c' for details). |
---|
73 | * |
---|
74 | * +--------------------------------+ <- begin = starting_address |
---|
75 | * | unused space due to alignment | |
---|
76 | * | size < page_size | |
---|
77 | * 0 +--------------------------------+ <- used block |
---|
78 | * | prev_size = page_size | |
---|
79 | * 4 +--------------------------------+ |
---|
80 | * | size = BSIZE | 1 | <- prev block is used |
---|
81 | * 8 +--------------------------------+ <- aligned on page_size |
---|
82 | * | . | Pointer returned to the user |
---|
83 | * | . | is 8 for _Heap_Allocate() |
---|
84 | * | . | and is in range |
---|
85 | * 8 + | user-accessible | [8,8+page_size) for |
---|
86 | * page_size +- - - - - -+ _Heap_Allocate_aligned() |
---|
87 | * | area | |
---|
88 | * | . | |
---|
89 | * BSIZE +- - - - - . - - - - -+ <- free block |
---|
90 | * | . | |
---|
91 | * BSIZE +4 +--------------------------------+ |
---|
92 | * | size = S = size0 - BSIZE | 1 | <- prev block is used |
---|
93 | * BSIZE +8 +-------------------+------------+ <- aligned on page_size |
---|
94 | * | next = HEAP_TAIL | | |
---|
95 | * BSIZE +12 +-------------------+ | |
---|
96 | * | prev = HEAP_HEAD | memory | |
---|
97 | * +-------------------+ | |
---|
98 | * | . available | |
---|
99 | * | . | |
---|
100 | * | . for | |
---|
101 | * | . | |
---|
102 | * BSIZE +S+0 +-------------------+ allocation + <- last dummy block |
---|
103 | * | prev_size = S | | |
---|
104 | * +S+4 +-------------------+------------+ |
---|
105 | * | size = page_size | 0 | <- prev block is free |
---|
106 | * +S+8 +--------------------------------+ <- aligned on page_size |
---|
107 | * | unused space due to alignment | |
---|
108 | * | size < page_size | |
---|
109 | * +--------------------------------+ <- end = begin + size |
---|
110 | * |
---|
111 | */ |
---|
112 | |
---|
113 | uint32_t _Heap_Initialize( |
---|
114 | Heap_Control *the_heap, |
---|
115 | void *starting_address, |
---|
116 | size_t size, |
---|
117 | uint32_t page_size |
---|
118 | ) |
---|
119 | { |
---|
120 | Heap_Block *the_block; |
---|
121 | uint32_t the_size; |
---|
122 | _H_uptr_t start; |
---|
123 | _H_uptr_t aligned_start; |
---|
124 | uint32_t overhead; |
---|
125 | Heap_Statistics *const stats = &the_heap->stats; |
---|
126 | |
---|
127 | if (page_size == 0) |
---|
128 | page_size = CPU_ALIGNMENT; |
---|
129 | else |
---|
130 | _Heap_Align_up( &page_size, CPU_ALIGNMENT ); |
---|
131 | |
---|
132 | /* Calculate aligned_start so that aligned_start + HEAP_BLOCK_USER_OFFSET |
---|
133 | (value of user pointer) is aligned on 'page_size' boundary. Make sure |
---|
134 | resulting 'aligned_start' is not below 'starting_address'. */ |
---|
135 | start = _H_p2u(starting_address); |
---|
136 | aligned_start = start + HEAP_BLOCK_USER_OFFSET; |
---|
137 | _Heap_Align_up_uptr ( &aligned_start, page_size ); |
---|
138 | aligned_start -= HEAP_BLOCK_USER_OFFSET; |
---|
139 | |
---|
140 | /* Calculate 'min_block_size'. It's HEAP_MIN_BLOCK_SIZE aligned up to the |
---|
141 | nearest multiple of 'page_size'. */ |
---|
142 | the_heap->min_block_size = HEAP_MIN_BLOCK_SIZE; |
---|
143 | _Heap_Align_up ( &the_heap->min_block_size, page_size ); |
---|
144 | |
---|
145 | /* Calculate 'the_size' -- size of the first block so that there is enough |
---|
146 | space at the end for the permanent last block. It is equal to 'size' |
---|
147 | minus total overhead aligned down to the nearest multiple of |
---|
148 | 'page_size'. */ |
---|
149 | overhead = HEAP_OVERHEAD + (aligned_start - start); |
---|
150 | if ( size < overhead ) |
---|
151 | return 0; /* Too small area for the heap */ |
---|
152 | the_size = size - overhead; |
---|
153 | _Heap_Align_down ( &the_size, page_size ); |
---|
154 | if ( the_size == 0 ) |
---|
155 | return 0; /* Too small area for the heap */ |
---|
156 | |
---|
157 | the_heap->page_size = page_size; |
---|
158 | the_heap->begin = starting_address; |
---|
159 | the_heap->end = starting_address + size; |
---|
160 | |
---|
161 | the_block = (Heap_Block *) aligned_start; |
---|
162 | |
---|
163 | the_block->prev_size = page_size; |
---|
164 | the_block->size = the_size | HEAP_PREV_USED; |
---|
165 | the_block->next = _Heap_Tail( the_heap ); |
---|
166 | the_block->prev = _Heap_Head( the_heap ); |
---|
167 | _Heap_Head(the_heap)->next = the_block; |
---|
168 | _Heap_Tail(the_heap)->prev = the_block; |
---|
169 | the_heap->start = the_block; |
---|
170 | |
---|
171 | _HAssert(_Heap_Is_aligned(the_heap->page_size, CPU_ALIGNMENT)); |
---|
172 | _HAssert(_Heap_Is_aligned(the_heap->min_block_size, page_size)); |
---|
173 | _HAssert(_Heap_Is_aligned_ptr(_Heap_User_area(the_block), page_size)); |
---|
174 | |
---|
175 | the_block = _Heap_Block_at( the_block, the_size ); |
---|
176 | the_heap->final = the_block; /* Permanent final block of the heap */ |
---|
177 | the_block->prev_size = the_size; /* Previous block is free */ |
---|
178 | the_block->size = page_size; |
---|
179 | |
---|
180 | stats->size = size; |
---|
181 | stats->free_size = the_size; |
---|
182 | stats->min_free_size = the_size; |
---|
183 | stats->free_blocks = 1; |
---|
184 | stats->max_free_blocks = 1; |
---|
185 | stats->used_blocks = 0; |
---|
186 | stats->max_search = 0; |
---|
187 | stats->allocs = 0; |
---|
188 | stats->searches = 0; |
---|
189 | stats->frees = 0; |
---|
190 | stats->resizes = 0; |
---|
191 | stats->instance = instance++; |
---|
192 | |
---|
193 | return ( the_size - HEAP_BLOCK_USED_OVERHEAD ); |
---|
194 | } |
---|
195 | |
---|
196 | /*PAGE |
---|
197 | * |
---|
198 | * Internal routines shared by _Heap_Allocate() and _Heap_Allocate_aligned(). |
---|
199 | * |
---|
200 | * Note: there is no reason to put them into a separate file(s) as they are |
---|
201 | * always required for heap to be useful. |
---|
202 | */ |
---|
203 | |
---|
204 | /* |
---|
205 | * Convert user requested 'size' of memory block to the block size. |
---|
 * Return block size on success, 0 if overflow occurred
---|
207 | */ |
---|
208 | size_t _Heap_Calc_block_size( |
---|
209 | size_t size, |
---|
210 | uint32_t page_size, |
---|
211 | uint32_t min_size) |
---|
212 | { |
---|
213 | uint32_t block_size = size + HEAP_BLOCK_USED_OVERHEAD; |
---|
214 | _Heap_Align_up(&block_size, page_size); |
---|
215 | if (block_size < min_size) block_size = min_size; |
---|
216 | /* 'block_size' becomes <= 'size' if and only if overflow occured. */ |
---|
217 | return (block_size > size) ? block_size : 0; |
---|
218 | } |
---|
219 | |
---|
220 | /* |
---|
221 | * Allocate block of size 'alloc_size' from 'the_block' belonging to |
---|
222 | * 'the_heap'. Split 'the_block' if possible, otherwise allocate it entirely. |
---|
223 | * When split, make the lower part used, and leave the upper part free. |
---|
224 | * Return the size of allocated block. |
---|
225 | */ |
---|
226 | uint32_t _Heap_Block_allocate( |
---|
227 | Heap_Control* the_heap, |
---|
228 | Heap_Block* the_block, |
---|
229 | uint32_t alloc_size |
---|
230 | ) |
---|
231 | { |
---|
232 | Heap_Statistics *const stats = &the_heap->stats; |
---|
233 | uint32_t const block_size = _Heap_Block_size(the_block); |
---|
234 | uint32_t const the_rest = block_size - alloc_size; |
---|
235 | |
---|
236 | _HAssert(_Heap_Is_aligned(block_size, the_heap->page_size)); |
---|
237 | _HAssert(_Heap_Is_aligned(alloc_size, the_heap->page_size)); |
---|
238 | _HAssert(alloc_size <= block_size); |
---|
239 | _HAssert(_Heap_Is_prev_used(the_block)); |
---|
240 | |
---|
241 | if(the_rest >= the_heap->min_block_size) { |
---|
242 | /* Split the block so that upper part is still free, and lower part |
---|
243 | becomes used. This is slightly less optimal than leaving lower part |
---|
244 | free as it requires replacing block in the free blocks list, but it |
---|
245 | makes it possible to reuse this code in the _Heap_Resize_block(). */ |
---|
246 | Heap_Block *next_block = _Heap_Block_at(the_block, alloc_size); |
---|
247 | _Heap_Block_replace(the_block, next_block); |
---|
248 | the_block->size = alloc_size | HEAP_PREV_USED; |
---|
249 | next_block->size = the_rest | HEAP_PREV_USED; |
---|
250 | _Heap_Block_at(next_block, the_rest)->prev_size = the_rest; |
---|
251 | } |
---|
252 | else { |
---|
253 | /* Don't split the block as remainder is either zero or too small to be |
---|
254 | used as a separate free block. Change 'alloc_size' to the size of the |
---|
255 | block and remove the block from the list of free blocks. */ |
---|
256 | _Heap_Block_remove(the_block); |
---|
257 | alloc_size = block_size; |
---|
258 | _Heap_Block_at(the_block, alloc_size)->size |= HEAP_PREV_USED; |
---|
259 | stats->free_blocks -= 1; |
---|
260 | } |
---|
261 | /* Update statistics */ |
---|
262 | stats->free_size -= alloc_size; |
---|
263 | if(stats->min_free_size > stats->free_size) |
---|
264 | stats->min_free_size = stats->free_size; |
---|
265 | stats->used_blocks += 1; |
---|
266 | return alloc_size; |
---|
267 | } |
---|