1 | /** |
---|
2 | * @file |
---|
3 | * |
---|
4 | * @brief Workspace Handler Support |
---|
5 | * @ingroup ScoreWorkspace |
---|
6 | */ |
---|
7 | |
---|
8 | /* |
---|
9 | * COPYRIGHT (c) 1989-2009. |
---|
10 | * On-Line Applications Research Corporation (OAR). |
---|
11 | * |
---|
12 | * The license and distribution terms for this file may be |
---|
13 | * found in the file LICENSE in this distribution or at |
---|
14 | * http://www.rtems.org/license/LICENSE. |
---|
15 | */ |
---|
16 | |
---|
17 | #if HAVE_CONFIG_H |
---|
18 | #include "config.h" |
---|
19 | #endif |
---|
20 | |
---|
21 | #include <rtems/score/wkspace.h> |
---|
22 | #include <rtems/score/assert.h> |
---|
23 | #include <rtems/score/heapimpl.h> |
---|
24 | #include <rtems/score/interr.h> |
---|
25 | #include <rtems/score/percpudata.h> |
---|
26 | #include <rtems/score/threadimpl.h> |
---|
27 | #include <rtems/score/tls.h> |
---|
28 | #include <rtems/posix/pthread.h> |
---|
29 | #include <rtems/config.h> |
---|
30 | |
---|
31 | #include <string.h> |
---|
32 | |
---|
33 | /* #define DEBUG_WORKSPACE */ |
---|
34 | #if defined(DEBUG_WORKSPACE) |
---|
35 | #include <rtems/bspIo.h> |
---|
36 | #endif |
---|
37 | |
---|
/*
 * Linker set holding the statically initialized per-CPU data image; each
 * entry is cache-line aligned so per-processor copies never share a line.
 */
RTEMS_LINKER_RWSET(
  _Per_CPU_Data,
  RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES ) char
);

/* The RTEMS Workspace heap; all _Workspace_* operations act on this heap. */
Heap_Control _Workspace_Area;
---|
44 | |
---|
45 | static uintptr_t _Workspace_Space_for_TLS( uintptr_t page_size ) |
---|
46 | { |
---|
47 | uintptr_t tls_size; |
---|
48 | uintptr_t space; |
---|
49 | |
---|
50 | tls_size = _TLS_Get_size(); |
---|
51 | |
---|
52 | /* |
---|
53 | * In case we have a non-zero TLS size, then we need a TLS area for each |
---|
54 | * thread. These areas are allocated from the workspace. Ensure that the |
---|
55 | * workspace is large enough to fulfill all requests known at configuration |
---|
56 | * time (so excluding the unlimited option). It is not possible to estimate |
---|
57 | * the TLS size in the configuration at compile-time. The TLS size is |
---|
58 | * determined at application link-time. |
---|
59 | */ |
---|
60 | if ( tls_size > 0 ) { |
---|
61 | uintptr_t tls_align = _TLS_Heap_align_up( (uintptr_t) _TLS_Alignment ); |
---|
62 | uintptr_t tls_alloc = _TLS_Get_allocation_size( tls_size, tls_align ); |
---|
63 | |
---|
64 | /* |
---|
65 | * Memory allocated with an alignment constraint is allocated from the end |
---|
66 | * of a free block. The last allocation may need one free block of minimum |
---|
67 | * size. |
---|
68 | */ |
---|
69 | space = _Heap_Min_block_size( page_size ); |
---|
70 | |
---|
71 | space += _Thread_Initial_thread_count |
---|
72 | * _Heap_Size_with_overhead( page_size, tls_alloc, tls_align ); |
---|
73 | } else { |
---|
74 | space = 0; |
---|
75 | } |
---|
76 | |
---|
77 | return space; |
---|
78 | } |
---|
79 | |
---|
/*
 * Returns the workspace bytes needed for the per-CPU data areas of the
 * secondary processors (processor 0 uses the linker-set image directly,
 * see _Workspace_Allocate_per_CPU_data()).  Always zero on uniprocessor
 * configurations.
 */
static uintptr_t _Workspace_Space_for_per_CPU_data( uintptr_t page_size )
{
  uintptr_t space;

#ifdef RTEMS_SMP
  uintptr_t size;

  size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );
  /* The linker set is built from cache-line aligned items only. */
  _Assert( size % CPU_CACHE_LINE_BYTES == 0 );

  if ( size > 0 ) {
    /*
     * Memory allocated with an alignment constraint is allocated from the end of
     * a free block.  The last allocation may need one free block of minimum
     * size.
     */
    space = _Heap_Min_block_size( page_size );

    /* One copy per processor beyond the first, including heap overhead. */
    space += ( rtems_configuration_get_maximum_processors() - 1 )
      * _Heap_Size_with_overhead( page_size, size, CPU_CACHE_LINE_BYTES );
  } else {
    space = 0;
  }
#else
  space = 0;
#endif

  return space;
}
---|
109 | |
---|
/*
 * Assigns each processor its per-CPU data area.  Processor 0 uses the
 * statically initialized linker-set image in place; every other processor
 * gets a cache-line aligned workspace copy of that image.  No-op on
 * uniprocessor configurations or when the linker set is empty.
 */
static void _Workspace_Allocate_per_CPU_data( void )
{
#ifdef RTEMS_SMP
  uintptr_t size;

  size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );

  if ( size > 0 ) {
    Per_CPU_Control *cpu;
    uint32_t cpu_index;
    uint32_t cpu_max;

    /* Processor 0 owns the original image; no copy needed. */
    cpu = _Per_CPU_Get_by_index( 0 );
    cpu->data = RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data );

    cpu_max = rtems_configuration_get_maximum_processors();

    for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
      cpu = _Per_CPU_Get_by_index( cpu_index );
      cpu->data = _Workspace_Allocate_aligned( size, CPU_CACHE_LINE_BYTES );
      /*
       * The workspace was sized for these allocations in
       * _Workspace_Space_for_per_CPU_data(), so this cannot fail.
       */
      _Assert( cpu->data != NULL );
      memcpy( cpu->data, RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data ), size);
    }
  }
#endif
}
---|
136 | |
---|
/*
 * Initializes the RTEMS Workspace from the memory areas provided by the BSP.
 *
 * The first usable area initializes the heap via _Heap_Initialize(); each
 * subsequent usable area is added with the supplied extend handler.  The
 * required size is the configured workspace size plus the space needed for
 * TLS areas and SMP per-CPU data.  Terminates the system with
 * INTERNAL_ERROR_TOO_LITTLE_WORKSPACE if the areas cannot satisfy it.
 *
 * areas      - array of memory areas available for the workspace
 * area_count - number of entries in areas
 * extend     - handler used to add areas after the first (normally
 *              _Heap_Extend, or _Heap_No_extend)
 */
void _Workspace_Handler_initialization(
  Heap_Area *areas,
  size_t area_count,
  Heap_Initialization_or_extend_handler extend
)
{
  Heap_Initialization_or_extend_handler init_or_extend;
  uintptr_t remaining;
  bool do_zero;
  bool unified;
  uintptr_t page_size;
  uintptr_t overhead;
  size_t i;

  page_size = CPU_HEAP_ALIGNMENT;

  /* Total demand: configured size plus link-time-determined extras. */
  remaining = rtems_configuration_get_work_space_size();
  remaining += _Workspace_Space_for_TLS( page_size );
  remaining += _Workspace_Space_for_per_CPU_data( page_size );

  init_or_extend = _Heap_Initialize;
  do_zero = rtems_configuration_get_do_zero_of_workspace();
  unified = rtems_configuration_get_unified_work_area();
  overhead = _Heap_Area_overhead( page_size );

  for ( i = 0; i < area_count; ++i ) {
    Heap_Area *area;

    area = &areas[ i ];

    if ( do_zero ) {
      memset( area->begin, 0, area->size );
    }

    /* Areas too small to hold the heap overhead are skipped entirely. */
    if ( area->size > overhead ) {
      uintptr_t space_available;
      uintptr_t size;

      if ( unified ) {
        /* Unified work area: the whole area goes to the workspace heap. */
        size = area->size;
      } else {
        if ( remaining > 0 ) {
          /*
           * Take only what is still needed (plus overhead), leaving the
           * rest of the area for the C program heap.
           */
          size = remaining < area->size - overhead ?
            remaining + overhead : area->size;
        } else {
          size = 0;
        }
      }

      space_available = ( *init_or_extend )(
        &_Workspace_Area,
        area->begin,
        size,
        page_size
      );

      /* Record the consumed part so callers can reuse the remainder. */
      area->begin = (char *) area->begin + size;
      area->size -= size;

      if ( space_available < remaining ) {
        remaining -= space_available;
      } else {
        remaining = 0;
      }

      /* Only the first usable area initializes; the rest extend. */
      init_or_extend = extend;
    }
  }

  if ( remaining > 0 ) {
    _Internal_error( INTERNAL_ERROR_TOO_LITTLE_WORKSPACE );
  }

  _Heap_Protection_set_delayed_free_fraction( &_Workspace_Area, 1 );
  _Workspace_Allocate_per_CPU_data();
}
---|
213 | |
---|
214 | void *_Workspace_Allocate( |
---|
215 | size_t size |
---|
216 | ) |
---|
217 | { |
---|
218 | void *memory; |
---|
219 | |
---|
220 | memory = _Heap_Allocate( &_Workspace_Area, size ); |
---|
221 | #if defined(DEBUG_WORKSPACE) |
---|
222 | printk( |
---|
223 | "Workspace_Allocate(%d) from %p/%p -> %p\n", |
---|
224 | size, |
---|
225 | __builtin_return_address( 0 ), |
---|
226 | __builtin_return_address( 1 ), |
---|
227 | memory |
---|
228 | ); |
---|
229 | #endif |
---|
230 | return memory; |
---|
231 | } |
---|
232 | |
---|
233 | void *_Workspace_Allocate_aligned( size_t size, size_t alignment ) |
---|
234 | { |
---|
235 | return _Heap_Allocate_aligned( &_Workspace_Area, size, alignment ); |
---|
236 | } |
---|
237 | |
---|
/*
 * Returns a previously allocated block to the workspace heap.
 */
---|
/*
 * block - pointer returned by a _Workspace_Allocate* call; NULL is allowed
 *         (the heap handler treats it as a no-op).
 */
void _Workspace_Free(
  void *block
)
{
#if defined(DEBUG_WORKSPACE)
  /* Trace the free together with two levels of return addresses. */
  printk(
    "Workspace_Free(%p) from %p/%p\n",
    block,
    __builtin_return_address( 0 ),
    __builtin_return_address( 1 )
  );
#endif
  _Heap_Free( &_Workspace_Area, block );
}
---|
255 | |
---|
256 | void *_Workspace_Allocate_or_fatal_error( |
---|
257 | size_t size |
---|
258 | ) |
---|
259 | { |
---|
260 | void *memory; |
---|
261 | |
---|
262 | memory = _Heap_Allocate( &_Workspace_Area, size ); |
---|
263 | #if defined(DEBUG_WORKSPACE) |
---|
264 | printk( |
---|
265 | "Workspace_Allocate_or_fatal_error(%d) from %p/%p -> %p\n", |
---|
266 | size, |
---|
267 | __builtin_return_address( 0 ), |
---|
268 | __builtin_return_address( 1 ), |
---|
269 | memory |
---|
270 | ); |
---|
271 | #endif |
---|
272 | |
---|
273 | if ( memory == NULL ) |
---|
274 | _Internal_error( INTERNAL_ERROR_WORKSPACE_ALLOCATION ); |
---|
275 | |
---|
276 | return memory; |
---|
277 | } |
---|