source: rtems/cpukit/score/src/wkspace.c @ 776464a

Last change on this file: commit 776464a, checked in by Sebastian Huber <sebastian.huber@…> on 09/19/18 at 09:52:47

score: Allocate per-CPU data only if necessary

_Workspace_Allocate_aligned() would return a non-NULL pointer for a zero-size
allocation request if there is enough memory available.  This conflicts with
the size estimate of zero in _Workspace_Space_for_per_CPU_data() if the
per-CPU data set is empty.

Update #3507.
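
A minimal sketch of the conflict, juxtaposing simplified fragments of
_Workspace_Space_for_per_CPU_data() and _Workspace_Allocate_per_CPU_data()
from the file below (the comments are explanatory and not part of the source):

  size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );

  /* Size estimate: an empty per-CPU data set reserves no workspace at all */
  if ( size > 0 ) {
    space = _Heap_Min_block_size( page_size );
    space += ( rtems_configuration_get_maximum_processors() - 1 )
      * _Heap_Size_with_overhead( page_size, size, CPU_CACHE_LINE_BYTES );
  } else {
    space = 0;
  }

  /* Allocation: request workspace only for a non-empty set; a zero size
     request could otherwise return a non-NULL minimum-size block that the
     estimate above never accounted for */
  if ( size > 0 ) {
    cpu->data = _Workspace_Allocate_aligned( size, CPU_CACHE_LINE_BYTES );
  }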

/**
 *  @file
 *
 *  @brief Workspace Handler Support
 *  @ingroup ScoreWorkspace
 */

/*
 *  COPYRIGHT (c) 1989-2009.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/wkspace.h>
#include <rtems/score/assert.h>
#include <rtems/score/heapimpl.h>
#include <rtems/score/interr.h>
#include <rtems/score/percpudata.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/tls.h>
#include <rtems/config.h>

#include <string.h>

/* #define DEBUG_WORKSPACE */
#if defined(DEBUG_WORKSPACE)
  #include <rtems/bspIo.h>
#endif

RTEMS_LINKER_RWSET(
  _Per_CPU_Data,
  RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES ) char
);

Heap_Control _Workspace_Area;

static uint32_t _Workspace_Get_maximum_thread_count( void )
{
  uint32_t thread_count;

  thread_count = 0;
  thread_count += _Thread_Get_maximum_internal_threads();

  thread_count += rtems_resource_maximum_per_allocation(
    Configuration_RTEMS_API.maximum_tasks
  );

#if defined(RTEMS_POSIX_API)
  thread_count += rtems_resource_maximum_per_allocation(
    Configuration_POSIX_API.maximum_threads
  );
#endif

  return thread_count;
}

static uintptr_t _Workspace_Space_for_TLS( uintptr_t page_size )
{
  uintptr_t tls_size;
  uintptr_t space;

  tls_size = _TLS_Get_size();

  /*
   * If the TLS size is non-zero, we need a TLS area for each thread.  These
   * areas are allocated from the workspace.  Ensure that the workspace is
   * large enough to fulfill all requests known at configuration time (so
   * excluding the unlimited option).  It is not possible to estimate the TLS
   * size in the configuration at compile-time.  The TLS size is determined
   * at application link-time.
   */
  if ( tls_size > 0 ) {
    uintptr_t tls_align = _TLS_Heap_align_up( (uintptr_t) _TLS_Alignment );
    uintptr_t tls_alloc = _TLS_Get_allocation_size( tls_size, tls_align );

    /*
     * Memory allocated with an alignment constraint is allocated from the end
     * of a free block.  The last allocation may need one free block of minimum
     * size.
     */
    space = _Heap_Min_block_size( page_size );

    space += _Workspace_Get_maximum_thread_count()
      * _Heap_Size_with_overhead( page_size, tls_alloc, tls_align );
  } else {
    space = 0;
  }

  return space;
}
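
/*
 * Illustrative arithmetic for the estimate above, with purely hypothetical
 * numbers: if the application is configured for at most 10 threads, the TLS
 * allocation size is 64 bytes, _Heap_Size_with_overhead() reports 96 bytes
 * for such an allocation, and _Heap_Min_block_size() reports 32 bytes, then
 * the reserved space is 32 + 10 * 96 = 992 bytes.  The extra minimum-size
 * block covers the worst case of the last aligned allocation being carved
 * from the end of a free block.
 */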

static uintptr_t _Workspace_Space_for_per_CPU_data( uintptr_t page_size )
{
  uintptr_t space;

#ifdef RTEMS_SMP
  uintptr_t size;

  size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );
  _Assert( size % CPU_CACHE_LINE_BYTES == 0 );

  if ( size > 0 ) {
    /*
     * Memory allocated with an alignment constraint is allocated from the end
     * of a free block.  The last allocation may need one free block of minimum
     * size.
     */
    space = _Heap_Min_block_size( page_size );

    space += ( rtems_configuration_get_maximum_processors() - 1 )
      * _Heap_Size_with_overhead( page_size, size, CPU_CACHE_LINE_BYTES );
  } else {
    space = 0;
  }
#else
  space = 0;
#endif

  return space;
}
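
/*
 * Illustrative arithmetic for the per-CPU data estimate above, again with
 * hypothetical numbers: on a system configured for 4 processors with a
 * 128-byte per-CPU data set, processor 0 uses the linker set in place and
 * only 3 workspace copies are needed.  Assuming _Heap_Size_with_overhead()
 * reports 160 bytes per copy and _Heap_Min_block_size() reports 32 bytes,
 * the estimate is 32 + 3 * 160 = 512 bytes.  An empty set yields an estimate
 * of zero, which is why the allocation below is skipped in that case (see
 * the commit message above).
 */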

static void _Workspace_Allocate_per_CPU_data( void )
{
#ifdef RTEMS_SMP
  uintptr_t size;

  size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );

  if ( size > 0 ) {
    Per_CPU_Control *cpu;
    uint32_t         cpu_index;
    uint32_t         cpu_max;

    /* Processor 0 uses the statically allocated linker set data in place */
    cpu = _Per_CPU_Get_by_index( 0 );
    cpu->data = RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data );

    cpu_max = rtems_configuration_get_maximum_processors();

    /*
     * Every other processor gets a cache-line aligned workspace copy of the
     * initial per-CPU data.
     */
    for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
      cpu = _Per_CPU_Get_by_index( cpu_index );
      cpu->data = _Workspace_Allocate_aligned( size, CPU_CACHE_LINE_BYTES );
      _Assert( cpu->data != NULL );
      memcpy( cpu->data, RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data ), size );
    }
  }
#endif
}

void _Workspace_Handler_initialization(
  Heap_Area *areas,
  size_t area_count,
  Heap_Initialization_or_extend_handler extend
)
{
  Heap_Initialization_or_extend_handler init_or_extend;
  uintptr_t                             remaining;
  bool                                  do_zero;
  bool                                  unified;
  uintptr_t                             page_size;
  uintptr_t                             overhead;
  size_t                                i;

  page_size = CPU_HEAP_ALIGNMENT;

  remaining = rtems_configuration_get_work_space_size();
  remaining += _Workspace_Space_for_TLS( page_size );
  remaining += _Workspace_Space_for_per_CPU_data( page_size );

  init_or_extend = _Heap_Initialize;
  do_zero = rtems_configuration_get_do_zero_of_workspace();
  unified = rtems_configuration_get_unified_work_area();
  overhead = _Heap_Area_overhead( page_size );

  for ( i = 0; i < area_count; ++i ) {
    Heap_Area *area;

    area = &areas[ i ];

    if ( do_zero ) {
      memset( area->begin, 0, area->size );
    }

    if ( area->size > overhead ) {
      uintptr_t space_available;
      uintptr_t size;

      /*
       * With a unified work area the whole area is handed to the workspace.
       * Otherwise, consume only what is still needed to cover the remaining
       * workspace demand plus the per-area overhead, capped by the area size.
       */
      if ( unified ) {
        size = area->size;
      } else {
        if ( remaining > 0 ) {
          size = remaining < area->size - overhead ?
            remaining + overhead : area->size;
        } else {
          size = 0;
        }
      }

      space_available = ( *init_or_extend )(
        &_Workspace_Area,
        area->begin,
        size,
        page_size
      );

      /* Return the unused rest of the area to the caller */
      area->begin = (char *) area->begin + size;
      area->size -= size;

      if ( space_available < remaining ) {
        remaining -= space_available;
      } else {
        remaining = 0;
      }

      /* The first usable area initializes the workspace heap, later areas
         extend it */
      init_or_extend = extend;
    }
  }

  if ( remaining > 0 ) {
    _Internal_error( INTERNAL_ERROR_TOO_LITTLE_WORKSPACE );
  }

  _Heap_Protection_set_delayed_free_fraction( &_Workspace_Area, 1 );
  _Workspace_Allocate_per_CPU_data();
}

void *_Workspace_Allocate(
  size_t   size
)
{
  void *memory;

  memory = _Heap_Allocate( &_Workspace_Area, size );
  #if defined(DEBUG_WORKSPACE)
    printk(
      "Workspace_Allocate(%d) from %p/%p -> %p\n",
      size,
      __builtin_return_address( 0 ),
      __builtin_return_address( 1 ),
      memory
    );
  #endif
  return memory;
}

void *_Workspace_Allocate_aligned( size_t size, size_t alignment )
{
  return _Heap_Allocate_aligned( &_Workspace_Area, size, alignment );
}

/*
 *  _Workspace_Free
 */
void _Workspace_Free(
  void *block
)
{
  #if defined(DEBUG_WORKSPACE)
    printk(
      "Workspace_Free(%p) from %p/%p\n",
      block,
      __builtin_return_address( 0 ),
      __builtin_return_address( 1 )
    );
  #endif
  _Heap_Free( &_Workspace_Area, block );
}

void *_Workspace_Allocate_or_fatal_error(
  size_t      size
)
{
  void *memory;

  memory = _Heap_Allocate( &_Workspace_Area, size );
  #if defined(DEBUG_WORKSPACE)
    printk(
      "Workspace_Allocate_or_fatal_error(%d) from %p/%p -> %p\n",
      size,
      __builtin_return_address( 0 ),
      __builtin_return_address( 1 ),
      memory
    );
  #endif

  if ( memory == NULL )
    _Internal_error( INTERNAL_ERROR_WORKSPACE_ALLOCATION );

  return memory;
}
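
For context, a hypothetical kernel-internal caller of the allocation interface
above could look like the following sketch (the helper name and the buffer use
are assumptions; _Workspace_Allocate() returns NULL when the workspace is
exhausted, while _Workspace_Allocate_or_fatal_error() terminates the system
instead):

  #include <stdbool.h>
  #include <string.h>

  #include <rtems/score/wkspace.h>

  /* Hypothetical helper: borrow a scratch buffer from the workspace and
     release it again.  Returns false if the workspace is exhausted. */
  static bool use_scratch_buffer( size_t size )
  {
    void *buffer;

    buffer = _Workspace_Allocate( size );
    if ( buffer == NULL ) {
      return false;
    }

    memset( buffer, 0, size );  /* stand-in for the actual use of the buffer */

    _Workspace_Free( buffer );
    return true;
  }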