source: rtems/cpukit/score/src/wkspace.c @ 3fba9de2

Last change on this file since 3fba9de2 was 4c20da4b, checked in by Sebastian Huber <sebastian.huber@…>, on 04/04/19 at 07:18:11

doxygen: Rename Score* groups in RTEMSScore*

Update #3706

/**
 *  @file
 *
 *  @brief Workspace Handler Support
 *  @ingroup RTEMSScoreWorkspace
 */

/*
 *  COPYRIGHT (c) 1989-2009.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

#if HAVE_CONFIG_H
  #include "config.h"
#endif

#include <rtems/score/wkspace.h>
#include <rtems/score/assert.h>
#include <rtems/score/heapimpl.h>
#include <rtems/score/interr.h>
#include <rtems/score/percpudata.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/tls.h>
#include <rtems/posix/pthread.h>
#include <rtems/config.h>

#include <string.h>

/* #define DEBUG_WORKSPACE */
#if defined(DEBUG_WORKSPACE)
  #include <rtems/bspIo.h>
#endif

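/*
 * Linker set containing the statically initialized per-CPU data.  Secondary
 * processors receive cache-line aligned copies of this set during workspace
 * initialization, see _Workspace_Allocate_per_CPU_data().
 */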
RTEMS_LINKER_RWSET(
  _Per_CPU_Data,
  RTEMS_ALIGNED( CPU_CACHE_LINE_BYTES ) char
);

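/* The control block of the RTEMS Workspace heap. */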
Heap_Control _Workspace_Area;

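/*
 * Returns the number of bytes of workspace which must be reserved for the
 * thread-local storage (TLS) areas of all threads known at configuration
 * time, or zero if the application has no thread-local data.
 */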
static uintptr_t _Workspace_Space_for_TLS( uintptr_t page_size )
{
  uintptr_t tls_size;
  uintptr_t space;

  tls_size = _TLS_Get_size();

  /*
   * If the TLS size is non-zero, then we need a TLS area for each thread.
   * These areas are allocated from the workspace.  Ensure that the workspace
   * is large enough to fulfill all requests known at configuration time
   * (i.e. excluding the unlimited option).  It is not possible to estimate
   * the TLS size in the configuration at compile-time.  The TLS size is
   * determined at application link-time.
   */
  if ( tls_size > 0 ) {
    uintptr_t tls_align = _TLS_Heap_align_up( (uintptr_t) _TLS_Alignment );
    uintptr_t tls_alloc = _TLS_Get_allocation_size( tls_size, tls_align );

    /*
     * Memory allocated with an alignment constraint is allocated from the end
     * of a free block.  The last allocation may need one free block of
     * minimum size.
     */
    space = _Heap_Min_block_size( page_size );

    space += _Thread_Initial_thread_count
      * _Heap_Size_with_overhead( page_size, tls_alloc, tls_align );
  } else {
    space = 0;
  }

  return space;
}

#ifdef RTEMS_SMP
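/*
 * Carves a block with the requested size and alignment out of the first area
 * that is still large enough and shrinks that area accordingly.  Returns
 * NULL if no area can satisfy the request.
 */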
static void *_Workspace_Allocate_from_areas(
  Heap_Area *areas,
  size_t     area_count,
  uintptr_t  size,
  uintptr_t  alignment
)
{
  size_t i;

  for ( i = 0; i < area_count; ++i ) {
    Heap_Area *area;
    uintptr_t  alloc_begin;
    uintptr_t  alloc_size;

    area = &areas[ i ];
    alloc_begin = (uintptr_t) area->begin;
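    /*
     * Round alloc_begin up to the next multiple of alignment, which must be
     * a power of two.  For example, with alignment 64, an area beginning at
     * 0x1001 yields alloc_begin 0x1040.
     */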
    alloc_begin = ( alloc_begin + alignment - 1 ) & ~( alignment - 1 );
    alloc_size = size;
    alloc_size += alloc_begin - (uintptr_t) area->begin;

    if ( area->size >= alloc_size ) {
      area->begin = (void *) ( alloc_begin + size );
      area->size -= alloc_size;

      return (void *) alloc_begin;
    }
  }

  return NULL;
}
#endif

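/*
 * Gives each processor its own copy of the statically initialized per-CPU
 * data.  Processor 0 uses the linker set image directly; every other
 * processor gets a cache-line aligned copy carved out of the workspace
 * areas before the workspace heap itself is initialized.
 */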
static void _Workspace_Allocate_per_CPU_data(
  Heap_Area *areas,
  size_t area_count
)
{
#ifdef RTEMS_SMP
  uintptr_t size;

  size = RTEMS_LINKER_SET_SIZE( _Per_CPU_Data );

  if ( size > 0 ) {
    Per_CPU_Control *cpu;
    uint32_t         cpu_index;
    uint32_t         cpu_max;

    cpu = _Per_CPU_Get_by_index( 0 );
    cpu->data = RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data );

    cpu_max = rtems_configuration_get_maximum_processors();

    for ( cpu_index = 1 ; cpu_index < cpu_max ; ++cpu_index ) {
      cpu = _Per_CPU_Get_by_index( cpu_index );
      cpu->data = _Workspace_Allocate_from_areas(
        areas,
        area_count,
        size,
        CPU_CACHE_LINE_BYTES
      );

      if ( cpu->data == NULL ) {
        _Internal_error( INTERNAL_ERROR_NO_MEMORY_FOR_PER_CPU_DATA );
      }

      memcpy( cpu->data, RTEMS_LINKER_SET_BEGIN( _Per_CPU_Data ), size );
    }
  }
#else
  (void) areas;
  (void) area_count;
#endif
}

void _Workspace_Handler_initialization(
  Heap_Area *areas,
  size_t area_count,
  Heap_Initialization_or_extend_handler extend
)
{
  Heap_Initialization_or_extend_handler init_or_extend;
  uintptr_t                             remaining;
  bool                                  do_zero;
  bool                                  unified;
  uintptr_t                             page_size;
  uintptr_t                             overhead;
  size_t                                i;

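  /*
   * Carve out the per-CPU data copies first, before the areas below are
   * turned into the workspace heap.
   */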
  _Workspace_Allocate_per_CPU_data( areas, area_count );

  page_size = CPU_HEAP_ALIGNMENT;

  remaining = rtems_configuration_get_work_space_size();
  remaining += _Workspace_Space_for_TLS( page_size );

  init_or_extend = _Heap_Initialize;
  do_zero = rtems_configuration_get_do_zero_of_workspace();
  unified = rtems_configuration_get_unified_work_area();
  overhead = _Heap_Area_overhead( page_size );

  for ( i = 0; i < area_count; ++i ) {
    Heap_Area *area;

    area = &areas[ i ];

    if ( do_zero ) {
      memset( area->begin, 0, area->size );
    }

    if ( area->size > overhead ) {
      uintptr_t space_available;
      uintptr_t size;

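      /*
       * With a unified work area the whole area is handed to the workspace
       * heap.  Otherwise only the still missing workspace size plus the
       * per-area heap overhead is consumed here, and the remainder of the
       * area stays available to later consumers such as the C program heap.
       */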
      if ( unified ) {
        size = area->size;
      } else {
        if ( remaining > 0 ) {
          size = remaining < area->size - overhead ?
            remaining + overhead : area->size;
        } else {
          size = 0;
        }
      }

      space_available = ( *init_or_extend )(
        &_Workspace_Area,
        area->begin,
        size,
        page_size
      );

      area->begin = (char *) area->begin + size;
      area->size -= size;

      if ( space_available < remaining ) {
        remaining -= space_available;
      } else {
        remaining = 0;
      }

      init_or_extend = extend;
    }
  }

  if ( remaining > 0 ) {
    _Internal_error( INTERNAL_ERROR_TOO_LITTLE_WORKSPACE );
  }

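  /*
   * With heap protection enabled (RTEMS_DEBUG builds) this tunes how freed
   * workspace blocks are delayed before reuse to help catch use-after-free
   * errors; otherwise the call is a no-op.
   */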
  _Heap_Protection_set_delayed_free_fraction( &_Workspace_Area, 1 );
}

void *_Workspace_Allocate(
  size_t   size
)
{
  void *memory;

  memory = _Heap_Allocate( &_Workspace_Area, size );
  #if defined(DEBUG_WORKSPACE)
    printk(
      "Workspace_Allocate(%d) from %p/%p -> %p\n",
      size,
      __builtin_return_address( 0 ),
      __builtin_return_address( 1 ),
      memory
    );
  #endif
  return memory;
}

void *_Workspace_Allocate_aligned( size_t size, size_t alignment )
{
  return _Heap_Allocate_aligned( &_Workspace_Area, size, alignment );
}

/*
 *  _Workspace_Free
 */
void _Workspace_Free(
  void *block
)
{
  #if defined(DEBUG_WORKSPACE)
    printk(
      "Workspace_Free(%p) from %p/%p\n",
      block,
      __builtin_return_address( 0 ),
      __builtin_return_address( 1 )
    );
  #endif
  _Heap_Free( &_Workspace_Area, block );
}

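/*
 * Like _Workspace_Allocate(), except that a failed allocation terminates the
 * system with the INTERNAL_ERROR_WORKSPACE_ALLOCATION fatal error instead of
 * returning NULL.
 */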
void *_Workspace_Allocate_or_fatal_error(
  size_t      size
)
{
  void *memory;

  memory = _Heap_Allocate( &_Workspace_Area, size );
  #if defined(DEBUG_WORKSPACE)
    printk(
      "Workspace_Allocate_or_fatal_error(%d) from %p/%p -> %p\n",
      size,
      __builtin_return_address( 0 ),
      __builtin_return_address( 1 ),
      memory
    );
  #endif

  if ( memory == NULL )
    _Internal_error( INTERNAL_ERROR_WORKSPACE_ALLOCATION );

  return memory;
}