source: rtems/bsps/shared/cache/cacheimpl.h @ 5bf0c1a

/*
 *  Cache Manager
 *
 *  COPYRIGHT (c) 1989-1999.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 *
 *
 *  The functions in this file implement the API to the RTEMS Cache Manager and
 *  are divided into data cache and instruction cache functions. Data cache
 *  functions only have bodies if a data cache is supported. Instruction
 *  cache functions only have bodies if an instruction cache is supported.
 *  Support for a particular cache exists only if CPU_x_CACHE_ALIGNMENT is
 *  defined, where x ∈ {DATA, INSTRUCTION}. These definitions are found in
 *  the Cache Manager Wrapper header files, often
 *
 *  rtems/c/src/lib/libcpu/CPU/cache_.h
 *
 *  The cache implementation header file can define
 *
 *    #define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS
 *
 *  if it provides cache maintenance functions which operate on multiple lines.
 *  Otherwise a generic loop with single line operations will be used.  It is
 *  strongly recommended to provide the implementation in terms of static
 *  inline functions for performance reasons.
 *
 *  The functions below are implemented with CPU dependent inline routines
 *  found in the cache.c files for each CPU. In the event that a CPU does
 *  not support a specific function for a cache it has, the CPU dependent
 *  routine does nothing (but does exist).
 *
 *  At this point, the Cache Manager makes no considerations, and provides no
 *  support for BSP specific issues such as a secondary cache. In such a system,
 *  the CPU dependent routines would have to be modified, or a BSP layer added
 *  to this Manager.
 */
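
/*
 * Illustrative sketch only (excluded from compilation): a cache
 * implementation header for a port with a 32-byte data cache line could
 * provide the alignment define and, if it opts into
 * CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS, static inline range functions
 * along these lines.  The 32-byte line size is an assumption, not taken
 * from any particular port.
 */
#if 0
#define CPU_DATA_CACHE_ALIGNMENT 32
#define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS

static inline void _CPU_cache_flush_data_range(
  const void *d_addr,
  size_t      n_bytes
)
{
  uintptr_t       addr = (uintptr_t) d_addr & ~(uintptr_t)(CPU_DATA_CACHE_ALIGNMENT - 1);
  const uintptr_t last = (uintptr_t) d_addr + n_bytes;

  if ( n_bytes == 0 )
    return;

  /* Flush every line touched by [d_addr, d_addr + n_bytes) */
  while ( addr < last ) {
    _CPU_cache_flush_1_data_line( (const void *) addr );
    addr += CPU_DATA_CACHE_ALIGNMENT;
  }
}
#endif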

#include <rtems.h>

#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
#include <rtems/score/smpimpl.h>
#endif

#if CPU_DATA_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
#error "CPU_DATA_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
#endif

#if CPU_INSTRUCTION_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
#error "CPU_INSTRUCTION_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
#endif

/*
 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE A DATA CACHE
 */

/*
 * This function is called to flush the data cache by performing cache
 * copybacks. It must determine how many cache lines need to be copied
 * back and then perform the copybacks.
 */
void
rtems_cache_flush_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_flush_data_range( d_addr, n_bytes );
#else
  const void * final_address;

 /*
  * Set d_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be pushed. Increment d_addr and push
  * the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to flush is zero */
    return;

  final_address = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
  while( d_addr <= final_address )  {
    _CPU_cache_flush_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }
#endif
#endif
}
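
/*
 * Usage sketch only (excluded from compilation): before a device reads a
 * buffer from memory via DMA, the dirty data cache lines covering the buffer
 * are flushed so the device sees the current data.  The device start routine
 * below is hypothetical.
 */
#if 0
static void example_dma_transmit( const void *buf, size_t len )
{
  /* Push any dirty cache lines covering the buffer back to memory */
  rtems_cache_flush_multiple_data_lines( buf, len );

  /* Hypothetical driver call that starts the DMA transfer to the device */
  example_device_start_tx( buf, len );
}
#endif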


/*
 * This function is responsible for performing a data cache invalidate.
 * It must determine how many cache lines need to be invalidated and then
 * perform the invalidations.
 */

void
rtems_cache_invalidate_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_invalidate_data_range( d_addr, n_bytes );
#else
  const void * final_address;

 /*
  * Set d_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be invalidated. Increment d_addr and
  * invalidate the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to invalidate is zero */
    return;

  final_address = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
  while( final_address >= d_addr ) {
    _CPU_cache_invalidate_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }
#endif
#endif
}
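
/*
 * Usage sketch only (excluded from compilation): after a device has written
 * into memory via DMA, the data cache lines covering the buffer are
 * invalidated so stale cached contents are not used.  The wait routine is
 * hypothetical, and the buffer is assumed to be cache line aligned and sized
 * so that no unrelated data shares the invalidated lines.
 */
#if 0
static void example_dma_receive( void *buf, size_t len )
{
  /* Hypothetical driver call that blocks until the DMA transfer completes */
  example_device_wait_rx_done();

  /* Drop any stale cache lines covering the freshly written buffer */
  rtems_cache_invalidate_multiple_data_lines( buf, len );
}
#endif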


/*
 * This function is responsible for performing a data cache flush.
 * It flushes the entire cache.
 */
void
rtems_cache_flush_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
   /*
    * Call the CPU-specific routine
    */
   _CPU_cache_flush_entire_data();
#endif
}


/*
 * This function is responsible for performing a data cache
 * invalidate. It invalidates the entire cache.
 */
void
rtems_cache_invalidate_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
 /*
  * Call the CPU-specific routine
  */

 _CPU_cache_invalidate_entire_data();
#endif
}


/*
 * This function returns the data cache granularity.
 */
size_t
rtems_cache_get_data_line_size( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  return CPU_DATA_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}
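
/*
 * Usage sketch only (excluded from compilation): because the maintenance
 * operations above act on whole cache lines, buffer sizes are often rounded
 * up to a multiple of the data cache line size before flushing or
 * invalidating.  A return value of zero means no data cache is configured.
 */
#if 0
static size_t example_round_to_data_lines( size_t n_bytes )
{
  size_t line = rtems_cache_get_data_line_size();

  if ( line == 0 )
    return n_bytes;    /* No data cache, nothing to round */

  /* Cache line sizes are powers of two, so mask arithmetic is sufficient */
  return ( n_bytes + line - 1 ) & ~( line - 1 );
}
#endif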


size_t
rtems_cache_get_data_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_data_cache_size( level );
#else
  return 0;
#endif
}

/*
 * This function freezes the data cache; cache lines
 * are not replaced.
 */
void
rtems_cache_freeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_freeze_data();
#endif
}


/*
 * This function unfreezes the data cache.
 */
void rtems_cache_unfreeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_data();
#endif
}


/* Turn on the data cache. */
void
rtems_cache_enable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_enable_data();
#endif
}


/* Turn off the data cache. */
void
rtems_cache_disable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_disable_data();
#endif
}



/*
 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE AN INSTRUCTION CACHE
 */

#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && defined(RTEMS_SMP) \
  && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)

typedef struct {
  const void *addr;
  size_t size;
} smp_cache_area;

static void smp_cache_inst_inv(void *arg)
{
  smp_cache_area *area = arg;

  _CPU_cache_invalidate_instruction_range(area->addr, area->size);
}

static void smp_cache_inst_inv_all(void *arg)
{
  _CPU_cache_invalidate_entire_instruction();
}

#endif

/*
 * This function is responsible for performing an instruction cache
 * invalidate. It must determine how many cache lines need to be invalidated
 * and then perform the invalidations.
 */

#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && !defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
static void
_CPU_cache_invalidate_instruction_range(
  const void * i_addr,
  size_t n_bytes
)
{
  const void * final_address;

 /*
  * Set i_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be invalidated. Increment i_addr and
  * invalidate the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to invalidate is zero */
    return;

  final_address = (void *)((size_t)i_addr + n_bytes - 1);
  i_addr = (void *)((size_t)i_addr & ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1));
  while( final_address >= i_addr ) {
    _CPU_cache_invalidate_1_instruction_line( i_addr );
    i_addr = (void *)((size_t)i_addr + CPU_INSTRUCTION_CACHE_ALIGNMENT);
  }
}
#endif

void
rtems_cache_invalidate_multiple_instruction_lines(
  const void * i_addr,
  size_t n_bytes
)
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  smp_cache_area area = { i_addr, n_bytes };

  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv, &area );
#else
  _CPU_cache_invalidate_instruction_range( i_addr, n_bytes );
#endif
#endif
}


/*
 * This function is responsible for performing an instruction cache
 * invalidate. It invalidates the entire cache.
 */
void
rtems_cache_invalidate_entire_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv_all, NULL );
#else
 _CPU_cache_invalidate_entire_instruction();
#endif
#endif
}


/*
 * This function returns the instruction cache granularity.
 */
size_t
rtems_cache_get_instruction_line_size( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  return CPU_INSTRUCTION_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}


size_t
rtems_cache_get_instruction_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_instruction_cache_size( level );
#else
  return 0;
#endif
}


/*
 * This function freezes the instruction cache; cache lines
 * are not replaced.
 */
void
rtems_cache_freeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_freeze_instruction();
#endif
}


/*
 * This function unfreezes the instruction cache.
 */
void rtems_cache_unfreeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_instruction();
#endif
}


/* Turn on the instruction cache. */
void
rtems_cache_enable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_enable_instruction();
#endif
}


/* Turn off the instruction cache. */
void
rtems_cache_disable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_disable_instruction();
#endif
}

/* Returns the maximal cache line size of all cache kinds in bytes. */
size_t rtems_cache_get_maximal_line_size( void )
{
#if defined(CPU_MAXIMAL_CACHE_ALIGNMENT)
  return CPU_MAXIMAL_CACHE_ALIGNMENT;
#endif
  size_t max_line_size = 0;
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  {
    size_t data_line_size = CPU_DATA_CACHE_ALIGNMENT;
    if ( max_line_size < data_line_size )
      max_line_size = data_line_size;
  }
#endif
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  {
    size_t instruction_line_size = CPU_INSTRUCTION_CACHE_ALIGNMENT;
    if ( max_line_size < instruction_line_size )
      max_line_size = instruction_line_size;
  }
#endif
  return max_line_size;
}
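
/*
 * Usage sketch only (excluded from compilation): a DMA buffer is commonly
 * aligned to, and padded to a multiple of, the maximal cache line size so
 * that cache maintenance on it never touches lines shared with other
 * objects.  posix_memalign() is used here purely for illustration.
 */
#if 0
#include <stdlib.h>

static void *example_alloc_dma_buffer( size_t n_bytes )
{
  size_t line = rtems_cache_get_maximal_line_size();
  void  *buf  = NULL;

  if ( line == 0 )
    line = sizeof( void * );    /* No caches configured, use a minimal alignment */

  /* Round the size up to a line multiple and allocate line-aligned memory */
  n_bytes = ( n_bytes + line - 1 ) & ~( line - 1 );
  return ( posix_memalign( &buf, line, n_bytes ) == 0 ) ? buf : NULL;
}
#endif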

/*
 * Purpose is to synchronize caches after code has been loaded
 * or self-modified. The current implementation is simple but
 * it can and should be replaced by an optimized version
 * which does not need to flush and invalidate all cache levels
 * when code is changed.
 */
void
rtems_cache_instruction_sync_after_code_change( const void * code_addr, size_t n_bytes )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_INSTRUCTION_SYNC_FUNCTION)
  _CPU_cache_instruction_sync_after_code_change( code_addr, n_bytes );
#else
  rtems_cache_flush_multiple_data_lines( code_addr, n_bytes );
  rtems_cache_invalidate_multiple_instruction_lines( code_addr, n_bytes );
#endif
}
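
/*
 * Usage sketch only (excluded from compilation): after copying executable
 * code into RAM, for example a relocated trampoline or a loaded module, the
 * caches must be synchronized before the copy is executed.  The destination
 * buffer and the function pointer cast are purely illustrative.
 */
#if 0
#include <string.h>

static void example_install_and_run( void *dest, const void *code, size_t size )
{
  /* Place the new code at its execution address */
  memcpy( dest, code, size );

  /* Make the instruction fetch path see the just-written code */
  rtems_cache_instruction_sync_after_code_change( dest, size );

  /* Execute the freshly installed code */
  ( (void (*)( void )) dest )();
}
#endif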