source: rtems/bsps/shared/cache/cacheimpl.h @ 3b2481f

Last change on this file was 3b2481f, checked in by Sebastian Huber <sebastian.huber@…> on 04/18/19 at 05:08:32

score: Simplify _SMP_Multicast_action()

Move responsibility to disable thread dispatching to the caller of
_SMP_Multicast_action(). Using an interrupt disable for this purpose is
questionable.

/*
 *  Cache Manager
 *
 *  Copyright (C) 2014, 2018 embedded brains GmbH
 *
 *  COPYRIGHT (c) 1989-1999.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

/*
 * The functions in this file implement the API to the RTEMS Cache Manager.
 * This file is intended to be included in a cache implementation source file
 * provided by the architecture or BSP, e.g.
 *
 *  - bsps/${RTEMS_CPU}/shared/cache/cache.c
 *  - bsps/${RTEMS_CPU}/${RTEMS_BSP_FAMILY}/start/cache.c
 *
 * In the cache implementation source file a couple of defines and inline
 * functions may be provided and afterwards this file is included, e.g.
 *
 *  #define CPU_DATA_CACHE_ALIGNMENT XYZ
 *  ...
 *  #include "../../../bsps/shared/cache/cacheimpl.h"
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_DATA_CACHE_ALIGNMENT <POSITIVE INTEGER>
 *
 * to enable the data cache support.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_INSTRUCTION_CACHE_ALIGNMENT <POSITIVE INTEGER>
 *
 * to enable the instruction cache support.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS
 *
 * if it provides cache maintenance functions which operate on multiple lines.
 * Otherwise a generic loop with single line operations will be used.  It is
 * strongly recommended to provide the implementation in terms of static inline
 * functions for performance reasons.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS
 *
 * if it provides functions to get the data and instruction cache sizes by
 * level.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_CACHE_SUPPORT_PROVIDES_INSTRUCTION_SYNC_FUNCTION
 *
 * if special instructions must be used to synchronize the instruction caches
 * after a code change.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_CACHE_SUPPORT_PROVIDES_DISABLE_DATA
 *
 * if an external implementation of rtems_cache_disable_data() is provided,
 * e.g. as an implementation in assembly code.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING
 *
 * if the hardware provides no instruction cache snooping and the instruction
 * cache invalidation needs software support.
 *
 * The functions below are implemented with inline routines found in the cache
 * implementation source file for each architecture or BSP.  In the event that
 * support for a specific cache function is not provided, the API routine
 * does nothing (but does exist).
 */
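
/*
 * For illustration only: a minimal sketch of a hypothetical BSP cache
 * implementation source file which includes this file.  The 32-byte
 * alignment and the empty helper bodies are placeholders, not taken from
 * a real BSP; each BSP supplies the hardware-specific cache instructions,
 * preferably as static inline functions.
 *
 *  #define CPU_DATA_CACHE_ALIGNMENT 32
 *  #define CPU_INSTRUCTION_CACHE_ALIGNMENT 32
 *
 *  static inline void _CPU_cache_flush_1_data_line( const void *d_addr )
 *  {
 *    (void) d_addr;
 *  }
 *
 *  static inline void _CPU_cache_invalidate_1_data_line( const void *d_addr )
 *  {
 *    (void) d_addr;
 *  }
 *
 *  static inline void _CPU_cache_invalidate_1_instruction_line(
 *    const void *i_addr
 *  )
 *  {
 *    (void) i_addr;
 *  }
 *
 *  static inline void _CPU_cache_flush_entire_data( void ) { }
 *  static inline void _CPU_cache_invalidate_entire_data( void ) { }
 *  static inline void _CPU_cache_invalidate_entire_instruction( void ) { }
 *  static inline void _CPU_cache_freeze_data( void ) { }
 *  static inline void _CPU_cache_unfreeze_data( void ) { }
 *  static inline void _CPU_cache_enable_data( void ) { }
 *  static inline void _CPU_cache_disable_data( void ) { }
 *  static inline void _CPU_cache_freeze_instruction( void ) { }
 *  static inline void _CPU_cache_unfreeze_instruction( void ) { }
 *  static inline void _CPU_cache_enable_instruction( void ) { }
 *  static inline void _CPU_cache_disable_instruction( void ) { }
 *
 *  #include "../../../bsps/shared/cache/cacheimpl.h"
 */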

#include <rtems.h>

#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
#include <rtems/score/smpimpl.h>
#include <rtems/score/threaddispatch.h>
#endif

#if CPU_DATA_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
#error "CPU_DATA_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
#endif

#if CPU_INSTRUCTION_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
#error "CPU_INSTRUCTION_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
#endif

/*
 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE A DATA CACHE
 */

/*
 * This function is called to flush the data cache by performing cache
 * copybacks. It must determine how many cache lines need to be copied
 * back and then perform the copybacks.
 */
void
rtems_cache_flush_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_flush_data_range( d_addr, n_bytes );
#else
  const void * final_address;

 /*
  * Set d_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be pushed. Increment d_addr and push
  * the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to flush is zero */
    return;

  final_address = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
  while( d_addr <= final_address )  {
    _CPU_cache_flush_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }
#endif
#endif
}

/*
 * This function is responsible for performing a data cache invalidate.
 * It must determine how many cache lines need to be invalidated and then
 * perform the invalidations.
 */
void
rtems_cache_invalidate_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_invalidate_data_range( d_addr, n_bytes );
#else
  const void * final_address;

 /*
  * Set d_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be invalidated. Increment d_addr and
  * invalidate the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to invalidate is zero */
    return;

  final_address = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
  while( final_address >= d_addr ) {
    _CPU_cache_invalidate_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }
#endif
#endif
}

/*
 * This function is responsible for performing a data cache flush.
 * It flushes the entire cache.
 */
void
rtems_cache_flush_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
   /*
    * Call the CPU-specific routine
    */
   _CPU_cache_flush_entire_data();
#endif
}

/*
 * This function is responsible for performing a data cache
 * invalidate. It invalidates the entire cache.
 */
void
rtems_cache_invalidate_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
 /*
  * Call the CPU-specific routine
  */

 _CPU_cache_invalidate_entire_data();
#endif
}

/*
 * This function returns the data cache granularity.
 */
size_t
rtems_cache_get_data_line_size( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  return CPU_DATA_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}

size_t
rtems_cache_get_data_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_data_cache_size( level );
#else
  return 0;
#endif
}

/*
 * This function freezes the data cache; cache lines
 * are not replaced.
 */
void
rtems_cache_freeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_freeze_data();
#endif
}

void rtems_cache_unfreeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_data();
#endif
}

void
rtems_cache_enable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_enable_data();
#endif
}

#if !defined(CPU_CACHE_SUPPORT_PROVIDES_DISABLE_DATA)
void
rtems_cache_disable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_disable_data();
#endif
}
#endif

/*
 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE AN INSTRUCTION CACHE
 */

#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && defined(RTEMS_SMP) \
  && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)

typedef struct {
  const void *addr;
  size_t size;
} smp_cache_area;

static void smp_cache_inst_inv(void *arg)
{
  smp_cache_area *area = arg;

  _CPU_cache_invalidate_instruction_range(area->addr, area->size);
}

static void smp_cache_inst_inv_all(void *arg)
{
  _CPU_cache_invalidate_entire_instruction();
}

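/*
 * Run a cache maintenance handler on all processors.  Thread dispatching is
 * disabled around the broadcast unless the caller already executes in
 * interrupt context (non-zero ISR level).
 */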
static void smp_cache_broadcast( SMP_Action_handler handler, void *arg )
{
  uint32_t         isr_level;
  Per_CPU_Control *cpu_self;

  isr_level = _ISR_Get_level();

  if ( isr_level == 0 ) {
    cpu_self = _Thread_Dispatch_disable();
  } else {
    cpu_self = _Per_CPU_Get();
  }

  _SMP_Broadcast_action( handler, arg );

  if ( isr_level == 0 ) {
    _Thread_Dispatch_enable( cpu_self );
  }
}

#endif

/*
 * This function is responsible for performing an instruction cache
 * invalidate. It must determine how many cache lines need to be invalidated
 * and then perform the invalidations.
 */
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && !defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
static void
_CPU_cache_invalidate_instruction_range(
  const void * i_addr,
  size_t n_bytes
)
{
  const void * final_address;

 /*
  * Set i_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be invalidated. Increment i_addr and
  * invalidate the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to invalidate is zero */
    return;

  final_address = (void *)((size_t)i_addr + n_bytes - 1);
  i_addr = (void *)((size_t)i_addr & ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1));
  while( final_address >= i_addr ) {
    _CPU_cache_invalidate_1_instruction_line( i_addr );
    i_addr = (void *)((size_t)i_addr + CPU_INSTRUCTION_CACHE_ALIGNMENT);
  }
}
#endif

void
rtems_cache_invalidate_multiple_instruction_lines(
  const void * i_addr,
  size_t n_bytes
)
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  smp_cache_area area = { i_addr, n_bytes };

  smp_cache_broadcast( smp_cache_inst_inv, &area );
#else
  _CPU_cache_invalidate_instruction_range( i_addr, n_bytes );
#endif
#endif
}

/*
 * This function is responsible for performing an instruction cache
 * invalidate. It invalidates the entire cache.
 */
void
rtems_cache_invalidate_entire_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  smp_cache_broadcast( smp_cache_inst_inv_all, NULL );
#else
 _CPU_cache_invalidate_entire_instruction();
#endif
#endif
}

/*
 * This function returns the instruction cache granularity.
 */
size_t
rtems_cache_get_instruction_line_size( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  return CPU_INSTRUCTION_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}

size_t
rtems_cache_get_instruction_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_instruction_cache_size( level );
#else
  return 0;
#endif
}

/*
 * This function freezes the instruction cache; cache lines
 * are not replaced.
 */
void
rtems_cache_freeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_freeze_instruction();
#endif
}

void rtems_cache_unfreeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_instruction();
#endif
}

void
rtems_cache_enable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_enable_instruction();
#endif
}

void
rtems_cache_disable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_disable_instruction();
#endif
}

/* Returns the maximal cache line size of all cache kinds in bytes. */
size_t rtems_cache_get_maximal_line_size( void )
{
#if defined(CPU_MAXIMAL_CACHE_ALIGNMENT)
  return CPU_MAXIMAL_CACHE_ALIGNMENT;
#endif
  size_t max_line_size = 0;
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  {
    size_t data_line_size = CPU_DATA_CACHE_ALIGNMENT;
    if ( max_line_size < data_line_size )
      max_line_size = data_line_size;
  }
#endif
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  {
    size_t instruction_line_size = CPU_INSTRUCTION_CACHE_ALIGNMENT;
    if ( max_line_size < instruction_line_size )
      max_line_size = instruction_line_size;
  }
#endif
  return max_line_size;
}

/*
 * The purpose is to synchronize caches after code has been loaded
 * or self-modified. The actual implementation is simple, but it can
 * and should be replaced by an optimized version which does not need
 * to flush and invalidate all cache levels when code is changed.
 */
void rtems_cache_instruction_sync_after_code_change(
  const void *code_addr,
  size_t      n_bytes
)
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_INSTRUCTION_SYNC_FUNCTION)
  _CPU_cache_instruction_sync_after_code_change( code_addr, n_bytes );
#else
  rtems_cache_flush_multiple_data_lines( code_addr, n_bytes );
  rtems_cache_invalidate_multiple_instruction_lines( code_addr, n_bytes );
#endif
}
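
/*
 * Typical use, sketched for illustration: dest, code, and code_size are
 * hypothetical application-defined names.  After copying or generating
 * code at run time, synchronize the caches before executing it:
 *
 *  memcpy( dest, code, code_size );
 *  rtems_cache_instruction_sync_after_code_change( dest, code_size );
 */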