source: rtems/bsps/shared/cache/cacheimpl.h @ b0c2d48

Last change on this file: b0c2d48, checked in by Sebastian Huber <sebastian.huber@…> on 12/21/18 at 06:49:47

bsps: Add CPU_CACHE_SUPPORT_PROVIDES_DISABLE_DATA

Update #3667.

/*
 *  Cache Manager
 *
 *  Copyright (C) 2014, 2018 embedded brains GmbH
 *
 *  COPYRIGHT (c) 1989-1999.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

/*
 * The functions in this file implement the API to the RTEMS Cache Manager.
 * This file is intended to be included in a cache implementation source file
 * provided by the architecture or BSP, e.g.
 *
 *  - bsps/${RTEMS_CPU}/shared/cache/cache.c
 *  - bsps/${RTEMS_CPU}/${RTEMS_BSP_FAMILY}/start/cache.c
 *
 * In this file a couple of defines and inline functions may be provided and
 * afterwards this file is included, e.g.
 *
 *  #define CPU_DATA_CACHE_ALIGNMENT XYZ
 *  ...
 *  #include "../../../bsps/shared/cache/cacheimpl.h"
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_DATA_CACHE_ALIGNMENT <POSITIVE INTEGER>
 *
 * to enable the data cache support.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_INSTRUCTION_CACHE_ALIGNMENT <POSITIVE INTEGER>
 *
 * to enable the instruction cache support.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS
 *
 * if it provides cache maintenance functions which operate on multiple lines.
 * Otherwise a generic loop with single line operations will be used.  It is
 * strongly recommended to provide the implementation in terms of static inline
 * functions for performance reasons.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS
 *
 * if it provides functions to get the data and instruction cache sizes by
 * level.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_CACHE_SUPPORT_PROVIDES_INSTRUCTION_SYNC_FUNCTION
 *
 * if special instructions must be used to synchronize the instruction caches
 * after a code change.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_CACHE_SUPPORT_PROVIDES_DISABLE_DATA
 *
 * if an external implementation of rtems_cache_disable_data() is provided,
 * e.g. as an implementation in assembly code.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING
 *
 * if the hardware provides no instruction cache snooping and the instruction
 * cache invalidation needs software support.
 *
 * The functions below are implemented with inline routines found in the cache
 * implementation source file for each architecture or BSP.  If support for a
 * specific cache function is not provided, the corresponding API routine does
 * nothing (but does exist).
 */
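
/*
 * For illustration only: a minimal, hypothetical cache implementation source
 * file might be structured as sketched below.  The alignment value 32 and the
 * bodies of the inline functions are assumptions; a real architecture or BSP
 * must use its own line size and cache maintenance instructions.  Only the
 * inline functions required by the enabled defines need to be provided.
 *
 *  #define CPU_DATA_CACHE_ALIGNMENT 32
 *  #define CPU_INSTRUCTION_CACHE_ALIGNMENT 32
 *
 *  static inline void _CPU_cache_flush_1_data_line( const void *d_addr )
 *  {
 *    // CPU-specific: write back the data cache line containing d_addr
 *  }
 *
 *  static inline void _CPU_cache_invalidate_1_data_line( const void *d_addr )
 *  {
 *    // CPU-specific: invalidate the data cache line containing d_addr
 *  }
 *
 *  static inline void _CPU_cache_invalidate_1_instruction_line(
 *    const void *i_addr
 *  )
 *  {
 *    // CPU-specific: invalidate the instruction cache line containing i_addr
 *  }
 *
 *  // ...the remaining _CPU_cache_* routines referenced below (flush entire
 *  // data, invalidate entire data/instruction, freeze, unfreeze, enable and
 *  // disable) follow the same pattern...
 *
 *  #include "../../../bsps/shared/cache/cacheimpl.h"
 */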

#include <rtems.h>

#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
#include <rtems/score/smpimpl.h>
#endif

#if CPU_DATA_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
#error "CPU_DATA_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
#endif

#if CPU_INSTRUCTION_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
#error "CPU_INSTRUCTION_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
#endif

/*
 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE A DATA CACHE
 */

/*
 * This function is called to flush the data cache by performing cache
 * copybacks. It must determine how many cache lines need to be copied
 * back and then perform the copybacks.
 */
void
rtems_cache_flush_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_flush_data_range( d_addr, n_bytes );
#else
  const void * final_address;

 /*
  * Set d_addr to the beginning of the cache line; final_address indicates
  * the last address_t which needs to be pushed. Increment d_addr and push
  * the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to flush is zero */
    return;

  final_address = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
  while( d_addr <= final_address )  {
    _CPU_cache_flush_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }
#endif
#endif
}
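
/*
 * Usage sketch (not part of this file): before a hypothetical DMA controller
 * reads a transmit buffer from memory, the driver writes the buffer back so
 * the device observes the CPU's data.  The tx_buf and dma_start_transmit()
 * names are assumptions.
 *
 *  rtems_cache_flush_multiple_data_lines( tx_buf, sizeof( tx_buf ) );
 *  dma_start_transmit( tx_buf, sizeof( tx_buf ) );
 */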

/*
 * This function is responsible for performing a data cache invalidate.
 * It must determine how many cache lines need to be invalidated and then
 * perform the invalidations.
 */
void
rtems_cache_invalidate_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_invalidate_data_range( d_addr, n_bytes );
#else
  const void * final_address;

 /*
  * Set d_addr to the beginning of the cache line; final_address indicates
  * the last address_t which needs to be invalidated. Increment d_addr and
  * invalidate the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to invalidate is zero */
    return;

  final_address = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
  while( final_address >= d_addr ) {
    _CPU_cache_invalidate_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }
#endif
#endif
}
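
/*
 * Usage sketch (not part of this file): after a hypothetical DMA receive
 * transfer has completed, the driver invalidates the buffer before the CPU
 * reads it, so stale cache lines are discarded.  The rx_buf and
 * dma_wait_for_receive() names are assumptions.
 *
 *  dma_wait_for_receive( rx_buf, sizeof( rx_buf ) );
 *  rtems_cache_invalidate_multiple_data_lines( rx_buf, sizeof( rx_buf ) );
 */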

/*
 * This function is responsible for performing a data cache flush.
 * It flushes the entire cache.
 */
void
rtems_cache_flush_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
   /*
    * Call the CPU-specific routine
    */
   _CPU_cache_flush_entire_data();
#endif
}

/*
 * This function is responsible for performing a data cache
 * invalidate. It invalidates the entire cache.
 */
void
rtems_cache_invalidate_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
 /*
  * Call the CPU-specific routine
  */

 _CPU_cache_invalidate_entire_data();
#endif
}

/*
 * This function returns the data cache granularity.
 */
size_t
rtems_cache_get_data_line_size( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  return CPU_DATA_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}

size_t
rtems_cache_get_data_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_data_cache_size( level );
#else
  return 0;
#endif
}
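
/*
 * Usage sketch (not part of this file): the line size and per-level cache
 * sizes can be queried at run time, e.g. for diagnostics.  Passing level 1
 * for the first-level cache is an assumption of this sketch; the reported
 * values are 0 if the respective support is not provided.
 *
 *  printf( "data line size: %zu\n", rtems_cache_get_data_line_size() );
 *  printf( "L1 data size:   %zu\n", rtems_cache_get_data_cache_size( 1 ) );
 */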

/*
 * This function freezes the data cache; cache lines
 * are not replaced.
 */
void
rtems_cache_freeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_freeze_data();
#endif
}

void rtems_cache_unfreeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_data();
#endif
}

void
rtems_cache_enable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_enable_data();
#endif
}

#if !defined(CPU_CACHE_SUPPORT_PROVIDES_DISABLE_DATA)
void
rtems_cache_disable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_disable_data();
#endif
}
#endif

/*
 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE AN INSTRUCTION CACHE
 */

#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && defined(RTEMS_SMP) \
  && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)

typedef struct {
  const void *addr;
  size_t size;
} smp_cache_area;

static void smp_cache_inst_inv(void *arg)
{
  smp_cache_area *area = arg;

  _CPU_cache_invalidate_instruction_range(area->addr, area->size);
}

static void smp_cache_inst_inv_all(void *arg)
{
  _CPU_cache_invalidate_entire_instruction();
}

#endif

/*
 * This function is responsible for performing an instruction cache
 * invalidate. It must determine how many cache lines need to be invalidated
 * and then perform the invalidations.
 */
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && !defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
static void
_CPU_cache_invalidate_instruction_range(
  const void * i_addr,
  size_t n_bytes
)
{
  const void * final_address;

 /*
  * Set i_addr to the beginning of the cache line; final_address indicates
  * the last address_t which needs to be invalidated. Increment i_addr and
  * invalidate the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to invalidate is zero */
    return;

  final_address = (void *)((size_t)i_addr + n_bytes - 1);
  i_addr = (void *)((size_t)i_addr & ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1));
  while( final_address >= i_addr ) {
    _CPU_cache_invalidate_1_instruction_line( i_addr );
    i_addr = (void *)((size_t)i_addr + CPU_INSTRUCTION_CACHE_ALIGNMENT);
  }
}
#endif

void
rtems_cache_invalidate_multiple_instruction_lines(
  const void * i_addr,
  size_t n_bytes
)
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  smp_cache_area area = { i_addr, n_bytes };

  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv, &area );
#else
  _CPU_cache_invalidate_instruction_range( i_addr, n_bytes );
#endif
#endif
}

/*
 * This function is responsible for performing an instruction cache
 * invalidate. It invalidates the entire cache.
 */
void
rtems_cache_invalidate_entire_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv_all, NULL );
#else
 _CPU_cache_invalidate_entire_instruction();
#endif
#endif
}

/*
 * This function returns the instruction cache granularity.
 */
size_t
rtems_cache_get_instruction_line_size( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  return CPU_INSTRUCTION_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}

size_t
rtems_cache_get_instruction_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_instruction_cache_size( level );
#else
  return 0;
#endif
}

/*
 * This function freezes the instruction cache; cache lines
 * are not replaced.
 */
void
rtems_cache_freeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_freeze_instruction();
#endif
}

void rtems_cache_unfreeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_instruction();
#endif
}

void
rtems_cache_enable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_enable_instruction();
#endif
}

void
rtems_cache_disable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_disable_instruction();
#endif
}

/* Returns the maximal cache line size of all cache kinds in bytes. */
size_t rtems_cache_get_maximal_line_size( void )
{
#if defined(CPU_MAXIMAL_CACHE_ALIGNMENT)
  return CPU_MAXIMAL_CACHE_ALIGNMENT;
#endif
  size_t max_line_size = 0;
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  {
    size_t data_line_size = CPU_DATA_CACHE_ALIGNMENT;
    if ( max_line_size < data_line_size )
      max_line_size = data_line_size;
  }
#endif
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  {
    size_t instruction_line_size = CPU_INSTRUCTION_CACHE_ALIGNMENT;
    if ( max_line_size < instruction_line_size )
      max_line_size = instruction_line_size;
  }
#endif
  return max_line_size;
}
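
/*
 * Usage sketch (not part of this file): the maximal line size is a suitable
 * alignment and padding granule for buffers shared with devices.  The use of
 * posix_memalign() and the size round-up below are illustrative and assume a
 * non-zero line size; the length variable is an assumption.
 *
 *  size_t align = rtems_cache_get_maximal_line_size();
 *  size_t size = ( length + align - 1 ) & ~( align - 1 );
 *  void *buf = NULL;
 *  (void) posix_memalign( &buf, align, size );
 */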

/*
 * The purpose is to synchronize caches after code has been loaded
 * or self-modified.  The current implementation is simple, but it
 * can and should be replaced by an optimized version which does not
 * need to flush and invalidate all cache levels when code is changed.
 */
void rtems_cache_instruction_sync_after_code_change(
  const void *code_addr,
  size_t      n_bytes
)
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_INSTRUCTION_SYNC_FUNCTION)
  _CPU_cache_instruction_sync_after_code_change( code_addr, n_bytes );
#else
  rtems_cache_flush_multiple_data_lines( code_addr, n_bytes );
  rtems_cache_invalidate_multiple_instruction_lines( code_addr, n_bytes );
#endif
}
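
/*
 * Usage sketch (not part of this file): after copying machine code into a
 * buffer, e.g. in a hypothetical loader, synchronize the caches before
 * executing the code.  The code_buf, code_size and load_code() names are
 * assumptions.
 *
 *  load_code( code_buf, code_size );
 *  rtems_cache_instruction_sync_after_code_change( code_buf, code_size );
 */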