source: rtems/bsps/shared/cache/cacheimpl.h @ 5e0ab02

Last change: 5e0ab02, checked in by Sebastian Huber <sebastian.huber@…> on 12/21/18 at 06:46:58

bsps: Update cache manager documentation

Update #3667.

/*
 *  Cache Manager
 *
 *  Copyright (C) 2014, 2018 embedded brains GmbH
 *
 *  COPYRIGHT (c) 1989-1999.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

/*
 * The functions in this file implement the API to the RTEMS Cache Manager.
 * This file is intended to be included in a cache implementation source file
 * provided by the architecture or BSP, e.g.
 *
 *  - bsps/${RTEMS_CPU}/shared/cache/cache.c
 *  - bsps/${RTEMS_CPU}/${RTEMS_BSP_FAMILY}/start/cache.c
 *
 * In this file a couple of defines and inline functions may be provided and
 * afterwards this file is included, e.g.
 *
 *  #define CPU_DATA_CACHE_ALIGNMENT XYZ
 *  ...
 *  #include "../../../bsps/shared/cache/cacheimpl.h"
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_DATA_CACHE_ALIGNMENT <POSITIVE INTEGER>
 *
 * to enable the data cache support.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_INSTRUCTION_CACHE_ALIGNMENT <POSITIVE INTEGER>
 *
 * to enable the instruction cache support.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS
 *
 * if it provides cache maintenance functions which operate on multiple lines.
 * Otherwise a generic loop with single line operations will be used.  It is
 * strongly recommended to provide the implementation in terms of static inline
 * functions for performance reasons.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS
 *
 * if it provides functions to get the data and instruction cache sizes by
 * level.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_CACHE_SUPPORT_PROVIDES_INSTRUCTION_SYNC_FUNCTION
 *
 * if special instructions must be used to synchronize the instruction caches
 * after a code change.
 *
 * The cache implementation source file shall define
 *
 *  #define CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING
 *
 * if the hardware provides no instruction cache snooping and the instruction
 * cache invalidation needs software support.
 *
 * The functions below are implemented with inline routines found in the cache
 * implementation source file for each architecture or BSP.  In the event that
 * support for a specific cache function is not provided, the API routine does
 * nothing (but does exist).
 */
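
/*
 * A minimal sketch of a hypothetical cache implementation source file which
 * configures and then includes this file.  The alignment values and the empty
 * function bodies are placeholders and not taken from any real BSP; the
 * remaining _CPU_cache_* routines referenced below must be provided in the
 * same way:
 *
 *  #define CPU_DATA_CACHE_ALIGNMENT 32
 *  #define CPU_INSTRUCTION_CACHE_ALIGNMENT 32
 *
 *  static inline void _CPU_cache_flush_1_data_line( const void *d_addr )
 *  {
 *    // Placeholder: issue the architecture-specific copy back (flush)
 *    // instruction for the cache line containing d_addr.
 *  }
 *
 *  static inline void _CPU_cache_invalidate_1_data_line( const void *d_addr )
 *  {
 *    // Placeholder: invalidate the data cache line containing d_addr.
 *  }
 *
 *  static inline void _CPU_cache_invalidate_1_instruction_line(
 *    const void *i_addr
 *  )
 *  {
 *    // Placeholder: invalidate the instruction cache line containing i_addr.
 *  }
 *
 *  ...
 *
 *  #include "../../../bsps/shared/cache/cacheimpl.h"
 */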

#include <rtems.h>

#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
#include <rtems/score/smpimpl.h>
#endif

#if CPU_DATA_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
#error "CPU_DATA_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
#endif

#if CPU_INSTRUCTION_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
#error "CPU_INSTRUCTION_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
#endif

/*
 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE A DATA CACHE
 */

/*
 * This function is called to flush the data cache by performing cache
 * copybacks. It must determine how many cache lines need to be copied
 * back and then perform the copybacks.
 */
void
rtems_cache_flush_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_flush_data_range( d_addr, n_bytes );
#else
  const void * final_address;

 /*
  * Set d_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be pushed. Increment d_addr and push
  * the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to flush is zero */
    return;

  final_address = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
  while( d_addr <= final_address )  {
    _CPU_cache_flush_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }
#endif
#endif
}

/*
 * This function is responsible for performing a data cache invalidate.
 * It must determine how many cache lines need to be invalidated and then
 * perform the invalidations.
 */
void
rtems_cache_invalidate_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_invalidate_data_range( d_addr, n_bytes );
#else
  const void * final_address;

 /*
  * Set d_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be invalidated. Increment d_addr and
  * invalidate the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to invalidate is zero */
    return;

  final_address = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
  while( final_address >= d_addr ) {
    _CPU_cache_invalidate_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }
#endif
#endif
}
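
/*
 * A usage sketch (not part of this file): a driver for a hypothetical DMA
 * controller flushes a transmit buffer before the device reads it and
 * invalidates a receive buffer before the CPU reads data written by the
 * device.  The buffers and the dma_start()/dma_wait() helpers are
 * illustrative only:
 *
 *  static char tx_buf[ 512 ];
 *  static char rx_buf[ 512 ];
 *
 *  void dma_transfer_example( void )
 *  {
 *    // Write back the CPU's view of tx_buf so the device sees it.
 *    rtems_cache_flush_multiple_data_lines( tx_buf, sizeof( tx_buf ) );
 *    dma_start( tx_buf, rx_buf, sizeof( rx_buf ) );
 *    dma_wait();
 *    // Discard stale cache lines so the CPU sees the device's data.
 *    rtems_cache_invalidate_multiple_data_lines( rx_buf, sizeof( rx_buf ) );
 *  }
 *
 * A real driver may also have to invalidate rx_buf before the transfer
 * starts if the buffer could contain dirty cache lines.
 */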

/*
 * This function is responsible for performing a data cache flush.
 * It flushes the entire cache.
 */
void
rtems_cache_flush_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
   /*
    * Call the CPU-specific routine
    */
   _CPU_cache_flush_entire_data();
#endif
}

/*
 * This function is responsible for performing a data cache
 * invalidate. It invalidates the entire cache.
 */
void
rtems_cache_invalidate_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
 /*
  * Call the CPU-specific routine
  */

 _CPU_cache_invalidate_entire_data();
#endif
}

/*
 * This function returns the data cache granularity.
 */
size_t
rtems_cache_get_data_line_size( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  return CPU_DATA_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}

size_t
rtems_cache_get_data_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_data_cache_size( level );
#else
  return 0;
#endif
}
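
/*
 * A usage sketch: probe and print the reported data cache sizes for a few
 * levels.  It assumes that a level for which no information is available
 * simply yields zero, as the fallback above does when no cache size
 * functions are provided:
 *
 *  #include <inttypes.h>
 *  #include <stdio.h>
 *  #include <rtems.h>
 *
 *  void print_data_cache_sizes( void )
 *  {
 *    uint32_t level;
 *
 *    for ( level = 1; level <= 3; ++level ) {
 *      printf(
 *        "L%" PRIu32 " data cache: %zu bytes\n",
 *        level,
 *        rtems_cache_get_data_cache_size( level )
 *      );
 *    }
 *  }
 */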

/*
 * This function freezes the data cache; cache lines
 * are not replaced.
 */
void
rtems_cache_freeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_freeze_data();
#endif
}

void rtems_cache_unfreeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_data();
#endif
}

void
rtems_cache_enable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_enable_data();
#endif
}

void
rtems_cache_disable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_disable_data();
#endif
}

/*
 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE AN INSTRUCTION CACHE
 */

#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && defined(RTEMS_SMP) \
  && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)

typedef struct {
  const void *addr;
  size_t size;
} smp_cache_area;

static void smp_cache_inst_inv(void *arg)
{
  smp_cache_area *area = arg;

  _CPU_cache_invalidate_instruction_range(area->addr, area->size);
}

static void smp_cache_inst_inv_all(void *arg)
{
  _CPU_cache_invalidate_entire_instruction();
}

#endif

/*
 * This function is responsible for performing an instruction cache
 * invalidate. It must determine how many cache lines need to be invalidated
 * and then perform the invalidations.
 */
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && !defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
static void
_CPU_cache_invalidate_instruction_range(
  const void * i_addr,
  size_t n_bytes
)
{
  const void * final_address;

 /*
  * Set i_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be invalidated. Increment i_addr and
  * invalidate the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to invalidate is zero */
    return;

  final_address = (void *)((size_t)i_addr + n_bytes - 1);
  i_addr = (void *)((size_t)i_addr & ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1));
  while( final_address >= i_addr ) {
    _CPU_cache_invalidate_1_instruction_line( i_addr );
    i_addr = (void *)((size_t)i_addr + CPU_INSTRUCTION_CACHE_ALIGNMENT);
  }
}
#endif

void
rtems_cache_invalidate_multiple_instruction_lines(
  const void * i_addr,
  size_t n_bytes
)
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  smp_cache_area area = { i_addr, n_bytes };

  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv, &area );
#else
  _CPU_cache_invalidate_instruction_range( i_addr, n_bytes );
#endif
#endif
}

/*
 * This function is responsible for performing an instruction cache
 * invalidate. It invalidates the entire cache.
 */
void
rtems_cache_invalidate_entire_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv_all, NULL );
#else
 _CPU_cache_invalidate_entire_instruction();
#endif
#endif
}

/*
 * This function returns the instruction cache granularity.
 */
size_t
rtems_cache_get_instruction_line_size( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  return CPU_INSTRUCTION_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}

size_t
rtems_cache_get_instruction_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_instruction_cache_size( level );
#else
  return 0;
#endif
}

/*
 * This function freezes the instruction cache; cache lines
 * are not replaced.
 */
void
rtems_cache_freeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_freeze_instruction();
#endif
}

void rtems_cache_unfreeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_instruction();
#endif
}

void
rtems_cache_enable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_enable_instruction();
#endif
}

void
rtems_cache_disable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_disable_instruction();
#endif
}

/* Returns the maximal cache line size of all cache kinds in bytes. */
size_t rtems_cache_get_maximal_line_size( void )
{
#if defined(CPU_MAXIMAL_CACHE_ALIGNMENT)
  return CPU_MAXIMAL_CACHE_ALIGNMENT;
#endif
  size_t max_line_size = 0;
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  {
    size_t data_line_size = CPU_DATA_CACHE_ALIGNMENT;
    if ( max_line_size < data_line_size )
      max_line_size = data_line_size;
  }
#endif
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  {
    size_t instruction_line_size = CPU_INSTRUCTION_CACHE_ALIGNMENT;
    if ( max_line_size < instruction_line_size )
      max_line_size = instruction_line_size;
  }
#endif
  return max_line_size;
}
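
/*
 * A usage sketch: allocate a buffer shared with a device so that it starts
 * on a cache line boundary and does not share its last line with unrelated
 * data.  It assumes the line size is a power of two, as the address masking
 * in this file does; posix_memalign() is used only for illustration:
 *
 *  #include <stdlib.h>
 *  #include <rtems.h>
 *
 *  void *allocate_device_buffer( size_t n_bytes )
 *  {
 *    size_t line = rtems_cache_get_maximal_line_size();
 *    void  *p;
 *
 *    if ( line < sizeof( void * ) )
 *      line = sizeof( void * );
 *
 *    // Round the size up to a multiple of the line size.
 *    n_bytes = ( n_bytes + line - 1 ) & ~( line - 1 );
 *
 *    if ( posix_memalign( &p, line, n_bytes ) != 0 )
 *      return NULL;
 *
 *    return p;
 *  }
 */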

/*
 * The purpose is to synchronize the caches after code has been loaded or
 * self-modified.  The current implementation is simple; it can and should be
 * replaced by an optimized version which does not need to flush and
 * invalidate all cache levels when code is changed.
 */
void rtems_cache_instruction_sync_after_code_change(
  const void *code_addr,
  size_t      n_bytes
)
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_INSTRUCTION_SYNC_FUNCTION)
  _CPU_cache_instruction_sync_after_code_change( code_addr, n_bytes );
#else
  rtems_cache_flush_multiple_data_lines( code_addr, n_bytes );
  rtems_cache_invalidate_multiple_instruction_lines( code_addr, n_bytes );
#endif
}
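
/*
 * A usage sketch: after copying freshly generated or loaded code into RAM
 * (for example a trampoline), synchronize the instruction cache before
 * executing it.  The destination and source buffers are hypothetical:
 *
 *  #include <string.h>
 *  #include <rtems.h>
 *
 *  void install_code( void *dest, const void *code, size_t n_bytes )
 *  {
 *    memcpy( dest, code, n_bytes );
 *    rtems_cache_instruction_sync_after_code_change( dest, n_bytes );
 *    // It is now safe to jump to the code at dest.
 *  }
 */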