source: rtems/c/src/lib/libcpu/shared/src/cache_manager.c @ 4bf2a6aa

Last change on this file: 4bf2a6aa, checked in by Sebastian Huber <sebastian.huber@…> on 04/27/15 at 07:40:16

bsps/cache: Clarify range functions support

/*
 *  Cache Manager
 *
 *  COPYRIGHT (c) 1989-1999.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 *
 *
 *  The functions in this file implement the API to the RTEMS Cache Manager and
 *  are divided into data cache and instruction cache functions. Data cache
 *  functions only have bodies if a data cache is supported. Instruction
 *  cache functions only have bodies if an instruction cache is supported.
 *  Support for a particular cache exists only if CPU_x_CACHE_ALIGNMENT is
 *  defined, where x is DATA or INSTRUCTION. These definitions are found in
 *  the Cache Manager Wrapper header files, often
 *
 *  rtems/c/src/lib/libcpu/CPU/cache_.h
 *
 *  The cache implementation header file can define
 *
 *    #define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS
 *
 *  if it provides cache maintenance functions which operate on multiple lines.
 *  Otherwise a generic loop with single line operations will be used.  It is
 *  strongly recommended to provide the implementation in terms of static
 *  inline functions for performance reasons.
 *
 *  The functions below are implemented with CPU dependent inline routines
 *  found in the cache.c files for each CPU. In the event that a CPU does
 *  not support a specific function for a cache it has, the CPU dependent
 *  routine does nothing (but does exist).
 *
 *  At this point, the Cache Manager makes no consideration of, and provides no
 *  support for, BSP specific issues such as a secondary cache. In such a system,
 *  the CPU dependent routines would have to be modified, or a BSP layer added
 *  to this Manager.
 */
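
/*
 *  Example (a minimal, hypothetical sketch; the line size and the function
 *  body are placeholders, only the macro and function names match the ones
 *  used below): a cache_.h that opts into the range functions could provide
 *  something along these lines:
 *
 *    #define CPU_DATA_CACHE_ALIGNMENT 32
 *    #define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS
 *
 *    static inline void _CPU_cache_flush_data_range(
 *      const void *d_addr,
 *      size_t      n_bytes
 *    )
 *    {
 *      ... issue the CPU-specific flush for every cache line that
 *      ... overlaps the range [d_addr, d_addr + n_bytes)
 *    }
 */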

#include <rtems.h>
#include "cache_.h"

#if defined(RTEMS_SMP)

#include <rtems/score/smpimpl.h>

/*
 * Helper structure and routines for the *_processor_set() functions below.
 * They are executed on each processor of the specified set via
 * _SMP_Multicast_action().
 */
typedef struct {
  const void *addr;
  size_t size;
} smp_cache_area;

#if defined(CPU_DATA_CACHE_ALIGNMENT)

static void smp_cache_data_flush(void *arg)
{
  smp_cache_area *area = arg;

  rtems_cache_flush_multiple_data_lines(area->addr, area->size);
}

static void smp_cache_data_inv(void *arg)
{
  smp_cache_area *area = arg;

  rtems_cache_invalidate_multiple_data_lines(area->addr, area->size);
}

static void smp_cache_data_flush_all(void *arg)
{
  rtems_cache_flush_entire_data();
}

static void smp_cache_data_inv_all(void *arg)
{
  rtems_cache_invalidate_entire_data();
}

#endif /* defined(CPU_DATA_CACHE_ALIGNMENT) */

void
rtems_cache_flush_multiple_data_lines_processor_set(
  const void *addr,
  size_t size,
  const size_t setsize,
  const cpu_set_t *set
)
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  smp_cache_area area = { addr, size };

  _SMP_Multicast_action( setsize, set, smp_cache_data_flush, &area );
#endif
}

void
rtems_cache_invalidate_multiple_data_lines_processor_set(
  const void *addr,
  size_t size,
  const size_t setsize,
  const cpu_set_t *set
)
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  smp_cache_area area = { addr, size };

  _SMP_Multicast_action( setsize, set, smp_cache_data_inv, &area );
#endif
}

void
rtems_cache_flush_entire_data_processor_set(
  const size_t setsize,
  const cpu_set_t *set
)
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _SMP_Multicast_action( setsize, set, smp_cache_data_flush_all, NULL );
#endif
}

void
rtems_cache_invalidate_entire_data_processor_set(
  const size_t setsize,
  const cpu_set_t *set
)
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _SMP_Multicast_action( setsize, set, smp_cache_data_inv_all, NULL );
#endif
}

#endif /* defined(RTEMS_SMP) */
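
/*
 * Example (a hypothetical usage sketch): flushing the entire data cache on
 * processors 0 and 1 only, assuming the usual <sys/cpuset.h> CPU_ZERO() and
 * CPU_SET() macros are available:
 *
 *   cpu_set_t cpus;
 *
 *   CPU_ZERO( &cpus );
 *   CPU_SET( 0, &cpus );
 *   CPU_SET( 1, &cpus );
 *   rtems_cache_flush_entire_data_processor_set( sizeof( cpus ), &cpus );
 */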

/*
 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE A DATA CACHE
 */

/*
 * This function is called to flush the data cache by performing cache
 * copybacks. It must determine how many cache lines need to be copied
 * back and then perform the copybacks.
 */
void
rtems_cache_flush_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_flush_data_range( d_addr, n_bytes );
#else
  const void * final_address;

 /*
  * Set d_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be pushed. Increment d_addr and push
  * the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to flush is zero */
    return;

  final_address = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
  while( d_addr <= final_address )  {
    _CPU_cache_flush_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }
#endif
#endif
}


/*
 * This function is responsible for performing a data cache invalidate.
 * It must determine how many cache lines need to be invalidated and then
 * perform the invalidations.
 */

void
rtems_cache_invalidate_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_invalidate_data_range( d_addr, n_bytes );
#else
  const void * final_address;

 /*
  * Set d_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be invalidated. Increment d_addr and
  * invalidate the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to invalidate is zero */
    return;

  final_address = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
  while( final_address >= d_addr ) {
    _CPU_cache_invalidate_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }
#endif
#endif
}


/*
 * This function is responsible for performing a data cache flush.
 * It flushes the entire cache.
 */
void
rtems_cache_flush_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
   /*
    * Call the CPU-specific routine
    */
   _CPU_cache_flush_entire_data();
#endif
}


/*
 * This function is responsible for performing a data cache
 * invalidate. It invalidates the entire cache.
 */
void
rtems_cache_invalidate_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
 /*
  * Call the CPU-specific routine
  */

 _CPU_cache_invalidate_entire_data();
#endif
}


/*
 * This function returns the data cache granularity.
 */
size_t
rtems_cache_get_data_line_size( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  return CPU_DATA_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}


size_t
rtems_cache_get_data_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_data_cache_size( level );
#else
  return 0;
#endif
}
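
/*
 * Example (a hypothetical usage sketch; rx_buffer and rx_buffer_size are
 * placeholders): a driver that lets a device DMA into a receive buffer
 * usually aligns and sizes that buffer to the data cache line size, so that
 * no unrelated data shares a cache line with it, and invalidates the buffer
 * before reading the DMA-written data:
 *
 *   size_t line_size = rtems_cache_get_data_line_size();
 *
 *   Allocate rx_buffer with its start address and rx_buffer_size rounded up
 *   to a multiple of line_size (a return value of 0 means there is no data
 *   cache), then, after the device has written to it:
 *
 *   rtems_cache_invalidate_multiple_data_lines( rx_buffer, rx_buffer_size );
 */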

/*
 * This function freezes the data cache; cache lines
 * are not replaced.
 */
void
rtems_cache_freeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_freeze_data();
#endif
}


/*
 * This function unfreezes the data cache.
 */
void rtems_cache_unfreeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_data();
#endif
}


/* Turn on the data cache. */
void
rtems_cache_enable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_enable_data();
#endif
}


/* Turn off the data cache. */
void
rtems_cache_disable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_disable_data();
#endif
}



/*
 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE AN INSTRUCTION CACHE
 */

#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && defined(RTEMS_SMP) \
  && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)

/*
 * Helper routines executed via _SMP_Multicast_action() when the instruction
 * cache is not kept coherent by snooping and therefore has to be invalidated
 * on every processor.
 */
static void smp_cache_inst_inv(void *arg)
{
  smp_cache_area *area = arg;

  _CPU_cache_invalidate_instruction_range(area->addr, area->size);
}

static void smp_cache_inst_inv_all(void *arg)
{
  _CPU_cache_invalidate_entire_instruction();
}

#endif

/*
 * This function is responsible for performing an instruction cache
 * invalidate. It must determine how many cache lines need to be invalidated
 * and then perform the invalidations.
 */

#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && !defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
static void
_CPU_cache_invalidate_instruction_range(
  const void * i_addr,
  size_t n_bytes
)
{
  const void * final_address;

 /*
  * Set i_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be invalidated. Increment i_addr and
  * invalidate the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to invalidate is zero */
    return;

  final_address = (void *)((size_t)i_addr + n_bytes - 1);
  i_addr = (void *)((size_t)i_addr & ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1));
  while( final_address >= i_addr ) {
    _CPU_cache_invalidate_1_instruction_line( i_addr );
    i_addr = (void *)((size_t)i_addr + CPU_INSTRUCTION_CACHE_ALIGNMENT);
  }
}
#endif

void
rtems_cache_invalidate_multiple_instruction_lines(
  const void * i_addr,
  size_t n_bytes
)
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  smp_cache_area area = { i_addr, n_bytes };

  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv, &area );
#else
  _CPU_cache_invalidate_instruction_range( i_addr, n_bytes );
#endif
#endif
}
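
/*
 * Example (a hypothetical usage sketch; code_begin and code_size are
 * placeholders): after storing instructions to memory, e.g. when loading
 * code at run time, the new data is typically pushed out of the data cache
 * and the corresponding instruction cache lines are then invalidated before
 * the code is executed:
 *
 *   rtems_cache_flush_multiple_data_lines( code_begin, code_size );
 *   rtems_cache_invalidate_multiple_instruction_lines( code_begin, code_size );
 */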


/*
 * This function is responsible for performing an instruction cache
 * invalidate. It invalidates the entire cache.
 */
void
rtems_cache_invalidate_entire_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv_all, NULL );
#else
 _CPU_cache_invalidate_entire_instruction();
#endif
#endif
}


/*
 * This function returns the instruction cache granularity.
 */
size_t
rtems_cache_get_instruction_line_size( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  return CPU_INSTRUCTION_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}


size_t
rtems_cache_get_instruction_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_instruction_cache_size( level );
#else
  return 0;
#endif
}


/*
 * This function freezes the instruction cache; cache lines
 * are not replaced.
 */
void
rtems_cache_freeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_freeze_instruction();
#endif
}


/*
 * This function unfreezes the instruction cache.
 */
void rtems_cache_unfreeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_instruction();
#endif
}


/* Turn on the instruction cache. */
void
rtems_cache_enable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_enable_instruction();
#endif
}


/* Turn off the instruction cache. */
void
rtems_cache_disable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_disable_instruction();
#endif
}