source: rtems/bsps/shared/cache/cacheimpl.h @ a6f70e1

5
Last change on this file since a6f70e1 was a6f70e1, checked in by Sebastian Huber <sebastian.huber@…>, on 12/21/18 at 06:14:42

bsps: Remove superfluous comments in cacheimpl.h

Remove superfluous blank lines.

Update #3667.

  • Property mode set to 100644
File size: 10.5 KB
RevLine 
[cf1f72e]1/*
[8ef3818]2 *  Cache Manager
3 *
4 *  COPYRIGHT (c) 1989-1999.
5 *  On-Line Applications Research Corporation (OAR).
6 *
7 *  The license and distribution terms for this file may be
8 *  found in the file LICENSE in this distribution or at
[c499856]9 *  http://www.rtems.org/license/LICENSE.
[8ef3818]10 *
[359e537]11 *
[cf1f72e]12 *  The functions in this file implement the API to the RTEMS Cache Manager and
[8ef3818]13 *  are divided into data cache and instruction cache functions. Data cache
[5e77d129]14 *  functions only have bodies if a data cache is supported. Instruction
15 *  cache functions only have bodies if an instruction cache is supported.
16 *  Support for a particular cache exists only if CPU_x_CACHE_ALIGNMENT is
17 *  defined, where x E {DATA, INSTRUCTION}. These definitions are found in
18 *  the Cache Manager Wrapper header files, often
[359e537]19 *
[5e77d129]20 *  rtems/c/src/lib/libcpu/CPU/cache_.h
[359e537]21 *
[2bd440e]22 *  The cache implementation header file can define
[4bf2a6aa]23 *
24 *    #define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS
25 *
[2bd440e]26 *  if it provides cache maintenance functions which operate on multiple lines.
[4bf2a6aa]27 *  Otherwise a generic loop with single line operations will be used.  It is
28 *  strongly recommended to provide the implementation in terms of static
29 *  inline functions for performance reasons.
[2bd440e]30 *
[8ef3818]31 *  The functions below are implemented with CPU dependent inline routines
[5e77d129]32 *  found in the cache.c files for each CPU. In the event that a CPU does
33 *  not support a specific function for a cache it has, the CPU dependent
34 *  routine does nothing (but does exist).
[359e537]35 *
[8ef3818]36 *  At this point, the Cache Manager makes no considerations, and provides no
37 *  support for BSP specific issues such as a secondary cache. In such a system,
38 *  the CPU dependent routines would have to be modified, or a BSP layer added
39 *  to this Manager.
40 */
41
[cf1f72e]42#include <rtems.h>
[ddbc3f8d]43
[5bf0c1a]44#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
45#include <rtems/score/smpimpl.h>
46#endif
47
[a8865f8]48#if CPU_DATA_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
49#error "CPU_DATA_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
50#endif
51
52#if CPU_INSTRUCTION_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
53#error "CPU_INSTRUCTION_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
54#endif
55
[8ef3818]56/*
[cf1f72e]57 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE A DATA CACHE
[8ef3818]58 */
59
/*
 * Flush (copy back) the data cache lines covering the address range
 * [d_addr, d_addr + n_bytes).  Uses the CPU-provided range function when
 * available, otherwise walks the range one cache line at a time.
 */
void
rtems_cache_flush_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_flush_data_range( d_addr, n_bytes );
#else
  const void * last_byte;

  /* Nothing to flush for an empty range */
  if ( n_bytes == 0 ) {
    return;
  }

  /*
   * last_byte is the final address which must be pushed back to memory.
   * Align d_addr down to its cache line boundary and flush one line at a
   * time until the line containing last_byte has been processed.
   */
  last_byte = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));

  while ( d_addr <= last_byte ) {
    _CPU_cache_flush_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }
#endif
#endif
}
93
/*
 * Invalidate the data cache lines covering the address range
 * [d_addr, d_addr + n_bytes).  Uses the CPU-provided range function when
 * available, otherwise walks the range one cache line at a time.
 */
void
rtems_cache_invalidate_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_invalidate_data_range( d_addr, n_bytes );
#else
  const void * last_byte;

  /* Nothing to invalidate for an empty range */
  if ( n_bytes == 0 ) {
    return;
  }

  /*
   * last_byte is the final address which must be invalidated.  Align
   * d_addr down to its cache line boundary and invalidate one line at a
   * time until the line containing last_byte has been processed.
   */
  last_byte = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));

  while ( d_addr <= last_byte ) {
    _CPU_cache_invalidate_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }
#endif
#endif
}
127
/*
 * Flush (copy back) the entire data cache by delegating to the
 * CPU-specific routine.  No-op if the CPU port does not support a data
 * cache.
 */
void
rtems_cache_flush_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_flush_entire_data();
#endif
}
142
/*
 * Invalidate the entire data cache by delegating to the CPU-specific
 * routine.  No-op if the CPU port does not support a data cache.
 */
void
rtems_cache_invalidate_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_invalidate_entire_data();
#endif
}
158
/*
 * Return the data cache line size (granularity) in bytes, or zero if the
 * CPU port does not support a data cache.
 */
size_t
rtems_cache_get_data_line_size( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  return CPU_DATA_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}
171
/*
 * Return the data cache size in bytes for the requested cache level, or
 * zero if the CPU port does not provide cache size functions.
 */
size_t
rtems_cache_get_data_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_data_cache_size( level );
#else
  (void) level; /* silence -Wunused-parameter in the fallback branch */
  return 0;
#endif
}
181
/*
 * Freeze the data cache: frozen cache lines are not replaced.  No-op if
 * the CPU port does not support a data cache.
 */
void
rtems_cache_freeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_freeze_data();
#endif
}
193
/*
 * Unfreeze the data cache.  No-op if the CPU port does not support a
 * data cache.
 */
void rtems_cache_unfreeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_data();
#endif
}
200
/*
 * Enable the data cache.  No-op if the CPU port does not support a data
 * cache.
 */
void
rtems_cache_enable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_enable_data();
#endif
}
208
/*
 * Disable the data cache.  No-op if the CPU port does not support a data
 * cache.
 */
void
rtems_cache_disable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_disable_data();
#endif
}
[8ef3818]216
217/*
[cf1f72e]218 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE AN INSTRUCTION CACHE
[8ef3818]219 */
220
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && defined(RTEMS_SMP) \
  && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)

/* Argument passed to the instruction cache invalidation multicast action */
typedef struct {
  const void *addr;
  size_t size;
} smp_cache_area;

/* Multicast action: invalidate the instruction cache range given in *arg */
static void smp_cache_inst_inv(void *arg)
{
  smp_cache_area *area = arg;

  _CPU_cache_invalidate_instruction_range(area->addr, area->size);
}

/* Multicast action: invalidate the entire instruction cache */
static void smp_cache_inst_inv_all(void *arg)
{
  (void) arg; /* unused; signature is dictated by _SMP_Multicast_action() */
  _CPU_cache_invalidate_entire_instruction();
}

#endif
[ddbc3f8d]243
/*
 * Generic implementation of the instruction cache range invalidation.
 * Walks the range [i_addr, i_addr + n_bytes) one cache line at a time.
 * Only compiled when the CPU port does not provide its own range
 * functions.
 */
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && !defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
static void
_CPU_cache_invalidate_instruction_range(
  const void * i_addr,
  size_t n_bytes
)
{
  const void * last_byte;

  /* Nothing to invalidate for an empty range */
  if ( n_bytes == 0 ) {
    return;
  }

  /*
   * last_byte is the final address which must be invalidated.  Align
   * i_addr down to its cache line boundary and invalidate one line at a
   * time until the line containing last_byte has been processed.
   */
  last_byte = (void *)((size_t)i_addr + n_bytes - 1);
  i_addr = (void *)((size_t)i_addr & ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1));

  while ( i_addr <= last_byte ) {
    _CPU_cache_invalidate_1_instruction_line( i_addr );
    i_addr = (void *)((size_t)i_addr + CPU_INSTRUCTION_CACHE_ALIGNMENT);
  }
}
#endif
277
/*
 * Invalidate the instruction cache lines covering the address range
 * [i_addr, i_addr + n_bytes).  Without instruction cache snooping on SMP
 * the invalidation is broadcast to all processors.
 */
void
rtems_cache_invalidate_multiple_instruction_lines(
  const void * i_addr,
  size_t n_bytes
)
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  smp_cache_area area = { i_addr, n_bytes };

  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv, &area );
#else
  _CPU_cache_invalidate_instruction_range( i_addr, n_bytes );
#endif
#endif
}
294
/*
 * Invalidate the entire instruction cache.  Without instruction cache
 * snooping on SMP the invalidation is broadcast to all processors.
 * No-op if the CPU port does not support an instruction cache.
 */
void
rtems_cache_invalidate_entire_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv_all, NULL );
#else
  _CPU_cache_invalidate_entire_instruction();
#endif
#endif
}
310
/*
 * Return the instruction cache line size (granularity) in bytes, or zero
 * if the CPU port does not support an instruction cache.
 */
size_t
rtems_cache_get_instruction_line_size( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  return CPU_INSTRUCTION_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}
323
/*
 * Return the instruction cache size in bytes for the requested cache
 * level, or zero if the CPU port does not provide cache size functions.
 */
size_t
rtems_cache_get_instruction_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_instruction_cache_size( level );
#else
  (void) level; /* silence -Wunused-parameter in the fallback branch */
  return 0;
#endif
}
333
/*
 * Freeze the instruction cache: frozen cache lines are not replaced.
 * No-op if the CPU port does not support an instruction cache.
 */
void
rtems_cache_freeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_freeze_instruction();
#endif
}
345
/*
 * Unfreeze the instruction cache.  No-op if the CPU port does not
 * support an instruction cache.
 */
void rtems_cache_unfreeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_instruction();
#endif
}
352
/*
 * Enable the instruction cache.  No-op if the CPU port does not support
 * an instruction cache.
 */
void
rtems_cache_enable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_enable_instruction();
#endif
}
360
/*
 * Disable the instruction cache.  No-op if the CPU port does not support
 * an instruction cache.
 */
void
rtems_cache_disable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_disable_instruction();
#endif
}
[0e507d55]368
/*
 * Return the maximal cache line size of all cache kinds in bytes, or
 * zero if no cache is supported.
 *
 * The fallback computation is placed in an #else branch so that it is
 * not emitted as unreachable code after the early return when
 * CPU_MAXIMAL_CACHE_ALIGNMENT is defined.
 */
size_t rtems_cache_get_maximal_line_size( void )
{
#if defined(CPU_MAXIMAL_CACHE_ALIGNMENT)
  return CPU_MAXIMAL_CACHE_ALIGNMENT;
#else
  size_t max_line_size = 0;
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  {
    size_t data_line_size = CPU_DATA_CACHE_ALIGNMENT;
    if ( max_line_size < data_line_size )
      max_line_size = data_line_size;
  }
#endif
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  {
    size_t instruction_line_size = CPU_INSTRUCTION_CACHE_ALIGNMENT;
    if ( max_line_size < instruction_line_size )
      max_line_size = instruction_line_size;
  }
#endif
  return max_line_size;
#endif
}
392
/*
 * Synchronize the instruction cache with memory after code has been
 * loaded or self-modified.  The generic implementation below simply
 * flushes the data cache and invalidates the instruction cache over the
 * given range; it can and should be replaced by an optimized,
 * CPU-specific version which does not need to flush and invalidate all
 * cache levels when code is changed.
 */
void rtems_cache_instruction_sync_after_code_change(
  const void *code_addr,
  size_t      n_bytes
)
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_INSTRUCTION_SYNC_FUNCTION)
  _CPU_cache_instruction_sync_after_code_change( code_addr, n_bytes );
#else
  /* Generic fallback: write dirty data back, then discard stale
     instruction cache contents for the same range. */
  rtems_cache_flush_multiple_data_lines( code_addr, n_bytes );
  rtems_cache_invalidate_multiple_instruction_lines( code_addr, n_bytes );
#endif
}
Note: See TracBrowser for help on using the repository browser.