source: rtems/c/src/lib/libcpu/shared/src/cache_manager.c @ a8865f8

Last change on this file was a8865f8, checked in by Sebastian Huber <sebastian.huber@…>, on 01/25/16 at 09:20:28

score: Introduce CPU_CACHE_LINE_BYTES

Add CPU_CACHE_LINE_BYTES for the maximum cache line size in bytes. The
actual processor may use no cache or a smaller cache line size.

/*
 *  Cache Manager
 *
 *  COPYRIGHT (c) 1989-1999.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 *
 *
 *  The functions in this file implement the API to the RTEMS Cache Manager and
 *  are divided into data cache and instruction cache functions. Data cache
 *  functions only have bodies if a data cache is supported. Instruction
 *  cache functions only have bodies if an instruction cache is supported.
 *  Support for a particular cache exists only if CPU_x_CACHE_ALIGNMENT is
 *  defined, where x ∈ {DATA, INSTRUCTION}. These definitions are found in
 *  the Cache Manager Wrapper header files, often
 *
 *  rtems/c/src/lib/libcpu/CPU/cache_.h
 *
 *  The cache implementation header file can define
 *
 *    #define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS
 *
 *  if it provides cache maintenance functions which operate on multiple lines.
 *  Otherwise a generic loop with single line operations will be used.  It is
 *  strongly recommended to provide the implementation in terms of static
 *  inline functions for performance reasons.
 *
 *  The functions below are implemented with CPU dependent inline routines
 *  found in the cache.c files for each CPU. In the event that a CPU does
 *  not support a specific function for a cache it has, the CPU dependent
 *  routine does nothing (but does exist).
 *
 *  At this point, the Cache Manager makes no provision for, and provides no
 *  support for, BSP specific issues such as a secondary cache. In such a system,
 *  the CPU dependent routines would have to be modified, or a BSP layer added
 *  to this Manager.
 */
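
/*
 * For illustration only: a sketch of the rough shape a hypothetical
 * cache_.h could have.  The macro and function names match the ones used
 * below; the alignment values are made up and the empty bodies stand in
 * for the port specific cache instructions.
 *
 *   #define CPU_CACHE_LINE_BYTES 32
 *   #define CPU_DATA_CACHE_ALIGNMENT 32
 *   #define CPU_INSTRUCTION_CACHE_ALIGNMENT 32
 *
 *   static inline void _CPU_cache_flush_1_data_line( const void *d_addr )
 *   {
 *     (void) d_addr;
 *   }
 *
 *   static inline void _CPU_cache_invalidate_1_data_line( const void *d_addr )
 *   {
 *     (void) d_addr;
 *   }
 */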

#include <rtems.h>
#include "cache_.h"

#if CPU_DATA_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
#error "CPU_DATA_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
#endif

#if CPU_INSTRUCTION_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
#error "CPU_INSTRUCTION_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
#endif
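
/*
 * For illustration only: CPU_CACHE_LINE_BYTES is the maximum cache line
 * size in bytes, so it can be used to align and pad buffers at compile
 * time even though the actual processor may use a smaller line size or no
 * cache at all.  A sketch (the buffer name and size are made up, and the
 * GCC aligned attribute is assumed):
 *
 *   static uint8_t dma_buffer[ 512 ]
 *     __attribute__(( aligned( CPU_CACHE_LINE_BYTES ) ));
 */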

#if defined(RTEMS_SMP)

#include <rtems/score/smpimpl.h>

typedef struct {
  const void *addr;
  size_t size;
} smp_cache_area;

#if defined(CPU_DATA_CACHE_ALIGNMENT)

static void smp_cache_data_flush(void *arg)
{
  smp_cache_area *area = arg;

  rtems_cache_flush_multiple_data_lines(area->addr, area->size);
}

static void smp_cache_data_inv(void *arg)
{
  smp_cache_area *area = arg;

  rtems_cache_invalidate_multiple_data_lines(area->addr, area->size);
}

static void smp_cache_data_flush_all(void *arg)
{
  rtems_cache_flush_entire_data();
}

static void smp_cache_data_inv_all(void *arg)
{
  rtems_cache_invalidate_entire_data();
}

#endif /* defined(CPU_DATA_CACHE_ALIGNMENT) */

void
rtems_cache_flush_multiple_data_lines_processor_set(
  const void *addr,
  size_t size,
  const size_t setsize,
  const cpu_set_t *set
)
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  smp_cache_area area = { addr, size };

  _SMP_Multicast_action( setsize, set, smp_cache_data_flush, &area );
#endif
}

void
rtems_cache_invalidate_multiple_data_lines_processor_set(
  const void *addr,
  size_t size,
  const size_t setsize,
  const cpu_set_t *set
)
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  smp_cache_area area = { addr, size };

  _SMP_Multicast_action( setsize, set, smp_cache_data_inv, &area );
#endif
}

void
rtems_cache_flush_entire_data_processor_set(
  const size_t setsize,
  const cpu_set_t *set
)
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _SMP_Multicast_action( setsize, set, smp_cache_data_flush_all, NULL );
#endif
}

void
rtems_cache_invalidate_entire_data_processor_set(
  const size_t setsize,
  const cpu_set_t *set
)
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _SMP_Multicast_action( setsize, set, smp_cache_data_inv_all, NULL );
#endif
}

#endif /* defined(RTEMS_SMP) */
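
/*
 * For illustration only: a sketch of how a caller might flush a buffer on
 * processors 0 and 1 only.  The buffer is made up for the example;
 * cpu_set_t, CPU_ZERO() and CPU_SET() come from <sys/cpuset.h>.
 *
 *   static char buffer[ 256 ];
 *   cpu_set_t set;
 *
 *   CPU_ZERO( &set );
 *   CPU_SET( 0, &set );
 *   CPU_SET( 1, &set );
 *   rtems_cache_flush_multiple_data_lines_processor_set(
 *     buffer,
 *     sizeof( buffer ),
 *     sizeof( set ),
 *     &set
 *   );
 */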

/*
 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE A DATA CACHE
 */

/*
 * This function is called to flush the data cache by performing cache
 * copybacks. It must determine how many cache lines need to be copied
 * back and then perform the copybacks.
 */
void
rtems_cache_flush_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_flush_data_range( d_addr, n_bytes );
#else
  const void * final_address;

 /*
  * Set d_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be pushed. Increment d_addr and push
  * the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to flush is zero */
    return;

  final_address = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
  while( d_addr <= final_address )  {
    _CPU_cache_flush_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }
#endif
#endif
}
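
/*
 * A worked example of the loop above, assuming CPU_DATA_CACHE_ALIGNMENT
 * is 32 and a flush of 40 bytes starting at address 0x1234:
 *
 *   final_address = 0x1234 + 40 - 1 = 0x125B
 *   d_addr        = 0x1234 & ~0x1F  = 0x1220
 *
 * The loop flushes the lines at 0x1220 and 0x1240, then stops because the
 * next d_addr, 0x1260, is past final_address; two lines cover the range.
 */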


/*
 * This function is responsible for performing a data cache invalidate.
 * It must determine how many cache lines need to be invalidated and then
 * perform the invalidations.
 */

void
rtems_cache_invalidate_multiple_data_lines( const void * d_addr, size_t n_bytes )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
  _CPU_cache_invalidate_data_range( d_addr, n_bytes );
#else
  const void * final_address;

 /*
  * Set d_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be invalidated. Increment d_addr and
  * invalidate the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to invalidate is zero */
    return;

  final_address = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
  while( final_address >= d_addr ) {
    _CPU_cache_invalidate_1_data_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }
#endif
#endif
}
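
/*
 * For illustration only: a typical use is to invalidate a receive buffer
 * before reading data that a DMA engine has written to memory, so that
 * stale cache lines are not returned.  The names below are made up for
 * the example.
 *
 *   rtems_cache_invalidate_multiple_data_lines( rx_buffer, rx_length );
 *   consume_received_data( rx_buffer, rx_length );
 */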


/*
 * This function is responsible for performing a data cache flush.
 * It flushes the entire cache.
 */
void
rtems_cache_flush_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
   /*
    * Call the CPU-specific routine
    */
   _CPU_cache_flush_entire_data();
#endif
}


/*
 * This function is responsible for performing a data cache
 * invalidate. It invalidates the entire cache.
 */
void
rtems_cache_invalidate_entire_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
 /*
  * Call the CPU-specific routine
  */

 _CPU_cache_invalidate_entire_data();
#endif
}


/*
 * This function returns the data cache granularity.
 */
size_t
rtems_cache_get_data_line_size( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  return CPU_DATA_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}


size_t
rtems_cache_get_data_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_data_cache_size( level );
#else
  return 0;
#endif
}

/*
 * This function freezes the data cache; cache lines
 * are not replaced.
 */
void
rtems_cache_freeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_freeze_data();
#endif
}


/*
 * This function unfreezes the data cache.
 */
void rtems_cache_unfreeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_data();
#endif
}


/* Turn on the data cache. */
void
rtems_cache_enable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_enable_data();
#endif
}


/* Turn off the data cache. */
void
rtems_cache_disable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_disable_data();
#endif
}



/*
 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE AN INSTRUCTION CACHE
 */

#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && defined(RTEMS_SMP) \
  && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)

static void smp_cache_inst_inv(void *arg)
{
  smp_cache_area *area = arg;

  _CPU_cache_invalidate_instruction_range(area->addr, area->size);
}

static void smp_cache_inst_inv_all(void *arg)
{
  _CPU_cache_invalidate_entire_instruction();
}

#endif

/*
 * This function is responsible for performing an instruction cache
 * invalidate. It must determine how many cache lines need to be invalidated
 * and then perform the invalidations.
 */

#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && !defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
static void
_CPU_cache_invalidate_instruction_range(
  const void * i_addr,
  size_t n_bytes
)
{
  const void * final_address;

 /*
  * Set i_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be invalidated. Increment i_addr and
  * invalidate the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to invalidate is zero */
    return;

  final_address = (void *)((size_t)i_addr + n_bytes - 1);
  i_addr = (void *)((size_t)i_addr & ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1));
  while( final_address >= i_addr ) {
    _CPU_cache_invalidate_1_instruction_line( i_addr );
    i_addr = (void *)((size_t)i_addr + CPU_INSTRUCTION_CACHE_ALIGNMENT);
  }
}
#endif

void
rtems_cache_invalidate_multiple_instruction_lines(
  const void * i_addr,
  size_t n_bytes
)
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  smp_cache_area area = { i_addr, n_bytes };

  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv, &area );
#else
  _CPU_cache_invalidate_instruction_range( i_addr, n_bytes );
#endif
#endif
}
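
/*
 * For illustration only: a typical sequence after writing instructions to
 * memory (e.g. in a loader) is to flush the data cache so the stores
 * reach memory, then invalidate the instruction cache for the same range
 * before executing the new code.  The names below are made up for the
 * example.
 *
 *   memcpy( code_area, code_image, code_size );
 *   rtems_cache_flush_multiple_data_lines( code_area, code_size );
 *   rtems_cache_invalidate_multiple_instruction_lines( code_area, code_size );
 */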


/*
 * This function is responsible for performing an instruction cache
 * invalidate. It invalidates the entire cache.
 */
void
rtems_cache_invalidate_entire_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv_all, NULL );
#else
 _CPU_cache_invalidate_entire_instruction();
#endif
#endif
}


/*
 * This function returns the instruction cache granularity.
 */
size_t
rtems_cache_get_instruction_line_size( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  return CPU_INSTRUCTION_CACHE_ALIGNMENT;
#else
  return 0;
#endif
}


size_t
rtems_cache_get_instruction_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_instruction_cache_size( level );
#else
  return 0;
#endif
}


/*
 * This function freezes the instruction cache; cache lines
 * are not replaced.
 */
void
rtems_cache_freeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_freeze_instruction();
#endif
}


/*
 * This function unfreezes the instruction cache.
 */
void rtems_cache_unfreeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_instruction();
#endif
}


/* Turn on the instruction cache. */
void
rtems_cache_enable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_enable_instruction();
#endif
}


/* Turn off the instruction cache. */
void
rtems_cache_disable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_disable_instruction();
#endif
}