source: rtems/bsps/shared/cache/cacheimpl.h @ a6f70e1

Last change on this file since a6f70e1 was a6f70e1, checked in by Sebastian Huber <sebastian.huber@…>, on Dec 21, 2018 at 6:14:42 AM

bsps: Remove superfluous comments in cacheimpl.h

Remove superfluous blank lines.

Update #3667.

  • Property mode set to 100644
File size: 10.5 KB
Line 
1/*
2 *  Cache Manager
3 *
4 *  COPYRIGHT (c) 1989-1999.
5 *  On-Line Applications Research Corporation (OAR).
6 *
7 *  The license and distribution terms for this file may be
8 *  found in the file LICENSE in this distribution or at
9 *  http://www.rtems.org/license/LICENSE.
10 *
11 *
12 *  The functions in this file implement the API to the RTEMS Cache Manager and
13 *  are divided into data cache and instruction cache functions. Data cache
14 *  functions only have bodies if a data cache is supported. Instruction
15 *  cache functions only have bodies if an instruction cache is supported.
16 *  Support for a particular cache exists only if CPU_x_CACHE_ALIGNMENT is
 *  defined, where x is DATA or INSTRUCTION. These definitions are found in
18 *  the Cache Manager Wrapper header files, often
19 *
20 *  rtems/c/src/lib/libcpu/CPU/cache_.h
21 *
22 *  The cache implementation header file can define
23 *
24 *    #define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS
25 *
26 *  if it provides cache maintenance functions which operate on multiple lines.
27 *  Otherwise a generic loop with single line operations will be used.  It is
28 *  strongly recommended to provide the implementation in terms of static
29 *  inline functions for performance reasons.
30 *
31 *  The functions below are implemented with CPU dependent inline routines
32 *  found in the cache.c files for each CPU. In the event that a CPU does
33 *  not support a specific function for a cache it has, the CPU dependent
34 *  routine does nothing (but does exist).
35 *
36 *  At this point, the Cache Manager makes no considerations, and provides no
37 *  support for BSP specific issues such as a secondary cache. In such a system,
38 *  the CPU dependent routines would have to be modified, or a BSP layer added
39 *  to this Manager.
40 */
41
42#include <rtems.h>
43
44#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
45#include <rtems/score/smpimpl.h>
46#endif
47
48#if CPU_DATA_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
49#error "CPU_DATA_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
50#endif
51
52#if CPU_INSTRUCTION_CACHE_ALIGNMENT > CPU_CACHE_LINE_BYTES
53#error "CPU_INSTRUCTION_CACHE_ALIGNMENT is greater than CPU_CACHE_LINE_BYTES"
54#endif
55
56/*
57 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE A DATA CACHE
58 */
59
60/*
61 * This function is called to flush the data cache by performing cache
62 * copybacks. It must determine how many cache lines need to be copied
63 * back and then perform the copybacks.
64 */
65void
66rtems_cache_flush_multiple_data_lines( const void * d_addr, size_t n_bytes )
67{
68#if defined(CPU_DATA_CACHE_ALIGNMENT)
69#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
70  _CPU_cache_flush_data_range( d_addr, n_bytes );
71#else
72  const void * final_address;
73
74 /*
75  * Set d_addr to the beginning of the cache line; final_address indicates
76  * the last address_t which needs to be pushed. Increment d_addr and push
77  * the resulting line until final_address is passed.
78  */
79
80  if( n_bytes == 0 )
81    /* Do nothing if number of bytes to flush is zero */
82    return;
83
84  final_address = (void *)((size_t)d_addr + n_bytes - 1);
85  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
86  while( d_addr <= final_address )  {
87    _CPU_cache_flush_1_data_line( d_addr );
88    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
89  }
90#endif
91#endif
92}
93
94/*
95 * This function is responsible for performing a data cache invalidate.
96 * It must determine how many cache lines need to be invalidated and then
97 * perform the invalidations.
98 */
99void
100rtems_cache_invalidate_multiple_data_lines( const void * d_addr, size_t n_bytes )
101{
102#if defined(CPU_DATA_CACHE_ALIGNMENT)
103#if defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
104  _CPU_cache_invalidate_data_range( d_addr, n_bytes );
105#else
106  const void * final_address;
107
108 /*
109  * Set d_addr to the beginning of the cache line; final_address indicates
110  * the last address_t which needs to be invalidated. Increment d_addr and
111  * invalidate the resulting line until final_address is passed.
112  */
113
114  if( n_bytes == 0 )
115    /* Do nothing if number of bytes to invalidate is zero */
116    return;
117
118  final_address = (void *)((size_t)d_addr + n_bytes - 1);
119  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
120  while( final_address >= d_addr ) {
121    _CPU_cache_invalidate_1_data_line( d_addr );
122    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
123  }
124#endif
125#endif
126}
127
128/*
129 * This function is responsible for performing a data cache flush.
130 * It flushes the entire cache.
131 */
132void
133rtems_cache_flush_entire_data( void )
134{
135#if defined(CPU_DATA_CACHE_ALIGNMENT)
136   /*
137    * Call the CPU-specific routine
138    */
139   _CPU_cache_flush_entire_data();
140#endif
141}
142
143/*
144 * This function is responsible for performing a data cache
145 * invalidate. It invalidates the entire cache.
146 */
147void
148rtems_cache_invalidate_entire_data( void )
149{
150#if defined(CPU_DATA_CACHE_ALIGNMENT)
151 /*
152  * Call the CPU-specific routine
153  */
154
155 _CPU_cache_invalidate_entire_data();
156#endif
157}
158
159/*
160 * This function returns the data cache granularity.
161 */
162size_t
163rtems_cache_get_data_line_size( void )
164{
165#if defined(CPU_DATA_CACHE_ALIGNMENT)
166  return CPU_DATA_CACHE_ALIGNMENT;
167#else
168  return 0;
169#endif
170}
171
/*
 * Returns the data cache size in bytes for the given cache level, or 0
 * when the CPU support layer provides no cache size functions.  The
 * interpretation of the level argument is defined by the CPU-specific
 * _CPU_cache_get_data_cache_size() routine.
 */
size_t
rtems_cache_get_data_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_data_cache_size( level );
#else
  return 0;
#endif
}
181
182/*
183 * This function freezes the data cache; cache lines
184 * are not replaced.
185 */
186void
187rtems_cache_freeze_data( void )
188{
189#if defined(CPU_DATA_CACHE_ALIGNMENT)
190  _CPU_cache_freeze_data();
191#endif
192}
193
/*
 * Unfreeze the data cache so that line replacement resumes.  No-op when
 * the CPU has no data cache.
 */
void rtems_cache_unfreeze_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_data();
#endif
}
200
/*
 * Enable the data cache.  No-op when the CPU has no data cache.
 */
void
rtems_cache_enable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_enable_data();
#endif
}
208
/*
 * Disable the data cache.  No-op when the CPU has no data cache.
 */
void
rtems_cache_disable_data( void )
{
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  _CPU_cache_disable_data();
#endif
}
216
217/*
218 * THESE FUNCTIONS ONLY HAVE BODIES IF WE HAVE AN INSTRUCTION CACHE
219 */
220
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && defined(RTEMS_SMP) \
  && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)

/* Argument record for broadcasting a range invalidation to all processors */
typedef struct {
  const void *addr; /* start of the range to invalidate */
  size_t size;      /* length of the range in bytes */
} smp_cache_area;

/* Per-processor action: invalidate the instruction cache range in *arg */
static void smp_cache_inst_inv(void *arg)
{
  smp_cache_area *area = arg;

  _CPU_cache_invalidate_instruction_range(area->addr, area->size);
}

/* Per-processor action: invalidate the entire instruction cache (arg unused) */
static void smp_cache_inst_inv_all(void *arg)
{
  _CPU_cache_invalidate_entire_instruction();
}

#endif
243
244/*
245 * This function is responsible for performing an instruction cache
246 * invalidate. It must determine how many cache lines need to be invalidated
247 * and then perform the invalidations.
248 */
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT) \
  && !defined(CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS)
/*
 * Generic line-by-line implementation of the instruction cache range
 * invalidation, used when the CPU support layer does not provide a
 * range operation of its own.
 */
static void
_CPU_cache_invalidate_instruction_range(
  const void * i_addr,
  size_t n_bytes
)
{
  size_t line;
  size_t last;

  /* An empty range needs no cache maintenance */
  if ( n_bytes == 0 ) {
    return;
  }

  /* Address of the last byte that must be invalidated */
  last = (size_t) i_addr + n_bytes - 1;

  /*
   * Align the start address down to its cache line boundary, then
   * invalidate line by line until the line containing the last byte is
   * done.
   */
  for (
    line = (size_t) i_addr & ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1);
    line <= last;
    line += CPU_INSTRUCTION_CACHE_ALIGNMENT
  ) {
    _CPU_cache_invalidate_1_instruction_line( (const void *) line );
  }
}
#endif
277
/*
 * Invalidate the instruction cache lines covering the address range
 * [i_addr, i_addr + n_bytes).  No-op when the CPU has no instruction
 * cache.
 */
void
rtems_cache_invalidate_multiple_instruction_lines(
  const void * i_addr,
  size_t n_bytes
)
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
  /*
   * Without instruction cache snooping other processors may hold stale
   * lines, so broadcast the invalidation to all processors.
   */
  smp_cache_area area = { i_addr, n_bytes };

  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv, &area );
#else
  _CPU_cache_invalidate_instruction_range( i_addr, n_bytes );
#endif
#endif
}
294
295/*
296 * This function is responsible for performing an instruction cache
297 * invalidate. It invalidates the entire cache.
298 */
299void
300rtems_cache_invalidate_entire_instruction( void )
301{
302#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
303#if defined(RTEMS_SMP) && defined(CPU_CACHE_NO_INSTRUCTION_CACHE_SNOOPING)
304  _SMP_Multicast_action( 0, NULL, smp_cache_inst_inv_all, NULL );
305#else
306 _CPU_cache_invalidate_entire_instruction();
307#endif
308#endif
309}
310
311/*
312 * This function returns the instruction cache granularity.
313 */
314size_t
315rtems_cache_get_instruction_line_size( void )
316{
317#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
318  return CPU_INSTRUCTION_CACHE_ALIGNMENT;
319#else
320  return 0;
321#endif
322}
323
/*
 * Returns the instruction cache size in bytes for the given cache
 * level, or 0 when the CPU support layer provides no cache size
 * functions.  The interpretation of the level argument is defined by
 * the CPU-specific _CPU_cache_get_instruction_cache_size() routine.
 */
size_t
rtems_cache_get_instruction_cache_size( uint32_t level )
{
#if defined(CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS)
  return _CPU_cache_get_instruction_cache_size( level );
#else
  return 0;
#endif
}
333
334/*
335 * This function freezes the instruction cache; cache lines
336 * are not replaced.
337 */
338void
339rtems_cache_freeze_instruction( void )
340{
341#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
342  _CPU_cache_freeze_instruction();
343#endif
344}
345
/*
 * Unfreeze the instruction cache so that line replacement resumes.
 * No-op when the CPU has no instruction cache.
 */
void rtems_cache_unfreeze_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_unfreeze_instruction();
#endif
}
352
/*
 * Enable the instruction cache.  No-op when the CPU has no instruction
 * cache.
 */
void
rtems_cache_enable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_enable_instruction();
#endif
}
360
/*
 * Disable the instruction cache.  No-op when the CPU has no instruction
 * cache.
 */
void
rtems_cache_disable_instruction( void )
{
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  _CPU_cache_disable_instruction();
#endif
}
368
/*
 * Returns the maximal cache line size of all cache kinds in bytes.
 *
 * If the CPU support layer defines CPU_MAXIMAL_CACHE_ALIGNMENT that
 * value is used directly; otherwise the maximum of the data and
 * instruction cache alignments is computed (0 when neither cache kind
 * is configured).
 *
 * The original version fell through to dead declarations and an
 * unreachable second return when CPU_MAXIMAL_CACHE_ALIGNMENT was
 * defined; the #else branch removes that unreachable code.
 */
size_t rtems_cache_get_maximal_line_size( void )
{
#if defined(CPU_MAXIMAL_CACHE_ALIGNMENT)
  return CPU_MAXIMAL_CACHE_ALIGNMENT;
#else
  size_t max_line_size = 0;
#if defined(CPU_DATA_CACHE_ALIGNMENT)
  {
    size_t data_line_size = CPU_DATA_CACHE_ALIGNMENT;
    if ( max_line_size < data_line_size ) {
      max_line_size = data_line_size;
    }
  }
#endif
#if defined(CPU_INSTRUCTION_CACHE_ALIGNMENT)
  {
    size_t instruction_line_size = CPU_INSTRUCTION_CACHE_ALIGNMENT;
    if ( max_line_size < instruction_line_size ) {
      max_line_size = instruction_line_size;
    }
  }
#endif
  return max_line_size;
#endif
}
392
393/*
394 * Purpose is to synchronize caches after code has been loaded
395 * or self modified. Actual implementation is simple only
396 * but it can and should be repaced by optimized version
397 * which does not need flush and invalidate all cache levels
398 * when code is changed.
399 */
400void rtems_cache_instruction_sync_after_code_change(
401  const void *code_addr,
402  size_t      n_bytes
403)
404{
405#if defined(CPU_CACHE_SUPPORT_PROVIDES_INSTRUCTION_SYNC_FUNCTION)
406  _CPU_cache_instruction_sync_after_code_change( code_addr, n_bytes );
407#else
408  rtems_cache_flush_multiple_data_lines( code_addr, n_bytes );
409  rtems_cache_invalidate_multiple_instruction_lines( code_addr, n_bytes );
410#endif
411}
Note: See TracBrowser for help on using the repository browser.