source: rtems/c/src/lib/libbsp/arm/xilinx-zynq/include/cache_.h @ 2bd440e

Last change on this file: 2bd440e, checked in by Ric Claus <claus@…> on 08/22/13 at 12:18:14

bsp/xilinx-zynq: Add cache support

/*
 * Authorship
 * ----------
 * This software was created by
 *     R. Claus <claus@slac.stanford.edu>, 2013,
 *       Stanford Linear Accelerator Center, Stanford University.
 *
 * Acknowledgement of sponsorship
 * ------------------------------
 * This software was produced by
 *     the Stanford Linear Accelerator Center, Stanford University,
 *     under Contract DE-AC03-76SFO0515 with the Department of Energy.
 *
 * Government disclaimer of liability
 * ----------------------------------
 * Neither the United States nor the United States Department of Energy,
 * nor any of their employees, makes any warranty, express or implied, or
 * assumes any legal liability or responsibility for the accuracy,
 * completeness, or usefulness of any data, apparatus, product, or process
 * disclosed, or represents that its use would not infringe privately owned
 * rights.
 *
 * Stanford disclaimer of liability
 * --------------------------------
 * Stanford University makes no representations or warranties, express or
 * implied, nor assumes any liability for the use of this software.
 *
 * Stanford disclaimer of copyright
 * --------------------------------
 * Stanford University, owner of the copyright, hereby disclaims its
 * copyright and all other rights in this software.  Hence, anyone may
 * freely use it for any purpose without restriction.
 *
 * Maintenance of notices
 * ----------------------
 * In the interest of clarity regarding the origin and status of this
 * SLAC software, this and all the preceding Stanford University notices
 * are to remain affixed to any copy or derivative of this software made
 * or distributed by the recipient and are to be affixed to any copy of
 * software made or distributed by the recipient that contains a copy or
 * derivative of this software.
 *
 * ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03
 */

#ifndef LIBBSP_ARM_ZYNQ_CACHE__H
#define LIBBSP_ARM_ZYNQ_CACHE__H

#include <libcpu/arm-cp15.h>

/* These two defines also ensure that the rtems_cache_* functions have bodies */
#define CPU_DATA_CACHE_ALIGNMENT 32
#define CPU_INSTRUCTION_CACHE_ALIGNMENT 32

#define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS

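/*
 * Illustrative usage sketch: because the alignment macros and
 * CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS are defined above, the shared
 * RTEMS cache manager emits bodies for the rtems_cache_* API that dispatch
 * to the _CPU_cache_* functions at the end of this file.  A driver would
 * typically call them around DMA (buffer names here are hypothetical):
 *
 *   rtems_cache_flush_multiple_data_lines(tx_buf, tx_len);
 *   // ... start a DMA transfer that reads tx_buf from memory ...
 *
 *   // ... after a DMA transfer that writes rx_buf to memory completes ...
 *   rtems_cache_invalidate_multiple_data_lines(rx_buf, rx_len);
 */
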
#define L2CC_BASE_ADDR 0xF8F02000U

#define ZYNQ_L2_CACHE_LINE_SIZE 32

/* L2CC Register Offsets */
typedef struct {
  uint32_t cache_id;                                    /* Cache ID */
  uint32_t cache_type;                                  /* Cache type */

  uint8_t  reserved_8[0x100 - 8];
  uint32_t ctrl;                                        /* Control */
#define L2CC_ENABLE_MASK                 0x00000001     /* Enables the L2CC */

  uint32_t aux_ctrl;                                    /* Auxiliary control */
#define L2CC_AUX_EBRESPE_MASK            0x40000000     /* Early BRESP Enable */
#define L2CC_AUX_IPFE_MASK               0x20000000     /* Instruction Prefetch Enable */
#define L2CC_AUX_DPFE_MASK               0x10000000     /* Data Prefetch Enable */
#define L2CC_AUX_NSIC_MASK               0x08000000     /* Non-secure interrupt access control */
#define L2CC_AUX_NSLE_MASK               0x04000000     /* Non-secure lockdown enable */
#define L2CC_AUX_CRP_MASK                0x02000000     /* Cache replacement policy */
#define L2CC_AUX_FWE_MASK                0x01800000     /* Force write allocate */
#define L2CC_AUX_SAOE_MASK               0x00400000     /* Shared attribute override enable */
#define L2CC_AUX_PE_MASK                 0x00200000     /* Parity enable */
#define L2CC_AUX_EMBE_MASK               0x00100000     /* Event monitor bus enable */
#define L2CC_AUX_WAY_SIZE_MASK           0x000E0000     /* Way-size */
#define L2CC_AUX_ASSOC_MASK              0x00010000     /* Associativity */
#define L2CC_AUX_SAIE_MASK               0x00002000     /* Shared attribute invalidate enable */
#define L2CC_AUX_EXCL_CACHE_MASK         0x00001000     /* Exclusive cache configuration */
#define L2CC_AUX_SBDLE_MASK              0x00000800     /* Store buffer device limitation Enable */
#define L2CC_AUX_HPSODRE_MASK            0x00000400     /* High Priority for SO and Dev Reads Enable */
#define L2CC_AUX_FLZE_MASK               0x00000001     /* Full line of zero enable */

#define L2CC_AUX_REG_DEFAULT_MASK        0x72360000     /* Enable all prefetching, */
                                                        /* Cache replacement policy, Parity enable, */
                                                        /* Event monitor bus enable and Way Size (64 KB) */
#define L2CC_AUX_REG_ZERO_MASK           0xFFF1FFFF
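/*
 * For reference, 0x72360000 above decodes as EBRESPE | IPFE | DPFE
 * (0x70000000) | CRP (0x02000000) | PE (0x00200000) | EMBE (0x00100000) |
 * way size 0b011 = 64 KB (0x00060000), matching the comment on the define.
 */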

  uint32_t tag_ram_ctrl;
#define L2CC_TAG_RAM_DEFAULT_MASK        0x00000111     /* Latency for tag RAM */
  uint32_t data_ram_ctrl;
#define L2CC_DATA_RAM_DEFAULT_MASK       0x00000121     /* Latency for data RAM */

  uint8_t  reserved_110[0x200 - 0x110];
  uint32_t ev_ctrl;                                     /* Event counter control */
  uint32_t ev_cnt1_cfg;                                 /* Event counter 1 configuration */
  uint32_t ev_cnt0_cfg;                                 /* Event counter 0 configuration */
  uint32_t ev_cnt1;                                     /* Event counter 1 value */
  uint32_t ev_cnt0;                                     /* Event counter 0 value */
  uint32_t int_mask;                                    /* Interrupt enable mask */
  uint32_t int_mask_status;                             /* Masked interrupt status (read-only) */
  uint32_t int_raw_status;                              /* Unmasked interrupt status */
  uint32_t int_clr;                                     /* Interrupt clear */
/* Interrupt bit masks */
#define L2CC_INT_DECERR_MASK             0x00000100     /* DECERR from L3 */
#define L2CC_INT_SLVERR_MASK             0x00000080     /* SLVERR from L3 */
#define L2CC_INT_ERRRD_MASK              0x00000040     /* Error on L2 data RAM (Read) */
#define L2CC_INT_ERRRT_MASK              0x00000020     /* Error on L2 tag RAM (Read) */
#define L2CC_INT_ERRWD_MASK              0x00000010     /* Error on L2 data RAM (Write) */
#define L2CC_INT_ERRWT_MASK              0x00000008     /* Error on L2 tag RAM (Write) */
#define L2CC_INT_PARRD_MASK              0x00000004     /* Parity Error on L2 data RAM (Read) */
#define L2CC_INT_PARRT_MASK              0x00000002     /* Parity Error on L2 tag RAM (Read) */
#define L2CC_INT_ECNTR_MASK              0x00000001     /* Event Counter1/0 Overflow Increment */

  uint8_t  reserved_224[0x730 - 0x224];
  uint32_t cache_sync;                                  /* Drain the STB */
  uint8_t  reserved_734[0x770 - 0x734];
  uint32_t inv_pa;                                      /* Invalidate line by PA */
  uint8_t  reserved_774[0x77c - 0x774];
  uint32_t inv_way;                                     /* Invalidate by Way */
  uint8_t  reserved_780[0x7b0 - 0x780];
  uint32_t clean_pa;                                    /* Clean Line by PA */
  uint8_t  reserved_7b4[0x7b8 - 0x7b4];
  uint32_t clean_index;                                 /* Clean Line by Set/Way */
  uint32_t clean_way;                                   /* Clean by Way */
  uint8_t  reserved_7c0[0x7f0 - 0x7c0];
  uint32_t clean_inv_pa;                                /* Clean and Invalidate Line by PA */
  uint8_t  reserved_7f4[0x7f8 - 0x7f4];
  uint32_t clean_inv_indx;                              /* Clean and Invalidate Line by Set/Way */
  uint32_t clean_inv_way;                               /* Clean and Invalidate by Way */

  uint8_t  reserved_800[0x900 - 0x800];
  uint32_t d_lockdown_0;                                /* Data        lock down 0 */
  uint32_t i_lockdown_0;                                /* Instruction lock down 0 */
  uint32_t d_lockdown_1;                                /* Data        lock down 1 */
  uint32_t i_lockdown_1;                                /* Instruction lock down 1 */
  uint32_t d_lockdown_2;                                /* Data        lock down 2 */
  uint32_t i_lockdown_2;                                /* Instruction lock down 2 */
  uint32_t d_lockdown_3;                                /* Data        lock down 3 */
  uint32_t i_lockdown_3;                                /* Instruction lock down 3 */
  uint32_t d_lockdown_4;                                /* Data        lock down 4 */
  uint32_t i_lockdown_4;                                /* Instruction lock down 4 */
  uint32_t d_lockdown_5;                                /* Data        lock down 5 */
  uint32_t i_lockdown_5;                                /* Instruction lock down 5 */
  uint32_t d_lockdown_6;                                /* Data        lock down 6 */
  uint32_t i_lockdown_6;                                /* Instruction lock down 6 */
  uint32_t d_lockdown_7;                                /* Data        lock down 7 */
  uint32_t i_lockdown_7;                                /* Instruction lock down 7 */

  uint8_t  reserved_940[0x950 - 0x940];
  uint32_t lock_line_en;                                /* Lockdown by Line Enable */
  uint32_t unlock_way;                                  /* Cache lockdown by way */

  uint8_t  reserved_958[0xc00 - 0x958];
  uint32_t addr_filtering_start;                        /* Address range redirect, part 1 */
  uint32_t addr_filtering_end;                          /* Address range redirect, part 2 */
#define L2CC_ADDR_FILTER_VALID_MASK      0xFFF00000     /* Address filtering valid bits */
#define L2CC_ADDR_FILTER_ENABLE_MASK     0x00000001     /* Address filtering enable bit */

  uint8_t  reserved_c08[0xf40 - 0xc08];
  uint32_t debug_ctrl;                                  /* Debug control */
#define L2CC_DEBUG_SPIDEN_MASK           0x00000004     /* Debug SPIDEN bit */
#define L2CC_DEBUG_DWB_MASK              0x00000002     /* Debug DWB bit, forces write through */
#define L2CC_DEBUG_DCL_MASK              0x00000001     /* Debug DCL bit, disables cache line fill */

  uint8_t  reserved_f44[0xf60 - 0xf44];
  uint32_t prefetch_ctrl;                               /* Prefetch control */
  uint8_t  reserved_f64[0xf80 - 0xf64];
  uint32_t power_ctrl;                                  /* Power control */
} L2CC;

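/*
 * Layout sanity check (illustrative sketch): the structure above is meant to
 * mirror the PL310/L2C-310 register map, so key offsets can be verified at
 * compile time, assuming RTEMS_STATIC_ASSERT from <rtems/score/basedefs.h>
 * is available:
 *
 *   RTEMS_STATIC_ASSERT(offsetof(L2CC, cache_sync) == 0x730, l2cc_cache_sync);
 *   RTEMS_STATIC_ASSERT(offsetof(L2CC, debug_ctrl) == 0xf40, l2cc_debug_ctrl);
 */
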
static inline void
zynq_cache_l1_cache_properties(uint32_t *l1LineSize,
                         uint32_t *l1NumWays,
                         uint32_t *l1NumSets)
{
  uint32_t id;

  /* Select cache level 1 and Data cache in CSSELR */
  arm_cp15_set_cache_size_selection(0);
  _ARM_Instruction_synchronization_barrier();
  id = arm_cp15_get_cache_size_id();

  *l1LineSize =  (id        & 0x0007U) + 2 + 2; /* log2 of the cache line size in bytes */
  *l1NumWays  = ((id >>  3) & 0x03ffU) + 1;     /* Number of Ways */
  *l1NumSets  = ((id >> 13) & 0x7fffU) + 1;     /* Number of Sets */
}

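/*
 * Worked example: the Cortex-A9 L1 data cache on the Zynq is 32 KB, 4-way,
 * with 32-byte lines, so the CCSIDR decodes to l1LineSize = 5 (log2(32)),
 * l1NumWays = 4 and l1NumSets = 256, i.e. 4 * 256 set/way operations cover
 * the entire cache.
 */
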
static inline void
zynq_cache_l1_cache_flush_1_data_line(const void *d_addr)
{
  /* Select cache Level 1 and Data cache in CSSELR */
  arm_cp15_set_cache_size_selection(0);

  /* Flush the Data cache */
  arm_cp15_data_cache_clean_and_invalidate_line(d_addr);

  /* Wait for L1 flush to complete */
  _ARM_Data_synchronization_barrier();
}

static inline void
zynq_cache_l1_cache_flush_data_range(const void *d_addr, size_t n_bytes)
{
  const void * final_address;

 /*
  * Set d_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be pushed. Increment d_addr and push
  * the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to flush is zero */
    return;

  /* Select cache Level 1 and Data cache in CSSELR */
  arm_cp15_set_cache_size_selection(0);

  final_address = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
  while( d_addr <= final_address )  {
    arm_cp15_data_cache_clean_and_invalidate_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }

  /* Wait for L1 flush to complete */
  _ARM_Data_synchronization_barrier();
}

static inline void
zynq_cache_l1_cache_flush_entire_data(void)
{
  uint32_t l1LineSize, l1NumWays, l1NumSets;
  uint32_t s, w;

  /* Select cache Level 1 and Data cache in CSSELR */
  arm_cp15_set_cache_size_selection(0);
  _ARM_Instruction_synchronization_barrier();

  /* Get the L1 cache properties */
  zynq_cache_l1_cache_properties(&l1LineSize, &l1NumWays, &l1NumSets);

  /*
   * Flush all the cache lines.  Iterate over way and set indices rather
   * than pre-scaled values so that l1NumWays << 30 cannot overflow uint32_t;
   * the way field of DCCISW sits in bits [31:30] for this 4-way cache.
   */
  for (w = 0; w < l1NumWays; w++) {
    for (s = 0; s < l1NumSets; s++) {
      /* Flush by Set/Way */
      arm_cp15_data_cache_clean_and_invalidate_line_by_set_and_way(
        (w << 30) | (s << l1LineSize));
    }
  }

  /* Wait for L1 flush to complete */
  _ARM_Data_synchronization_barrier();
}
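/*
 * Set/Way operand sketch for the loop above (4-way cache, 32-byte lines):
 * way 2, set 10 encodes as (2 << 30) | (10 << 5) = 0x80000140, which is the
 * operand format DCCISW/DCISW expect per the ARMv7-A ARM.
 */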

static inline void
zynq_cache_l1_cache_invalidate_1_data_line(const void *d_addr)
{
  /* Select cache Level 1 and Data cache in CSSELR */
  arm_cp15_set_cache_size_selection(0);

  /* Invalidate the cache line */
  arm_cp15_data_cache_invalidate_line(d_addr);

  /* Wait for L1 invalidate to complete */
  _ARM_Data_synchronization_barrier();
}

static inline void
zynq_cache_l1_cache_invalidate_data_range(const void *d_addr, size_t n_bytes)
{
  const void * final_address;

 /*
  * Set d_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be invalidated. Increment d_addr and
  * invalidate the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to invalidate is zero */
    return;

  /* Select cache Level 1 and Data cache in CSSELR */
  arm_cp15_set_cache_size_selection(0);

  final_address = (void *)((size_t)d_addr + n_bytes - 1);
  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
  while( final_address >= d_addr ) {
    arm_cp15_data_cache_invalidate_line( d_addr );
    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
  }

  /* Wait for L1 invalidate to complete */
  _ARM_Data_synchronization_barrier();
}

static inline void
zynq_cache_l1_cache_invalidate_entire_data(void)
{
  uint32_t l1LineSize, l1NumWays, l1NumSets;
  uint32_t s, w;

  /* Select cache Level 1 and Data cache in CSSELR */
  arm_cp15_set_cache_size_selection(0);
  _ARM_Instruction_synchronization_barrier();

  /* Get the L1 cache properties */
  zynq_cache_l1_cache_properties(&l1LineSize, &l1NumWays, &l1NumSets);

  /*
   * Invalidate all the cache lines.  As in the flush routine above, iterate
   * over way and set indices to avoid the uint32_t overflow of
   * l1NumWays << 30; the way field of DCISW sits in bits [31:30] for this
   * 4-way cache.
   */
  for (w = 0; w < l1NumWays; w++) {
    for (s = 0; s < l1NumSets; s++) {
      /* Invalidate by Set/Way */
      arm_cp15_data_cache_invalidate_line_by_set_and_way(
        (w << 30) | (s << l1LineSize));
    }
  }

  /* Wait for L1 invalidate to complete */
  _ARM_Data_synchronization_barrier();
}

static inline void
zynq_cache_l1_cache_store_data(const void *d_addr)
{
  /* Select cache Level 1 and Data cache in CSSELR */
  arm_cp15_set_cache_size_selection(0);

  /* Store the Data cache line */
  arm_cp15_data_cache_clean_line(d_addr);

  /* Wait for L1 store to complete */
  _ARM_Data_synchronization_barrier();
}

static inline void
zynq_cache_l1_cache_freeze_data(void)
{
  /* TODO */
}

static inline void
zynq_cache_l1_cache_unfreeze_data(void)
{
  /* TODO */
}

static inline void
zynq_cache_l1_cache_invalidate_1_instruction_line(const void *i_addr)
{
  /* Select cache Level 1 and Instruction cache in CSSELR */
  arm_cp15_set_cache_size_selection(1);

  /* Invalidate the Instruction cache line */
  arm_cp15_instruction_cache_invalidate_line(i_addr);

  /* Wait for L1 invalidate to complete */
  _ARM_Data_synchronization_barrier();
}

static inline void
zynq_cache_l1_cache_invalidate_instruction_range(const void *i_addr, size_t n_bytes)
{
  const void * final_address;

 /*
  * Set i_addr to the beginning of the cache line; final_address indicates
  * the last address which needs to be invalidated. Increment i_addr and
  * invalidate the resulting line until final_address is passed.
  */

  if( n_bytes == 0 )
    /* Do nothing if number of bytes to invalidate is zero */
    return;

  /* Select cache Level 1 and Instruction cache in CSSELR */
  arm_cp15_set_cache_size_selection(1);

  final_address = (void *)((size_t)i_addr + n_bytes - 1);
  i_addr = (void *)((size_t)i_addr & ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1));
  while( final_address >= i_addr ) {
    arm_cp15_instruction_cache_invalidate_line( i_addr );
    i_addr = (void *)((size_t)i_addr + CPU_INSTRUCTION_CACHE_ALIGNMENT);
  }

  /* Wait for L1 invalidate to complete */
  _ARM_Data_synchronization_barrier();
}

static inline void
zynq_cache_l1_cache_invalidate_entire_instruction(void)
{
  /* Select cache Level 1 and Instruction cache in CSSELR */
  arm_cp15_set_cache_size_selection(1);

  /* Invalidate the Instruction cache */
  arm_cp15_instruction_cache_invalidate();

  /* Wait for L1 invalidate to complete */
  _ARM_Data_synchronization_barrier();
}

static inline void
zynq_cache_l1_cache_freeze_instruction(void)
{
  /* TODO */
}

static inline void
zynq_cache_l1_cache_unfreeze_instruction(void)
{
  /* TODO */
}

static inline void
zynq_cache_l1_cache_enable_data(void)
{
  rtems_interrupt_level level;
  uint32_t ctrl;

  rtems_interrupt_disable(level);

  /* Enable caches only if they are disabled */
  ctrl = arm_cp15_get_control();
  if (!(ctrl & ARM_CP15_CTRL_C)) {
    /* Invalidate the Data cache */
    zynq_cache_l1_cache_invalidate_entire_data();

    /* Enable the Data cache */
    ctrl |= ARM_CP15_CTRL_C;

    arm_cp15_set_control(ctrl);
  }

  rtems_interrupt_enable(level);
}

static inline void
zynq_cache_l1_cache_disable_data(void)
{
  rtems_interrupt_level level;

  rtems_interrupt_disable(level);

  /* Clean and invalidate the Data cache */
  zynq_cache_l1_cache_flush_entire_data();

  /* Disable the Data cache */
  arm_cp15_set_control(arm_cp15_get_control() & ~ARM_CP15_CTRL_C);

  rtems_interrupt_enable(level);
}

static inline void
zynq_cache_l1_cache_enable_instruction(void)
{
  rtems_interrupt_level level;
  uint32_t              ctrl;

  rtems_interrupt_disable(level);

  /* Enable Instruction cache only if it is disabled */
  ctrl = arm_cp15_get_control();
  if (!(ctrl & ARM_CP15_CTRL_I)) {
    /* Invalidate the Instruction cache */
    zynq_cache_l1_cache_invalidate_entire_instruction();

    /* Enable the Instruction cache */
    ctrl |= ARM_CP15_CTRL_I;

    arm_cp15_set_control(ctrl);
  }

  rtems_interrupt_enable(level);
}

static inline void
zynq_cache_l1_cache_disable_instruction(void)
{
  rtems_interrupt_level level;

  rtems_interrupt_disable(level);

  /* Synchronize the processor */
  _ARM_Data_synchronization_barrier();

  /* Invalidate the Instruction cache */
  zynq_cache_l1_cache_invalidate_entire_instruction();

  /* Disable the Instruction cache */
  arm_cp15_set_control(arm_cp15_get_control() & ~ARM_CP15_CTRL_I);

  rtems_interrupt_enable(level);
}

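/*
 * Note: the PL310 *_pa maintenance registers take physical addresses.  The
 * routines below pass CPU addresses directly, which assumes the BSP's flat
 * (identity) virtual-to-physical mapping of RAM.
 */
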
static inline void
zynq_cache_l2_cache_flush_1_line(const void *d_addr)
{
  volatile L2CC* l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  l2cc->clean_inv_pa = (uint32_t)d_addr;

  /* Synchronize the processor */
  _ARM_Data_synchronization_barrier();
}

static inline void
zynq_cache_l2_cache_flush_range(const void *d_addr, size_t n_bytes)
{
  volatile L2CC *l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  if (n_bytes != 0) {
    uint32_t       adx = (uint32_t)d_addr;
    const uint32_t end = adx + n_bytes;

    /* Back starting address up to start of a line and flush until end */
    for (adx &= ~(ZYNQ_L2_CACHE_LINE_SIZE - 1);
         adx < end;
         adx += ZYNQ_L2_CACHE_LINE_SIZE) {
      l2cc->clean_inv_pa = adx;
    }
  }

  /* Wait for L2 flush to complete */
  while (l2cc->cache_sync != 0);

  /* Synchronize the processor */
  _ARM_Data_synchronization_barrier();
}

static inline void
zynq_cache_l2_cache_flush_entire(void)
{
  volatile L2CC* l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  /* Flush the caches */
  l2cc->clean_inv_way = 0x0000FFFFU;

  /* Wait for the flush to complete */
  while (l2cc->cache_sync != 0);

  /* Synchronize the processor */
  _ARM_Data_synchronization_barrier();
}

static inline void
zynq_cache_l2_cache_invalidate_1_line(const void *d_addr)
{
  volatile L2CC* l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  l2cc->inv_pa = (uint32_t)d_addr;

  /* Synchronize the processor */
  _ARM_Data_synchronization_barrier();
}

static inline void
zynq_cache_l2_cache_invalidate_range(const void* d_addr, size_t n_bytes)
{
  volatile L2CC* l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  if (n_bytes != 0) {
    uint32_t       adx = (uint32_t)d_addr;
    const uint32_t end = adx + n_bytes;

    /* Back starting address up to start of a line and invalidate until end */
    for (adx &= ~(ZYNQ_L2_CACHE_LINE_SIZE - 1);
         adx < end;
         adx += ZYNQ_L2_CACHE_LINE_SIZE) {
      l2cc->inv_pa = adx;
    }
  }

  /* Wait for L2 invalidate to complete */
  while (l2cc->cache_sync != 0);

  /* Synchronize the processor */
  _ARM_Data_synchronization_barrier();
}

static inline void
zynq_cache_l2_cache_invalidate_entire(void)
{
  volatile L2CC *l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  /* Invalidate the caches */
  l2cc->inv_way = 0xFFFFU;

  /* Wait for the invalidate to complete */
  while (l2cc->cache_sync != 0);

  /* Synchronize the processor */
  _ARM_Data_synchronization_barrier();
}

static inline void
zynq_cache_l2_cache_store(const void *d_addr)
{
  volatile L2CC *l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  l2cc->clean_pa = (uint32_t)d_addr;

  /* Synchronize the processor */
  _ARM_Data_synchronization_barrier();
}

static inline void
zynq_cache_l2_cache_freeze(void)
{
  /* TODO */
}

static inline void
zynq_cache_l2_cache_unfreeze(void)
{
  /* TODO */
}

static inline void
zynq_cache_l2_cache_enable(void)
{
  volatile L2CC *l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  /* Only enable if L2CC is currently disabled */
  if ((l2cc->ctrl & L2CC_ENABLE_MASK) == 0) {
    rtems_interrupt_level level;
    uint32_t value;

    rtems_interrupt_disable(level);

    /* Set up the way size and latencies */
    value               = l2cc->aux_ctrl;
    value              &= L2CC_AUX_REG_ZERO_MASK;
    value              |= L2CC_AUX_REG_DEFAULT_MASK;
    l2cc->aux_ctrl      = value;
    l2cc->tag_ram_ctrl  = L2CC_TAG_RAM_DEFAULT_MASK;
    l2cc->data_ram_ctrl = L2CC_DATA_RAM_DEFAULT_MASK;

    /* Clear the pending interrupts */
    l2cc->int_clr       = l2cc->int_raw_status;

    /* Enable the L2CC */
    l2cc->ctrl         |= L2CC_ENABLE_MASK;

    /* Synchronize the processor */
    _ARM_Data_synchronization_barrier();

    /* Enable the Data cache */
    arm_cp15_set_control(arm_cp15_get_control() | ARM_CP15_CTRL_C);

    /* Synchronize the processor */
    _ARM_Data_synchronization_barrier();

    rtems_interrupt_enable(level);
  }
}

static inline void
zynq_cache_l2_cache_disable(void)
{
  volatile L2CC *l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  if (l2cc->ctrl & L2CC_ENABLE_MASK) {
    rtems_interrupt_level level;
    uint32_t              ctrl;

    rtems_interrupt_disable(level);

    ctrl = arm_cp15_get_control();

    /* Disable the L1 Data cache */
    ctrl &= ~ARM_CP15_CTRL_C;

    arm_cp15_set_control(ctrl);

    /* Synchronize the processor */
    _ARM_Data_synchronization_barrier();

    /* Clean and Invalidate L2 Cache */
    zynq_cache_l2_cache_flush_entire();

    /* Disable the L2 cache */
    l2cc->ctrl &= ~L2CC_ENABLE_MASK;

    /* Enable the L1 Data cache */
    ctrl |= ARM_CP15_CTRL_C;

    arm_cp15_set_control(ctrl);

    /* Synchronize the processor */
    _ARM_Data_synchronization_barrier();

    rtems_interrupt_enable(level);
  }
}


static inline void
_CPU_cache_enable_data(void)
{
  zynq_cache_l1_cache_enable_data();
  zynq_cache_l2_cache_enable();
}

static inline void
_CPU_cache_disable_data(void)
{
  zynq_cache_l1_cache_disable_data();
  zynq_cache_l2_cache_disable();
}

static inline void
_CPU_cache_enable_instruction(void)
{
  zynq_cache_l1_cache_enable_instruction();
  zynq_cache_l2_cache_enable();
}

static inline void
_CPU_cache_disable_instruction(void)
{
  zynq_cache_l1_cache_disable_instruction();
  zynq_cache_l2_cache_disable();
}

static inline void
_CPU_cache_flush_data_range(const void *d_addr, size_t n_bytes)
{
  volatile L2CC *l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  if (n_bytes != 0) {
    uint32_t       adx = (uint32_t)d_addr;
    const uint32_t end = adx + n_bytes;

    /* Select cache Level 1 and Data cache in CSSELR */
    arm_cp15_set_cache_size_selection(0);

    /* Back starting address up to start of a line and flush until end */
    for (adx &= ~(CPU_DATA_CACHE_ALIGNMENT - 1);
         adx < end;
         adx += CPU_DATA_CACHE_ALIGNMENT) {
      /* Flush L1 Data cache line */
      arm_cp15_data_cache_clean_and_invalidate_line( (const void*)adx );

      /* Flush L2 cache line */
      l2cc->clean_inv_pa = adx;

      _ARM_Data_synchronization_barrier();
    }
  }

  /* Wait for L1 and L2 flush to complete */
  _ARM_Data_synchronization_barrier();
  while (l2cc->cache_sync != 0);
}

static inline void
_CPU_cache_flush_entire_data(void)
{
  zynq_cache_l1_cache_flush_entire_data();
  zynq_cache_l2_cache_flush_entire();
}

static inline void
_CPU_cache_invalidate_data_range(const void *d_addr, size_t n_bytes)
{
  volatile L2CC *l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  if (n_bytes != 0) {
    uint32_t       adx = (uint32_t)d_addr;
    const uint32_t end = adx + n_bytes;

    /* Select cache Level 1 and Data cache in CSSELR */
    arm_cp15_set_cache_size_selection(0);

    /* Back starting address up to start of a line and invalidate until end */
    for (adx &= ~(CPU_DATA_CACHE_ALIGNMENT - 1);
         adx < end;
         adx += CPU_DATA_CACHE_ALIGNMENT) {
      /* Invalidate L2 cache line */
      l2cc->inv_pa = adx;
      _ARM_Data_synchronization_barrier();

      /* Invalidate L1 Data cache line */
      arm_cp15_data_cache_invalidate_line( (const void *)adx );
    }
  }

  /* Wait for L1 and L2 invalidate to complete */
  _ARM_Data_synchronization_barrier();
  while (l2cc->cache_sync != 0);
}

static inline void
_CPU_cache_invalidate_entire_data(void)
{
  zynq_cache_l2_cache_invalidate_entire();
  zynq_cache_l1_cache_invalidate_entire_data();
}

static inline void
_CPU_cache_store_data_line(const void *d_addr)
{
  zynq_cache_l1_cache_store_data(d_addr);
  zynq_cache_l2_cache_store(d_addr);
}

static inline void
_CPU_cache_freeze_data(void)
{
  zynq_cache_l1_cache_freeze_data();
  zynq_cache_l2_cache_freeze();
}

static inline void
_CPU_cache_unfreeze_data(void)
{
  zynq_cache_l1_cache_unfreeze_data();
  zynq_cache_l2_cache_unfreeze();
}

static inline void
_CPU_cache_invalidate_instruction_range(const void *i_addr, size_t n_bytes)
{
  volatile L2CC *l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  if (n_bytes != 0) {
    uint32_t       adx = (uint32_t)i_addr;
    const uint32_t end = adx + n_bytes;

    /* Select cache Level 1 and Instruction cache in CSSELR */
    arm_cp15_set_cache_size_selection(1);

    /* Back starting address up to start of a line and invalidate until end */
    for (adx &= ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1);
         adx < end;
         adx += CPU_INSTRUCTION_CACHE_ALIGNMENT) {
      /* Invalidate L2 cache line */
      l2cc->inv_pa = adx;
      _ARM_Data_synchronization_barrier();

      /* Invalidate L1 I-cache line */
      arm_cp15_instruction_cache_invalidate_line( (const void *)adx );
    }
  }

  /* Wait for L1 and L2 invalidate to complete */
  _ARM_Data_synchronization_barrier();
  while (l2cc->cache_sync != 0);
}

static inline void
_CPU_cache_invalidate_entire_instruction(void)
{
  zynq_cache_l2_cache_invalidate_entire();
  zynq_cache_l1_cache_invalidate_entire_instruction();
}

static inline void
_CPU_cache_freeze_instruction(void)
{
  zynq_cache_l1_cache_freeze_instruction();
  zynq_cache_l2_cache_freeze();
}

static inline void
_CPU_cache_unfreeze_instruction(void)
{
  zynq_cache_l1_cache_unfreeze_instruction();
  zynq_cache_l2_cache_unfreeze();
}

#endif /* LIBBSP_ARM_ZYNQ_CACHE__H */