source: rtems/c/src/lib/libbsp/arm/xilinx-zynq/include/cache_.h @ 21dd58d9

4.115
Last change on this file since 21dd58d9 was 21dd58d9, checked in by Daniel Ramirez <javamonn@…>, on 12/22/13 at 23:18:34

arm_xilinx-zynq: added new doxygen

  • Property mode set to 100644
File size: 26.7 KB
Line 
1/**
2 * @file
3 * @ingroup zynq_cache
4 * @brief Cache definitions and functions.
5 */
6
7/*
8 * Authorship
9 * ----------
10 * This software was created by
11 *     R. Claus <claus@slac.stanford.edu>, 2013,
12 *       Stanford Linear Accelerator Center, Stanford University.
13 *
14 * Acknowledgement of sponsorship
15 * ------------------------------
16 * This software was produced by
17 *     the Stanford Linear Accelerator Center, Stanford University,
18 *     under Contract DE-AC03-76SFO0515 with the Department of Energy.
19 *
20 * Government disclaimer of liability
21 * ----------------------------------
22 * Neither the United States nor the United States Department of Energy,
23 * nor any of their employees, makes any warranty, express or implied, or
24 * assumes any legal liability or responsibility for the accuracy,
25 * completeness, or usefulness of any data, apparatus, product, or process
26 * disclosed, or represents that its use would not infringe privately owned
27 * rights.
28 *
29 * Stanford disclaimer of liability
30 * --------------------------------
31 * Stanford University makes no representations or warranties, express or
32 * implied, nor assumes any liability for the use of this software.
33 *
34 * Stanford disclaimer of copyright
35 * --------------------------------
36 * Stanford University, owner of the copyright, hereby disclaims its
37 * copyright and all other rights in this software.  Hence, anyone may
38 * freely use it for any purpose without restriction.
39 *
40 * Maintenance of notices
41 * ----------------------
42 * In the interest of clarity regarding the origin and status of this
43 * SLAC software, this and all the preceding Stanford University notices
44 * are to remain affixed to any copy or derivative of this software made
45 * or distributed by the recipient and are to be affixed to any copy of
46 * software made or distributed by the recipient that contains a copy or
47 * derivative of this software.
48 *
49 * ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03
50 */
51
52#ifndef LIBBSP_ARM_ZYNQ_CACHE__H
53#define LIBBSP_ARM_ZYNQ_CACHE__H
54
55#include <libcpu/arm-cp15.h>
56
57/* These two defines also ensure that the rtems_cache_* functions have bodies */
58#define CPU_DATA_CACHE_ALIGNMENT 32
59#define CPU_INSTRUCTION_CACHE_ALIGNMENT 32
60
61#define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS
62
63#define L2CC_BASE_ADDR 0xF8F02000U
64
65#define ZYNQ_L2_CACHE_LINE_SIZE 32
66
67/**
68 * @defgroup zynq_cache Cache Support
69 * @ingroup arm_zynq
 * @brief Cache Functions and Definitions
71 * @{
72 */
73
/**
 * @brief L2CC (ARM PL310 / L2C-310 L2 cache controller) register layout.
 *
 * Each reserved_XXX member pads the structure up to the hexadecimal byte
 * offset encoded in its name, so the members that follow sit at their
 * documented controller offsets.
 */
typedef struct {
  uint32_t cache_id;                                    /* Cache ID */
  uint32_t cache_type;                                  /* Cache type */

  uint8_t  reserved_8[0x100 - 8];
  uint32_t ctrl;                                        /* Control */
/** @brief Enables the L2CC */
#define L2CC_ENABLE_MASK                 0x00000001

  /** @brief Auxiliary control */
  uint32_t aux_ctrl;
/** @brief Early BRESP Enable */
#define L2CC_AUX_EBRESPE_MASK            0x40000000
/** @brief Instruction Prefetch Enable */
#define L2CC_AUX_IPFE_MASK               0x20000000
/** @brief Data Prefetch Enable */
#define L2CC_AUX_DPFE_MASK               0x10000000
/** @brief Non-secure interrupt access control */
#define L2CC_AUX_NSIC_MASK               0x08000000
/** @brief Non-secure lockdown enable */
#define L2CC_AUX_NSLE_MASK               0x04000000
/** @brief Cache replacement policy */
#define L2CC_AUX_CRP_MASK                0x02000000
/** @brief Force write allocate */
#define L2CC_AUX_FWE_MASK                0x01800000
/** @brief Shared attribute override enable */
#define L2CC_AUX_SAOE_MASK               0x00400000
/** @brief Parity enable */
#define L2CC_AUX_PE_MASK                 0x00200000
/** @brief Event monitor bus enable */
#define L2CC_AUX_EMBE_MASK               0x00100000
/** @brief Way-size */
#define L2CC_AUX_WAY_SIZE_MASK           0x000E0000
/** @brief Associativity */
#define L2CC_AUX_ASSOC_MASK              0x00010000
/** @brief Shared attribute invalidate enable */
#define L2CC_AUX_SAIE_MASK               0x00002000
/** @brief Exclusive cache configuration */
#define L2CC_AUX_EXCL_CACHE_MASK         0x00001000
/** @brief Store buffer device limitation Enable */
#define L2CC_AUX_SBDLE_MASK              0x00000800
/** @brief High Priority for SO and Dev Reads Enable */
#define L2CC_AUX_HPSODRE_MASK            0x00000400

/** @brief Full line of zero enable */
#define L2CC_AUX_FLZE_MASK               0x00000001

/** @brief Enable all prefetching, */
#define L2CC_AUX_REG_DEFAULT_MASK        0x72360000
#define L2CC_AUX_REG_ZERO_MASK           0xFFF1FFFF

   /** @brief Latency for tag RAM */
   uint32_t tag_ram_ctrl;
#define L2CC_TAG_RAM_DEFAULT_MASK        0x00000111
   /** @brief Latency for data RAM */
   uint32_t data_ram_ctrl;
#define L2CC_DATA_RAM_DEFAULT_MASK       0x00000121

  uint8_t  reserved_110[0x200 - 0x110];
  /** @brief Event counter control */
  uint32_t ev_ctrl;
  /** @brief Event counter 1 configuration */
  uint32_t ev_cnt1_cfg;
  /** @brief Event counter 0 configuration */
  uint32_t ev_cnt0_cfg;
  /** @brief Event counter 1 value */
  uint32_t ev_cnt1;
  /** @brief Event counter 0 value */
  uint32_t ev_cnt0;
  /** @brief Interrupt enable mask */
  uint32_t int_mask;
  /** @brief Masked   interrupt status (read-only)*/
  uint32_t int_mask_status;
  /** @brief Unmasked interrupt status */
  uint32_t int_raw_status;
  /** @brief Interrupt clear */
  uint32_t int_clr;

/**
 * @name Interrupt bit masks
 *
 * @{
 */

/** @brief DECERR from L3 */
#define L2CC_INT_DECERR_MASK             0x00000100
/** @brief SLVERR from L3 */
#define L2CC_INT_SLVERR_MASK             0x00000080
/** @brief Error on L2 data RAM (Read) */
#define L2CC_INT_ERRRD_MASK              0x00000040
/** @brief Error on L2 tag RAM (Read) */
#define L2CC_INT_ERRRT_MASK              0x00000020
/** @brief Error on L2 data RAM (Write) */
#define L2CC_INT_ERRWD_MASK              0x00000010
/** @brief Error on L2 tag RAM (Write) */
#define L2CC_INT_ERRWT_MASK              0x00000008
/** @brief Parity Error on L2 data RAM (Read) */
#define L2CC_INT_PARRD_MASK              0x00000004
/** @brief Parity Error on L2 tag RAM (Read) */
#define L2CC_INT_PARRT_MASK              0x00000002
/** @brief Event Counter1/0 Overflow Increment */
#define L2CC_INT_ECNTR_MASK              0x00000001

/** @} */

  uint8_t  reserved_224[0x730 - 0x224];
  /** @brief Drain the STB */
  uint32_t cache_sync;
  uint8_t  reserved_734[0x770 - 0x734];
  /** @brief Invalidate line by PA */
  uint32_t inv_pa;
  uint8_t  reserved_774[0x77c - 0x774];
  /** @brief Invalidate by Way */
  uint32_t inv_way;
  uint8_t  reserved_780[0x7b0 - 0x780];
  /** @brief Clean Line by PA */
  uint32_t clean_pa;
  uint8_t  reserved_7b4[0x7b8 - 0x7b4];
  /** @brief Clean Line by Set/Way */
  uint32_t clean_index;
  /** @brief Clean by Way */
  uint32_t clean_way;
  uint8_t  reserved_7c0[0x7f0 - 0x7c0];
  /** @brief Clean and Invalidate Line by PA */
  uint32_t clean_inv_pa;
  uint8_t  reserved_7f4[0x7f8 - 0x7f4];
  /** @brief Clean and Invalidate Line by Set/Way */
  uint32_t clean_inv_indx;
  /** @brief Clean and Invalidate by Way */
  uint32_t clean_inv_way;

  /*
   * Pad to 0x900: the PL310 lockdown registers start at offset 0x900.
   * Without this pad every following member sits 0x100 too low (e.g.
   * addr_filtering_start would land at 0xB00 instead of 0xC00), which the
   * reserved_940 name below also confirms (lockdown regs end at 0x940).
   */
  uint8_t  reserved_800[0x900 - 0x800];
  /** @brief Data        lock down 0 */
  uint32_t d_lockdown_0;
  /** @brief Instruction lock down 0 */
  uint32_t i_lockdown_0;
  /** @brief Data        lock down 1 */
  uint32_t d_lockdown_1;
  /** @brief Instruction lock down 1 */
  uint32_t i_lockdown_1;
  /** @brief Data        lock down 2 */
  uint32_t d_lockdown_2;
  /** @brief Instruction lock down 2 */
  uint32_t i_lockdown_2;
  /** @brief Data        lock down 3 */
  uint32_t d_lockdown_3;
  /** @brief Instruction lock down 3 */
  uint32_t i_lockdown_3;
  /** @brief Data        lock down 4 */
  uint32_t d_lockdown_4;
  /** @brief Instruction lock down 4 */
  uint32_t i_lockdown_4;
  /** @brief Data        lock down 5 */
  uint32_t d_lockdown_5;
  /** @brief Instruction lock down 5 */
  uint32_t i_lockdown_5;
  /** @brief Data        lock down 6 */
  uint32_t d_lockdown_6;
  /** @brief Instruction lock down 6 */
  uint32_t i_lockdown_6;
  /** @brief Data        lock down 7 */
  uint32_t d_lockdown_7;
  /** @brief Instruction lock down 7 */
  uint32_t i_lockdown_7;

  uint8_t  reserved_940[0x950 - 0x940];
  /** @brief Lockdown by Line Enable */
  uint32_t lock_line_en;
  /** @brief Cache lockdown by way */
  uint32_t unlock_way;

  uint8_t  reserved_958[0xc00 - 0x958];
  /** @brief Address range redirect, part 1 */
  uint32_t addr_filtering_start;
  /** @brief Address range redirect, part 2 */
  uint32_t addr_filtering_end;
/** @brief Address filtering valid bits*/
#define L2CC_ADDR_FILTER_VALID_MASK      0xFFF00000
/** @brief Address filtering enable bit*/
#define L2CC_ADDR_FILTER_ENABLE_MASK     0x00000001

  uint8_t  reserved_c08[0xf40 - 0xc08];
  /** @brief Debug control */
  uint32_t debug_ctrl;
/** @brief Debug SPIDEN bit */
#define L2CC_DEBUG_SPIDEN_MASK           0x00000004
/** @brief Debug DWB bit, forces write through */
#define L2CC_DEBUG_DWB_MASK              0x00000002
/** @brief Debug DCL bit, disables cache line fill (bit 0; the old 0x2
 *  value duplicated the DWB mask) */
#define L2CC_DEBUG_DCL_MASK              0x00000001

  uint8_t  reserved_f44[0xf60 - 0xf44];
  /** @brief Purpose prefetch enables */
  uint32_t prefetch_ctrl;
  uint8_t  reserved_f64[0xf80 - 0xf64];
  /** @brief Purpose power controls */
  uint32_t power_ctrl;
} L2CC;
274
/**
 * @brief Read the L1 data cache geometry from CCSIDR.
 *
 * @param l1LineSize log2 of the line size in BYTES (field + 2 words + 2).
 * @param l1NumWays  associativity (field value is stored minus one).
 * @param l1NumSets  number of sets (field value is stored minus one).
 */
static inline void
zynq_cache_l1_cache_properties(uint32_t *l1LineSize,
                         uint32_t *l1NumWays,
                         uint32_t *l1NumSets)
{
  uint32_t ccsidr;

  /* Point CSSELR at the level-1 data cache, then read CCSIDR */
  arm_cp15_set_cache_size_selection(0);
  _ARM_Instruction_synchronization_barrier();
  ccsidr = arm_cp15_get_cache_size_id();

  *l1LineSize = (ccsidr & 0x0007U) + 4;          /* log2(words) + 2 -> log2(bytes) */
  *l1NumWays  = ((ccsidr >>  3) & 0x03ffU) + 1;  /* Number of Ways */
  *l1NumSets  = ((ccsidr >> 13) & 0x7fffU) + 1;  /* Number of Sets */
}
291
292
/**
 * @brief Clean and invalidate one L1 data cache line containing @a d_addr.
 */
static inline void
zynq_cache_l1_cache_flush_1_data_line(const void *d_addr)
{
  /* Select cache Level 1 and Data cache in CSSELR */
  arm_cp15_set_cache_size_selection(0);

  /* Clean and invalidate the line holding d_addr */
  arm_cp15_data_cache_clean_and_invalidate_line(d_addr);

  /* Wait for L1 flush to complete */
  _ARM_Data_synchronization_barrier();
}
305
306static inline void
307zynq_cache_l1_cache_flush_data_range(const void *d_addr, size_t n_bytes)
308{
309  const void * final_address;
310
311 /*
312  * Set d_addr to the beginning of the cache line; final_address indicates
313  * the last address_t which needs to be pushed. Increment d_addr and push
314  * the resulting line until final_address is passed.
315  */
316
317  if( n_bytes == 0 )
318    /* Do nothing if number of bytes to flush is zero */
319    return;
320
321  /* Select cache Level 1 and Data cache in CSSELR */
322  arm_cp15_set_cache_size_selection(0);
323
324  final_address = (void *)((size_t)d_addr + n_bytes - 1);
325  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
326  while( d_addr <= final_address )  {
327    arm_cp15_data_cache_clean_and_invalidate_line( d_addr );
328    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
329  }
330
331  /* Wait for L1 flush to complete */
332  _ARM_Data_synchronization_barrier();
333}
334
/**
 * @brief Clean and invalidate the entire L1 data cache by set/way.
 */
static inline void
zynq_cache_l1_cache_flush_entire_data(void)
{
  uint32_t l1LineSize, l1NumWays, l1NumSets;
  uint32_t way, set;

  /* Select cache Level 1 and Data cache in CSSELR */
  arm_cp15_set_cache_size_selection(0);
  _ARM_Instruction_synchronization_barrier();

  /* Get the L1 cache properties (l1LineSize is log2 of the line size) */
  zynq_cache_l1_cache_properties(&l1LineSize, &l1NumWays, &l1NumSets);

  /*
   * Iterate way and set counts directly.  The previous form precomputed
   * "ways = l1NumWays * (1 << 30)", which wraps a uint32_t to 0 for the
   * 4-way Cortex-A9 L1 data cache, so the loop never executed and the
   * cache was never flushed.
   */
  for (way = 0; way < l1NumWays; ++way) {
    for (set = 0; set < l1NumSets; ++set) {
      /* Flush by Set/Way; way index lives in bits [31:30] (4-way cache) */
      arm_cp15_data_cache_clean_and_invalidate_line_by_set_and_way(
        (way << 30) | (set << l1LineSize));
    }
  }

  /* Wait for L1 flush to complete */
  _ARM_Data_synchronization_barrier();
}
362
/**
 * @brief Invalidate (without cleaning) the L1 data cache line containing
 * @a d_addr.  Any dirty data in that line is discarded.
 */
static inline void
zynq_cache_l1_cache_invalidate_1_data_line(const void *d_addr)
{
  /* Select cache Level 1 and Data cache in CSSELR */
  arm_cp15_set_cache_size_selection(0);

  /* Invalidate the cache line */
  arm_cp15_data_cache_invalidate_line(d_addr);

  /* Wait for L1 invalidate to complete */
  _ARM_Data_synchronization_barrier();
}
375
376static inline void
377zynq_cache_l1_cache_invalidate_data_range(const void *d_addr, size_t n_bytes)
378{
379  const void * final_address;
380
381 /*
382  * Set d_addr to the beginning of the cache line; final_address indicates
383  * the last address_t which needs to be invalidated. Increment d_addr and
384  * invalidate the resulting line until final_address is passed.
385  */
386
387  if( n_bytes == 0 )
388    /* Do nothing if number of bytes to invalidate is zero */
389    return;
390
391  /* Select cache Level 1 and Data cache in CSSELR */
392  arm_cp15_set_cache_size_selection(0);
393
394  final_address = (void *)((size_t)d_addr + n_bytes - 1);
395  d_addr = (void *)((size_t)d_addr & ~(CPU_DATA_CACHE_ALIGNMENT - 1));
396  while( final_address >= d_addr ) {
397    arm_cp15_data_cache_invalidate_line( d_addr );
398    d_addr = (void *)((size_t)d_addr + CPU_DATA_CACHE_ALIGNMENT);
399  }
400
401  /* Wait for L1 invalidate to complete */
402  _ARM_Data_synchronization_barrier();
403}
404
/**
 * @brief Invalidate the entire L1 data cache by set/way (dirty lines are
 * discarded, not written back).
 */
static inline void
zynq_cache_l1_cache_invalidate_entire_data(void)
{
  uint32_t l1LineSize, l1NumWays, l1NumSets;
  uint32_t way, set;

  /* Select cache Level 1 and Data cache in CSSELR */
  arm_cp15_set_cache_size_selection(0);
  _ARM_Instruction_synchronization_barrier();

  /* Get the L1 cache properties (l1LineSize is log2 of the line size) */
  zynq_cache_l1_cache_properties(&l1LineSize, &l1NumWays, &l1NumSets);

  /*
   * Iterate way and set counts directly.  The previous form precomputed
   * "ways = l1NumWays * (1 << 30)", which wraps a uint32_t to 0 for the
   * 4-way Cortex-A9 L1 data cache, so no line was ever invalidated.
   */
  for (way = 0; way < l1NumWays; ++way) {
    for (set = 0; set < l1NumSets; ++set) {
      /* Invalidate by Set/Way; way index lives in bits [31:30] (4-way cache) */
      arm_cp15_data_cache_invalidate_line_by_set_and_way(
        (way << 30) | (set << l1LineSize));
    }
  }

  /* Wait for L1 invalidate to complete */
  _ARM_Data_synchronization_barrier();
}
432
/**
 * @brief Clean (write back without invalidating) the L1 data cache line
 * containing @a d_addr.
 */
static inline void
zynq_cache_l1_cache_store_data(const void *d_addr)
{
  /* Select cache Level 1 and Data cache in CSSELR */
  arm_cp15_set_cache_size_selection(0);

  /* Clean the line holding d_addr */
  arm_cp15_data_cache_clean_line(d_addr);

  /* Wait for L1 store to complete */
  _ARM_Data_synchronization_barrier();
}
445
/** @brief Freeze the L1 data cache — not implemented, currently a no-op. */
static inline void
zynq_cache_l1_cache_freeze_data(void)
{
  /* TODO */
}
451
/** @brief Unfreeze the L1 data cache — not implemented, currently a no-op. */
static inline void
zynq_cache_l1_cache_unfreeze_data(void)
{
  /* TODO */
}
457
/**
 * @brief Invalidate the L1 instruction cache line containing @a i_addr.
 */
static inline void
zynq_cache_l1_cache_invalidate_1_instruction_line(const void *i_addr)
{
  /* Select cache Level 1 and Instruction cache in CSSELR */
  arm_cp15_set_cache_size_selection(1);

  /* Invalidate the Instruction cache line */
  arm_cp15_instruction_cache_invalidate_line(i_addr);

  /* Wait for L1 invalidate to complete */
  _ARM_Data_synchronization_barrier();
}
470
471static inline void
472zynq_cache_l1_cache_invalidate_instruction_range(const void *i_addr, size_t n_bytes)
473{
474  const void * final_address;
475
476 /*
477  * Set i_addr to the beginning of the cache line; final_address indicates
478  * the last address_t which needs to be invalidated. Increment i_addr and
479  * invalidate the resulting line until final_address is passed.
480  */
481
482  if( n_bytes == 0 )
483    /* Do nothing if number of bytes to invalidate is zero */
484    return;
485
486  /* Select cache Level 1 and Instruction cache in CSSELR */
487  arm_cp15_set_cache_size_selection(1);
488
489  final_address = (void *)((size_t)i_addr + n_bytes - 1);
490  i_addr = (void *)((size_t)i_addr & ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1));
491  while( final_address > i_addr ) {
492    arm_cp15_instruction_cache_invalidate_line( i_addr );
493    i_addr = (void *)((size_t)i_addr + CPU_INSTRUCTION_CACHE_ALIGNMENT);
494  }
495
496  /* Wait for L1 invalidate to complete */
497  _ARM_Data_synchronization_barrier();
498}
499
/**
 * @brief Invalidate the whole L1 instruction cache.
 */
static inline void
zynq_cache_l1_cache_invalidate_entire_instruction(void)
{
  /* Select cache Level 1 and Instruction cache in CSSELR */
  arm_cp15_set_cache_size_selection(1);

  /* Invalidate the Instruction cache */
  arm_cp15_instruction_cache_invalidate();

  /* Wait for L1 invalidate to complete */
  _ARM_Data_synchronization_barrier();
}
512
/** @brief Freeze the L1 instruction cache — not implemented, currently a no-op. */
static inline void
zynq_cache_l1_cache_freeze_instruction(void)
{
  /* TODO */
}
518
/** @brief Unfreeze the L1 instruction cache — not implemented, currently a no-op. */
static inline void
zynq_cache_l1_cache_unfreeze_instruction(void)
{
  /* TODO */
}
524
/**
 * @brief Enable the L1 data cache (SCTLR.C) with interrupts disabled.
 *
 * The cache contents are invalidated first so no stale lines become
 * visible when the cache turns on.
 */
static inline void
zynq_cache_l1_cache_enable_data(void)
{
  rtems_interrupt_level level;
  uint32_t ctrl;

  rtems_interrupt_disable(level);

  /* Enable caches only if they are disabled */
  ctrl = arm_cp15_get_control();
  if (!(ctrl & ARM_CP15_CTRL_C)) {
    /* Invalidate the Data cache (it is off, so nothing needs cleaning) */
    zynq_cache_l1_cache_invalidate_entire_data();

    /* Enable the Data cache */
    ctrl |= ARM_CP15_CTRL_C;

    arm_cp15_set_control(ctrl);
  }

  rtems_interrupt_enable(level);
}
547
/**
 * @brief Disable the L1 data cache (SCTLR.C) with interrupts disabled.
 *
 * Dirty lines are flushed to memory before the cache is switched off.
 */
static inline void
zynq_cache_l1_cache_disable_data(void)
{
  rtems_interrupt_level level;

  rtems_interrupt_disable(level);

  /* Clean and invalidate the Data cache */
  zynq_cache_l1_cache_flush_entire_data();

  /* Disable the Data cache */
  arm_cp15_set_control(arm_cp15_get_control() & ~ARM_CP15_CTRL_C);

  rtems_interrupt_enable(level);
}
563
/**
 * @brief Enable the L1 instruction cache (SCTLR.I) with interrupts
 * disabled.  The cache is invalidated first to drop stale lines.
 */
static inline void
zynq_cache_l1_cache_enable_instruction(void)
{
  rtems_interrupt_level level;
  uint32_t              ctrl;

  rtems_interrupt_disable(level);

  /* Enable Instruction cache only if it is disabled */
  ctrl = arm_cp15_get_control();
  if (!(ctrl & ARM_CP15_CTRL_I)) {
    /* Invalidate the Instruction cache */
    zynq_cache_l1_cache_invalidate_entire_instruction();

    /* Enable the Instruction cache */
    ctrl |= ARM_CP15_CTRL_I;

    arm_cp15_set_control(ctrl);
  }

  rtems_interrupt_enable(level);
}
586
/**
 * @brief Disable the L1 instruction cache (SCTLR.I) with interrupts
 * disabled.  The cache is invalidated before being switched off.
 */
static inline void
zynq_cache_l1_cache_disable_instruction(void)
{
  rtems_interrupt_level level;

  rtems_interrupt_disable(level);

  /* Synchronize the processor */
  _ARM_Data_synchronization_barrier();

  /* Invalidate the Instruction cache */
  zynq_cache_l1_cache_invalidate_entire_instruction();

  /* Disable the Instruction cache */
  arm_cp15_set_control(arm_cp15_get_control() & ~ARM_CP15_CTRL_I);

  rtems_interrupt_enable(level);
}
605
606
/**
 * @brief Clean and invalidate one L2 cache line by physical address.
 *
 * NOTE(review): unlike the range variant, this does not poll cache_sync
 * after the write — confirm whether the DSB alone is sufficient here.
 */
static inline void
zynq_cache_l2_cache_flush_1_line(const void *d_addr)
{
  volatile L2CC* l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  l2cc->clean_inv_pa = (uint32_t)d_addr;

  /* Synchronize the processor */
  _ARM_Data_synchronization_barrier();
}
617
618static inline void
619zynq_cache_l2_cache_flush_range(const void *d_addr, size_t n_bytes)
620{
621  volatile L2CC *l2cc = (volatile L2CC *)L2CC_BASE_ADDR;
622
623  if (n_bytes != 0) {
624    uint32_t       adx = (uint32_t)d_addr;
625    const uint32_t end = adx + n_bytes;
626
627    /* Back starting address up to start of a line and flush until end */
628    for (adx &= ~(ZYNQ_L2_CACHE_LINE_SIZE - 1);
629         adx < end;
630         adx += ZYNQ_L2_CACHE_LINE_SIZE) {
631      l2cc->clean_inv_pa = adx;
632    }
633  }
634
635  /* Wait for L2 flush to complete */
636  while (l2cc->cache_sync != 0);
637
638  /* Synchronize the processor */
639  _ARM_Data_synchronization_barrier();
640}
641
/**
 * @brief Clean and invalidate the entire L2 cache by way.
 *
 * 0x0000FFFF selects 16 ways; presumably the Zynq L2C-310 has fewer ways
 * and extra bits are ignored — TODO confirm against the controller TRM.
 */
static inline void
zynq_cache_l2_cache_flush_entire(void)
{
  volatile L2CC* l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  /* Flush the caches */
  l2cc->clean_inv_way = 0x0000FFFFU;

  /* Wait for the flush to complete */
  while (l2cc->cache_sync != 0);

  /* Synchronize the processor */
  _ARM_Data_synchronization_barrier();
}
656
/**
 * @brief Invalidate one L2 cache line by physical address (dirty data in
 * that line is discarded).
 */
static inline void
zynq_cache_l2_cache_invalidate_1_line(const void *d_addr)
{
  volatile L2CC* l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  l2cc->inv_pa = (uint32_t)d_addr;

  /* Synchronize the processor */
  _ARM_Data_synchronization_barrier();
}
667
668static inline void
669zynq_cache_l2_cache_invalidate_range(const void* d_addr, size_t n_bytes)
670{
671  volatile L2CC* l2cc = (volatile L2CC *)L2CC_BASE_ADDR;
672
673  if (n_bytes != 0) {
674    uint32_t       adx = (uint32_t)d_addr;
675    const uint32_t end = adx + n_bytes;
676
677    /* Back starting address up to start of a line and invalidate until end */
678    for (adx &= ~(ZYNQ_L2_CACHE_LINE_SIZE - 1);
679         adx < end;
680         adx += ZYNQ_L2_CACHE_LINE_SIZE) {
681      l2cc->inv_pa = adx;
682    }
683  }
684
685  /* Wait for L2 invalidate to complete */
686  while (l2cc->cache_sync != 0);
687
688  /* Synchronize the processor */
689  _ARM_Data_synchronization_barrier();
690}
691
/**
 * @brief Invalidate the entire L2 cache by way (dirty data is discarded).
 */
static inline void
zynq_cache_l2_cache_invalidate_entire(void)
{
  volatile L2CC *l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  /* Invalidate the caches */
  l2cc->inv_way = 0xFFFFU;

  /* Wait for the invalidate to complete */
  while (l2cc->cache_sync != 0);

  /* Synchronize the processor */
  _ARM_Data_synchronization_barrier();
}
706
/**
 * @brief Clean (write back without invalidating) the L2 line containing
 * the physical address @a d_addr.
 */
static inline void
zynq_cache_l2_cache_store(const void *d_addr)
{
  volatile L2CC *l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  l2cc->clean_pa = (uint32_t)d_addr;

  /* Synchronize the processor */
  _ARM_Data_synchronization_barrier();
}
717
/** @brief Freeze the L2 cache — not implemented, currently a no-op. */
static inline void
zynq_cache_l2_cache_freeze(void)
{
  /* TODO */
}
723
/** @brief Unfreeze the L2 cache — not implemented, currently a no-op. */
static inline void
zynq_cache_l2_cache_unfreeze(void)
{
  /* TODO */
}
729
/**
 * @brief Enable the L2 cache controller.
 *
 * Programs the auxiliary control and RAM latency registers, clears any
 * pending L2CC interrupts, sets the controller enable bit, and finally
 * also sets SCTLR.C (the L1 data cache enable).  No-op when the
 * controller is already enabled.
 */
static inline void
zynq_cache_l2_cache_enable(void)
{
  volatile L2CC *l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  /* Only enable if L2CC is currently disabled */
  if ((l2cc->ctrl & L2CC_ENABLE_MASK) == 0) {
    rtems_interrupt_level level;
    uint32_t value;

    rtems_interrupt_disable(level);

    /* Set up the way size and latencies (keep reserved bits, set defaults) */
    value               = l2cc->aux_ctrl;
    value              &= L2CC_AUX_REG_ZERO_MASK;
    value              |= L2CC_AUX_REG_DEFAULT_MASK;
    l2cc->aux_ctrl      = value;
    l2cc->tag_ram_ctrl  = L2CC_TAG_RAM_DEFAULT_MASK;
    l2cc->data_ram_ctrl = L2CC_DATA_RAM_DEFAULT_MASK;

    /* Clear the pending interrupts */
    l2cc->int_clr       = l2cc->int_raw_status;

    /* Enable the L2CC */
    l2cc->ctrl         |= L2CC_ENABLE_MASK;

    /* Synchronize the processor */
    _ARM_Data_synchronization_barrier();

    /* Enable the (L1) Data cache as well */
    arm_cp15_set_control(arm_cp15_get_control() | ARM_CP15_CTRL_C);

    /* Synchronize the processor */
    _ARM_Data_synchronization_barrier();

    rtems_interrupt_enable(level);
  }
}
768
/**
 * @brief Disable the L2 cache controller.
 *
 * Temporarily turns off the L1 data cache (SCTLR.C), flushes the whole
 * L2, clears the controller enable bit, then re-enables the L1 data
 * cache.  No-op when the controller is already disabled.
 */
static inline void
zynq_cache_l2_cache_disable(void)
{
  volatile L2CC *l2cc = (volatile L2CC *)L2CC_BASE_ADDR;

  if (l2cc->ctrl & L2CC_ENABLE_MASK) {
    rtems_interrupt_level level;
    uint32_t              ctrl;

    rtems_interrupt_disable(level);

    ctrl = arm_cp15_get_control();

    /* Disable the L1 Data cache */
    ctrl &= ~ARM_CP15_CTRL_C;

    arm_cp15_set_control(ctrl);

    /* Synchronize the processor */
    _ARM_Data_synchronization_barrier();

    /* Clean and Invalidate L2 Cache */
    zynq_cache_l2_cache_flush_entire();

    /* Disable the L2 cache */
    l2cc->ctrl &= ~L2CC_ENABLE_MASK;

    /* Enable the L1 Data cache */
    ctrl |= ARM_CP15_CTRL_C;

    arm_cp15_set_control(ctrl);

    /* Synchronize the processor */
    _ARM_Data_synchronization_barrier();

    rtems_interrupt_enable(level);
  }
}
807
808
/** @brief RTEMS cache manager hook: enable L1 data cache, then L2. */
static inline void
_CPU_cache_enable_data(void)
{
  zynq_cache_l1_cache_enable_data();
  zynq_cache_l2_cache_enable();
}
815
/** @brief RTEMS cache manager hook: disable L1 data cache, then L2. */
static inline void
_CPU_cache_disable_data(void)
{
  zynq_cache_l1_cache_disable_data();
  zynq_cache_l2_cache_disable();
}
822
/** @brief RTEMS cache manager hook: enable L1 instruction cache, then L2. */
static inline void
_CPU_cache_enable_instruction(void)
{
  zynq_cache_l1_cache_enable_instruction();
  zynq_cache_l2_cache_enable();
}
829
/** @brief RTEMS cache manager hook: disable L1 instruction cache, then L2. */
static inline void
_CPU_cache_disable_instruction(void)
{
  zynq_cache_l1_cache_disable_instruction();
  zynq_cache_l2_cache_disable();
}
836
837static inline void
838_CPU_cache_flush_data_range(const void *d_addr, size_t n_bytes)
839{
840  volatile L2CC *l2cc = (volatile L2CC *)L2CC_BASE_ADDR;
841
842  if (n_bytes != 0) {
843    uint32_t       adx = (uint32_t)d_addr;
844    const uint32_t end = adx + n_bytes;
845
846    /* Select cache Level 1 and Data cache in CSSELR */
847    arm_cp15_set_cache_size_selection(0);
848
849    /* Back starting address up to start of a line and flush until end */
850    for (adx &= ~(CPU_DATA_CACHE_ALIGNMENT - 1);
851         adx < end;
852         adx += CPU_DATA_CACHE_ALIGNMENT) {
853      /* Flush L1 Data cache line */
854      arm_cp15_data_cache_clean_and_invalidate_line( (const void*)adx );
855
856      /* Flush L2 cache line */
857      l2cc->clean_inv_pa = adx;
858
859      _ARM_Data_synchronization_barrier();
860    }
861  }
862
863  /* Wait for L1 and L2 flush to complete */
864  _ARM_Data_synchronization_barrier();
865  while (l2cc->cache_sync != 0);
866}
867
/** @brief Flush all data caches: L1 first (pushes dirty lines to L2), then L2. */
static inline void
_CPU_cache_flush_entire_data(void)
{
  zynq_cache_l1_cache_flush_entire_data();
  zynq_cache_l2_cache_flush_entire();
}
874
875static inline void
876_CPU_cache_invalidate_data_range(const void *d_addr, size_t n_bytes)
877{
878  volatile L2CC *l2cc = (volatile L2CC *)L2CC_BASE_ADDR;
879
880  if (n_bytes != 0) {
881    uint32_t       adx = (uint32_t)d_addr;
882    const uint32_t end = adx + n_bytes;
883
884    /* Select cache Level 1 and Data cache in CSSELR */
885    arm_cp15_set_cache_size_selection(0);
886
887    /* Back starting address up to start of a line and invalidate until end */
888    for (adx &= ~(CPU_DATA_CACHE_ALIGNMENT - 1);
889         adx < end;
890         adx += CPU_DATA_CACHE_ALIGNMENT) {
891      /* Invalidate L2 cache line */
892      l2cc->inv_pa = adx;
893      _ARM_Data_synchronization_barrier();
894
895      /* Invalidate L1 Data cache line */
896      arm_cp15_data_cache_invalidate_line( (const void *)adx );
897    }
898  }
899
900  /* Wait for L1 and L2 invalidate to complete */
901  _ARM_Data_synchronization_barrier();
902  while (l2cc->cache_sync != 0);
903}
904
/** @brief Invalidate all data caches: L2 first, then L1 (outer-to-inner). */
static inline void
_CPU_cache_invalidate_entire_data(void)
{
  zynq_cache_l2_cache_invalidate_entire();
  zynq_cache_l1_cache_invalidate_entire_data();
}
911
/** @brief Clean (write back) one data line in L1, then the matching L2 line. */
static inline void
_CPU_cache_store_data_line(const void *d_addr)
{
  zynq_cache_l1_cache_store_data(d_addr);
  zynq_cache_l2_cache_store(d_addr);
}
918
/** @brief Freeze data caches — both levels are currently unimplemented no-ops. */
static inline void
_CPU_cache_freeze_data(void)
{
  zynq_cache_l1_cache_freeze_data();
  zynq_cache_l2_cache_freeze();
}
925
/** @brief Unfreeze data caches — both levels are currently unimplemented no-ops. */
static inline void
_CPU_cache_unfreeze_data(void)
{
  zynq_cache_l1_cache_unfreeze_data();
  zynq_cache_l2_cache_unfreeze();
}
932
933static inline void
934_CPU_cache_invalidate_instruction_range(const void *i_addr, size_t n_bytes)
935{
936  volatile L2CC *l2cc = (volatile L2CC *)L2CC_BASE_ADDR;
937
938  if (n_bytes != 0) {
939    uint32_t       adx = (uint32_t)i_addr;
940    const uint32_t end = adx + n_bytes;
941
942    /* Select cache Level 1 and Instruction cache in CSSELR */
943    arm_cp15_set_cache_size_selection(1);
944
945    /* Back starting address up to start of a line and invalidate until end */
946    for (adx &= ~(CPU_INSTRUCTION_CACHE_ALIGNMENT - 1);
947         adx < end;
948         adx += CPU_INSTRUCTION_CACHE_ALIGNMENT) {
949      /* Invalidate L2 cache line */
950      l2cc->inv_pa = adx;
951      _ARM_Data_synchronization_barrier();
952
953      /* Invalidate L1 I-cache line */
954      arm_cp15_instruction_cache_invalidate_line( (const void *)adx );
955    }
956  }
957
958  /* Wait for L1 and L2 invalidate to complete */
959  _ARM_Data_synchronization_barrier();
960  while (l2cc->cache_sync != 0);
961}
962
/** @brief Invalidate all instruction caches: L2 first, then L1 (outer-to-inner). */
static inline void
_CPU_cache_invalidate_entire_instruction(void)
{
  zynq_cache_l2_cache_invalidate_entire();
  zynq_cache_l1_cache_invalidate_entire_instruction();
}
969
/** @brief Freeze instruction caches — both levels are currently unimplemented no-ops. */
static inline void
_CPU_cache_freeze_instruction(void)
{
  zynq_cache_l1_cache_freeze_instruction();
  zynq_cache_l2_cache_freeze();
}
976
/** @brief Unfreeze instruction caches — both levels are currently unimplemented no-ops. */
static inline void
_CPU_cache_unfreeze_instruction(void)
{
  zynq_cache_l1_cache_unfreeze_instruction();
  zynq_cache_l2_cache_unfreeze();
}
983
984/** @} */
985
986#endif /* LIBBSP_ARM_ZYNQ_CACHE__H */
Note: See TracBrowser for help on using the repository browser.