source: rtems/c/src/lib/libbsp/arm/shared/arm-l2c-310/cache_.h @ 861d315

4.115
Last change on this file since 861d315 was 861d315, checked in by Sebastian Huber <sebastian.huber@…>, on 11/19/14 at 13:05:36

bsps/arm: L2C 310 use L2C_310_* prefix throughout

  • Property mode set to 100644
File size: 38.8 KB
Line 
1/**
2 * @file cache_.h
3 *
4 * @ingroup L2C-310_cache
5 *
6 * @brief Cache definitions and functions.
7 *
8 * This file implements handling for the ARM L2C-310 cache controller
9 */
10
11/*
12 * Authorship
13 * ----------
14 * This software was created by
15 *     R. Claus <claus@slac.stanford.edu>, 2013,
16 *       Stanford Linear Accelerator Center, Stanford University.
17 *
18 * Acknowledgement of sponsorship
19 * ------------------------------
20 * This software was produced by
21 *     the Stanford Linear Accelerator Center, Stanford University,
22 *     under Contract DE-AC03-76SFO0515 with the Department of Energy.
23 *
24 * Government disclaimer of liability
25 * ----------------------------------
26 * Neither the United States nor the United States Department of Energy,
27 * nor any of their employees, makes any warranty, express or implied, or
28 * assumes any legal liability or responsibility for the accuracy,
29 * completeness, or usefulness of any data, apparatus, product, or process
30 * disclosed, or represents that its use would not infringe privately owned
31 * rights.
32 *
33 * Stanford disclaimer of liability
34 * --------------------------------
35 * Stanford University makes no representations or warranties, express or
36 * implied, nor assumes any liability for the use of this software.
37 *
38 * Stanford disclaimer of copyright
39 * --------------------------------
40 * Stanford University, owner of the copyright, hereby disclaims its
41 * copyright and all other rights in this software.  Hence, anyone may
42 * freely use it for any purpose without restriction.
43 *
44 * Maintenance of notices
45 * ----------------------
46 * In the interest of clarity regarding the origin and status of this
47 * SLAC software, this and all the preceding Stanford University notices
48 * are to remain affixed to any copy or derivative of this software made
49 * or distributed by the recipient and are to be affixed to any copy of
50 * software made or distributed by the recipient that contains a copy or
51 * derivative of this software.
52 *
53 * ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03
54 */
55
56#ifndef LIBBSP_ARM_SHARED_L2C_310_CACHE_H
57#define LIBBSP_ARM_SHARED_L2C_310_CACHE_H
58
59#include <assert.h>
60#include <bsp.h>
61#include <bsp/fatal.h>
62#include <libcpu/arm-cp15.h>
63#include <rtems/rtems/intr.h>
64#include <bsp/arm-release-id.h>
65#include <bsp/arm-errata.h>
66#include "../include/arm-cache-l1.h"
67
68#ifdef __cplusplus
69extern "C" {
70#endif /* __cplusplus */
71
/* These two defines also ensure that the rtems_cache_* functions have bodies */
#define CPU_DATA_CACHE_ALIGNMENT ARM_CACHE_L1_CPU_DATA_ALIGNMENT
#define CPU_INSTRUCTION_CACHE_ALIGNMENT ARM_CACHE_L1_CPU_INSTRUCTION_ALIGNMENT
#define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS \
  ARM_CACHE_L1_CPU_SUPPORT_PROVIDES_RANGE_FUNCTIONS
#define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS

/** @brief Mask used to align an address down to a data cache line */
#define L2C_310_DATA_LINE_MASK ( CPU_DATA_CACHE_ALIGNMENT - 1 )
/** @brief Mask used to align an address down to an instruction cache line */
#define L2C_310_INSTRUCTION_LINE_MASK \
  ( CPU_INSTRUCTION_CACHE_ALIGNMENT \
    - 1 )
/** @brief The L2C-310 has a fixed associativity of eight ways */
#define L2C_310_NUM_WAYS 8
/** @brief One bit per way, used for the by-way maintenance registers */
#define L2C_310_WAY_MASK ( ( 1 << L2C_310_NUM_WAYS ) - 1 )

/**
 * @brief Minimum of two values.
 *
 * Both arguments are fully parenthesized so that operands containing
 * low-precedence operators (e.g. "x | 4") expand correctly.  Note that
 * one argument is evaluated twice, so avoid side effects.
 */
#define L2C_310_MIN( a, b ) \
  ( ( a ) < ( b ) ? ( a ) : ( b ) )

/** @brief Maximum number of bytes processed per interrupt-lock section */
#define L2C_310_MAX_LOCKING_BYTES (4 * 1024)
90
91
/* RTL release number as can be read from cache_id register */
/* Values correspond to the RTL release field of the Cache ID register,
 * i.e. cache_id & L2C_310_ID_RTL_MASK (bits [5:0]).  Note the encoding
 * is not contiguous: 0x1, 0x3 and 0x7 are not assigned to any release. */
typedef enum {
  L2C_310_RTL_RELEASE_R0_P0 = 0x0,
  L2C_310_RTL_RELEASE_R1_P0 = 0x2,
  L2C_310_RTL_RELEASE_R2_P0 = 0x4,
  L2C_310_RTL_RELEASE_R3_P0 = 0x5,
  L2C_310_RTL_RELEASE_R3_P1 = 0x6,
  L2C_310_RTL_RELEASE_R3_P2 = 0x8,
  L2C_310_RTL_RELEASE_R3_P3 = 0x9
} cache_l2c_310_rtl_release;
102
/**
 * @defgroup L2C-310_cache Cache Support
 * @ingroup arm_shared
 * @brief Cache Functions and Definitions
 * @{
 */
109
110
/**
 * @brief L2CC Register Offsets
 *
 * Memory map of the L2C-310 register block.  The reserved_<offset> byte
 * arrays pad the structure so that each register lands at the offset
 * given by the ARM L2C-310 TRM register summary.
 */
typedef struct {
  /** @brief Cache ID */
  uint32_t cache_id;
#define L2C_310_ID_RTL_MASK 0x3f
#define L2C_310_ID_PART_MASK ( 0xf << 6 )
#define L2C_310_ID_PART_L210 ( 1 << 6 )
#define L2C_310_ID_PART_L310 ( 3 << 6 )
#define L2C_310_ID_IMPL_MASK ( 0xff << 24 )
  /** @brief Cache type */
  uint32_t cache_type;
/** @brief 1 if data banking implemented, 0 if not */
#define L2C_310_TYPE_DATA_BANKING_MASK 0x80000000
/** @brief 11xy, where: x=1 if pl310_LOCKDOWN_BY_MASTER is defined, otherwise 0 */
#define L2C_310_TYPE_CTYPE_MASK 0x1E000000
/** @brief y=1 if pl310_LOCKDOWN_BY_LINE is defined, otherwise 0. */
#define L2C_310_TYPE_CTYPE_SHIFT 25
/** @brief 1 for Harvard architecture, 0 for unified architecture */
#define L2C_310_TYPE_HARVARD_MASK 0x01000000
/** @brief Data cache way size = 2 Exp(value + 2) KB */
#define L2C_310_TYPE_SIZE_D_WAYS_MASK 0x00700000
#define L2C_310_TYPE_SIZE_D_WAYS_SHIFT 20
/** @brief Associativity aka number of data ways = (value * 8) + 8 */
#define L2C_310_TYPE_NUM_D_WAYS_MASK 0x00040000
#define L2C_310_TYPE_NUM_D_WAYS_SHIFT 18
/** @brief Data cache line length 00 - 32 */
#define L2C_310_TYPE_LENGTH_D_LINE_MASK 0x00003000
#define L2C_310_TYPE_LENGTH_D_LINE_SHIFT 12
#define L2C_310_TYPE_LENGTH_D_LINE_VAL_32 0x0
/** @brief Instruction cache way size = 2 Exp(value + 2) KB */
#define L2C_310_TYPE_SIZE_I_WAYS_MASK 0x00000700
#define L2C_310_TYPE_SIZE_I_WAYS_SHIFT 8
/** @brief Associativity aka number of instruction ways = (value * 8) + 8 */
#define L2C_310_TYPE_NUM_I_WAYS_MASK 0x00000040
#define L2C_310_TYPE_NUM_I_WAYS_SHIFT 6
/** @brief Instruction cache line length 00 - 32 */
#define L2C_310_TYPE_LENGTH_I_LINE_MASK 0x00000003
#define L2C_310_TYPE_LENGTH_I_LINE_SHIFT 0
#define L2C_310_TYPE_LENGTH_I_LINE_VAL_32 0x0

  uint8_t reserved_8[0x100 - 8];
  uint32_t ctrl; /* Control */
/** @brief Enables the L2CC */
#define L2C_310_ENABLE_MASK 0x00000001

  /** @brief Auxiliary control */
  uint32_t aux_ctrl;

/** @brief Early BRESP Enable */
#define L2C_310_AUX_EBRESPE_MASK 0x40000000

/** @brief Instruction Prefetch Enable */
#define L2C_310_AUX_IPFE_MASK 0x20000000

/** @brief Data Prefetch Enable */
#define L2C_310_AUX_DPFE_MASK 0x10000000

/** @brief Non-secure interrupt access control */
#define L2C_310_AUX_NSIC_MASK 0x08000000

/** @brief Non-secure lockdown enable */
#define L2C_310_AUX_NSLE_MASK 0x04000000

/** @brief Cache replacement policy */
#define L2C_310_AUX_CRP_MASK 0x02000000

/** @brief Force write allocate */
#define L2C_310_AUX_FWE_MASK 0x01800000

/** @brief Shared attribute override enable */
#define L2C_310_AUX_SAOE_MASK 0x00400000

/** @brief Parity enable */
#define L2C_310_AUX_PE_MASK 0x00200000

/** @brief Event monitor bus enable */
#define L2C_310_AUX_EMBE_MASK 0x00100000

/** @brief Way-size */
#define L2C_310_AUX_WAY_SIZE_MASK 0x000E0000
#define L2C_310_AUX_WAY_SIZE_SHIFT 17

/** @brief Associativity (number of ways) */
#define L2C_310_AUX_ASSOC_MASK 0x00010000

/** @brief Shared attribute invalidate enable */
#define L2C_310_AUX_SAIE_MASK 0x00002000

/** @brief Exclusive cache configuration */
#define L2C_310_AUX_EXCL_CACHE_MASK 0x00001000

/** @brief Store buffer device limitation Enable */
#define L2C_310_AUX_SBDLE_MASK 0x00000800

/** @brief High Priority for SO and Dev Reads Enable */
#define L2C_310_AUX_HPSODRE_MASK 0x00000400

/** @brief Full line of zero enable */
#define L2C_310_AUX_FLZE_MASK 0x00000001

/** @brief Enable all prefetching.  The expansion is wrapped in outer
 *  parentheses so it is safe in any expression context. */
#define L2C_310_AUX_REG_DEFAULT_MASK \
  ( ( L2C_310_AUX_WAY_SIZE_MASK & ( 0x3 << L2C_310_AUX_WAY_SIZE_SHIFT ) ) \
    | L2C_310_AUX_PE_MASK      /* Prefetch enable */ \
    | L2C_310_AUX_SAOE_MASK    /* Shared attribute override enable */ \
    | L2C_310_AUX_CRP_MASK     /* Cache replacement policy */ \
    | L2C_310_AUX_DPFE_MASK    /* Data prefetch enable */ \
    | L2C_310_AUX_IPFE_MASK    /* Instruction prefetch enable */ \
    | L2C_310_AUX_EBRESPE_MASK /* Early BRESP enable */ )

#define L2C_310_AUX_REG_ZERO_MASK 0xFFF1FFFF

/** @brief 1 cycle of latency, there is no additional latency for tag RAM */
#define L2C_310_RAM_1_CYCLE_LAT_VAL 0x00000000
/** @brief 2 cycles of latency for tag RAM */
#define L2C_310_RAM_2_CYCLE_LAT_VAL 0x00000001
/** @brief 3 cycles of latency for tag RAM */
#define L2C_310_RAM_3_CYCLE_LAT_VAL 0x00000002
/** @brief 4 cycles of latency for tag RAM */
#define L2C_310_RAM_4_CYCLE_LAT_VAL 0x00000003
/** @brief 5 cycles of latency for tag RAM */
#define L2C_310_RAM_5_CYCLE_LAT_VAL 0x00000004
/** @brief 6 cycles of latency for tag RAM */
#define L2C_310_RAM_6_CYCLE_LAT_VAL 0x00000005
/** @brief 7 cycles of latency for tag RAM */
#define L2C_310_RAM_7_CYCLE_LAT_VAL 0x00000006
/** @brief 8 cycles of latency for tag RAM */
#define L2C_310_RAM_8_CYCLE_LAT_VAL 0x00000007
/** @brief Shift left setup latency values by this value */
#define L2C_310_RAM_SETUP_SHIFT 0x00000000
/** @brief Shift left read latency values by this value */
#define L2C_310_RAM_READ_SHIFT 0x00000004
/** @brief Shift left write latency values by this value */
#define L2C_310_RAM_WRITE_SHIFT 0x00000008
/** @brief Mask for RAM setup latency */
#define L2C_310_RAM_SETUP_LAT_MASK 0x00000007
/** @brief Mask for RAM read latency */
#define L2C_310_RAM_READ_LAT_MASK 0x00000070
/** @brief Mask for RAM write latency */
#define L2C_310_RAM_WRITE_LAT_MASK 0x00000700
  /** @brief Latency for tag RAM */
  uint32_t tag_ram_ctrl;
/** @brief Default latency for tag RAM */
#define L2C_310_TAG_RAM_DEFAULT_LAT \
  ( ( L2C_310_RAM_2_CYCLE_LAT_VAL << L2C_310_RAM_SETUP_SHIFT ) \
    | ( L2C_310_RAM_2_CYCLE_LAT_VAL << L2C_310_RAM_READ_SHIFT ) \
    | ( L2C_310_RAM_2_CYCLE_LAT_VAL << L2C_310_RAM_WRITE_SHIFT ) )
  /** @brief Latency for data RAM */
  uint32_t data_ram_ctrl;
/** @brief Default latency for data RAM */
#define L2C_310_DATA_RAM_DEFAULT_MASK \
  ( ( L2C_310_RAM_2_CYCLE_LAT_VAL << L2C_310_RAM_SETUP_SHIFT ) \
    | ( L2C_310_RAM_3_CYCLE_LAT_VAL << L2C_310_RAM_READ_SHIFT ) \
    | ( L2C_310_RAM_2_CYCLE_LAT_VAL << L2C_310_RAM_WRITE_SHIFT ) )

  uint8_t reserved_110[0x200 - 0x110];

  /** @brief Event counter control */
  uint32_t ev_ctrl;

  /** @brief Event counter 1 configuration */
  uint32_t ev_cnt1_cfg;

  /** @brief Event counter 0 configuration */
  uint32_t ev_cnt0_cfg;

  /** @brief Event counter 1 value */
  uint32_t ev_cnt1;

  /** @brief Event counter 0 value */
  uint32_t ev_cnt0;

  /** @brief Interrupt enable mask */
  uint32_t int_mask;

  /** @brief Masked   interrupt status (read-only)*/
  uint32_t int_mask_status;

  /** @brief Unmasked interrupt status */
  uint32_t int_raw_status;

  /** @brief Interrupt clear */
  uint32_t int_clr;

/**
 * @name Interrupt bit masks
 *
 * @{
 */

/** @brief DECERR from L3 */
#define L2C_310_INT_DECERR_MASK 0x00000100

/** @brief SLVERR from L3 */
#define L2C_310_INT_SLVERR_MASK 0x00000080

/** @brief Error on L2 data RAM (Read) */
#define L2C_310_INT_ERRRD_MASK 0x00000040

/** @brief Error on L2 tag RAM (Read) */
#define L2C_310_INT_ERRRT_MASK 0x00000020

/** @brief Error on L2 data RAM (Write) */
#define L2C_310_INT_ERRWD_MASK 0x00000010

/** @brief Error on L2 tag RAM (Write) */
#define L2C_310_INT_ERRWT_MASK 0x00000008

/** @brief Parity Error on L2 data RAM (Read) */
#define L2C_310_INT_PARRD_MASK 0x00000004

/** @brief Parity Error on L2 tag RAM (Read) */
#define L2C_310_INT_PARRT_MASK 0x00000002

/** @brief Event Counter1/0 Overflow Increment */
#define L2C_310_INT_ECNTR_MASK 0x00000001

/** @} */

  uint8_t reserved_224[0x730 - 0x224];

  /** @brief Drain the STB */
  uint32_t cache_sync;
  uint8_t reserved_734[0x740 - 0x734];
  /** @brief ARM Errata 753970 for pl310-r3p0 */
  uint32_t dummy_cache_sync_reg;
  uint8_t reserved_744[0x770 - 0x744];

  /** @brief Invalidate line by PA */
  uint32_t inv_pa;
  uint8_t reserved_774[0x77c - 0x774];

  /** @brief Invalidate by Way */
  uint32_t inv_way;
  uint8_t reserved_780[0x7b0 - 0x780];

  /** @brief Clean Line by PA */
  uint32_t clean_pa;
  uint8_t reserved_7b4[0x7b8 - 0x7b4];

  /** @brief Clean Line by Set/Way */
  uint32_t clean_index;

  /** @brief Clean by Way */
  uint32_t clean_way;
  uint8_t reserved_7c0[0x7f0 - 0x7c0];

  /** @brief Clean and Invalidate Line by PA */
  uint32_t clean_inv_pa;
  uint8_t reserved_7f4[0x7f8 - 0x7f4];

  /** @brief Clean and Invalidate Line by Set/Way */
  uint32_t clean_inv_indx;

  /** @brief Clean and Invalidate by Way */
  uint32_t clean_inv_way;

  /* Padding so that the lockdown registers start at offset 0x900 as
   * required by the L2C-310 register map; without it every following
   * register would be addressed 0x100 bytes too low. */
  uint8_t reserved_800[0x900 - 0x800];

  /** @brief Data        lock down 0 */
  uint32_t d_lockdown_0;

  /** @brief Instruction lock down 0 */
  uint32_t i_lockdown_0;

  /** @brief Data        lock down 1 */
  uint32_t d_lockdown_1;

  /** @brief Instruction lock down 1 */
  uint32_t i_lockdown_1;

  /** @brief Data        lock down 2 */
  uint32_t d_lockdown_2;

  /** @brief Instruction lock down 2 */
  uint32_t i_lockdown_2;

  /** @brief Data        lock down 3 */
  uint32_t d_lockdown_3;

  /** @brief Instruction lock down 3 */
  uint32_t i_lockdown_3;

  /** @brief Data        lock down 4 */
  uint32_t d_lockdown_4;

  /** @brief Instruction lock down 4 */
  uint32_t i_lockdown_4;

  /** @brief Data        lock down 5 */
  uint32_t d_lockdown_5;

  /** @brief Instruction lock down 5 */
  uint32_t i_lockdown_5;

  /** @brief Data        lock down 6 */
  uint32_t d_lockdown_6;

  /** @brief Instruction lock down 6 */
  uint32_t i_lockdown_6;

  /** @brief Data        lock down 7 */
  uint32_t d_lockdown_7;

  /** @brief Instruction lock down 7 */
  uint32_t i_lockdown_7;

  uint8_t reserved_940[0x950 - 0x940];

  /** @brief Lockdown by Line Enable */
  uint32_t lock_line_en;

  /** @brief Cache lockdown by way */
  uint32_t unlock_way;

  uint8_t reserved_958[0xc00 - 0x958];

  /** @brief Address range redirect, part 1 */
  uint32_t addr_filtering_start;

  /** @brief Address range redirect, part 2 */
  uint32_t addr_filtering_end;

/** @brief Address filtering valid bits*/
#define L2C_310_ADDR_FILTER_VALID_MASK 0xFFF00000

/** @brief Address filtering enable bit*/
#define L2C_310_ADDR_FILTER_ENABLE_MASK 0x00000001

  uint8_t reserved_c08[0xf40 - 0xc08];

  /** @brief Debug control */
  uint32_t debug_ctrl;

/** @brief Debug SPIDEN bit */
#define L2C_310_DEBUG_SPIDEN_MASK 0x00000004

/** @brief Debug DWB bit, forces write through */
#define L2C_310_DEBUG_DWB_MASK 0x00000002

/** @brief Debug DCL bit, disables cache line fill.  This is bit 0 of the
 *  Debug Control Register; the previous value 0x00000002 duplicated DWB. */
#define L2C_310_DEBUG_DCL_MASK 0x00000001

  uint8_t reserved_f44[0xf60 - 0xf44];

  /** @brief Purpose prefetch enables */
  uint32_t prefetch_ctrl;
/** @brief Prefetch offset */
#define L2C_310_PREFETCH_OFFSET_MASK 0x0000001F
  uint8_t reserved_f64[0xf80 - 0xf64];

  /** @brief Purpose power controls */
  uint32_t power_ctrl;
} L2CC;
465
466rtems_interrupt_lock l2c_310_cache_lock = RTEMS_INTERRUPT_LOCK_INITIALIZER(
467  "cache"
468);
469
/* Errata table for the L2C-310 Level 2 cache from ARM.
 * Information taken from ARM's
 * "CoreLink controllers and peripherals
 *  - System controllers
 *  - L2C-310 Level 2 Cache Controller
 *  - Revision r3p3
 *  - Software Developer Errata Notice
 *  - ARM CoreLink Level 2 Cache Controller (L2C-310 or PL310),
 *    r3 releases Software Developers Errata Notice"
 * Please see this document for more information on these errata. */
480static bool l2c_310_cache_errata_is_applicable_753970(
481  cache_l2c_310_rtl_release rtl_release
482)
483{
484  bool is_applicable = false;
485
486  switch ( rtl_release ) {
487    case L2C_310_RTL_RELEASE_R3_P3:
488    case L2C_310_RTL_RELEASE_R3_P2:
489    case L2C_310_RTL_RELEASE_R3_P1:
490    case L2C_310_RTL_RELEASE_R2_P0:
491    case L2C_310_RTL_RELEASE_R1_P0:
492    case L2C_310_RTL_RELEASE_R0_P0:
493      is_applicable = false;
494      break;
495    case L2C_310_RTL_RELEASE_R3_P0:
496      is_applicable = true;
497      break;
498    default:
499      assert( 0 );
500      break;
501  }
502
503  return is_applicable;
504}
505
506static bool l2c_310_cache_errata_is_applicable_727913(
507  cache_l2c_310_rtl_release rtl_release
508)
509{
510  bool is_applicable = false;
511
512  switch ( rtl_release ) {
513    case L2C_310_RTL_RELEASE_R3_P3:
514    case L2C_310_RTL_RELEASE_R3_P2:
515    case L2C_310_RTL_RELEASE_R3_P1:
516    case L2C_310_RTL_RELEASE_R2_P0:
517    case L2C_310_RTL_RELEASE_R1_P0:
518    case L2C_310_RTL_RELEASE_R0_P0:
519      is_applicable = false;
520      break;
521    case L2C_310_RTL_RELEASE_R3_P0:
522      is_applicable = true;
523      break;
524    default:
525      assert( 0 );
526      break;
527  }
528
529  return is_applicable;
530}
531
532static bool l2c_310_cache_errata_is_applicable_727914(
533  cache_l2c_310_rtl_release rtl_release
534)
535{
536  bool is_applicable = false;
537
538  switch ( rtl_release ) {
539    case L2C_310_RTL_RELEASE_R3_P3:
540    case L2C_310_RTL_RELEASE_R3_P2:
541    case L2C_310_RTL_RELEASE_R3_P1:
542    case L2C_310_RTL_RELEASE_R2_P0:
543    case L2C_310_RTL_RELEASE_R1_P0:
544    case L2C_310_RTL_RELEASE_R0_P0:
545      is_applicable = false;
546      break;
547    case L2C_310_RTL_RELEASE_R3_P0:
548      is_applicable = true;
549      break;
550    default:
551      assert( 0 );
552      break;
553  }
554
555  return is_applicable;
556}
557
558static bool l2c_310_cache_errata_is_applicable_727915(
559  cache_l2c_310_rtl_release rtl_release
560)
561{
562  bool is_applicable = false;
563
564  switch ( rtl_release ) {
565    case L2C_310_RTL_RELEASE_R3_P3:
566    case L2C_310_RTL_RELEASE_R3_P2:
567    case L2C_310_RTL_RELEASE_R3_P1:
568    case L2C_310_RTL_RELEASE_R1_P0:
569    case L2C_310_RTL_RELEASE_R0_P0:
570      is_applicable = false;
571      break;
572    case L2C_310_RTL_RELEASE_R3_P0:
573    case L2C_310_RTL_RELEASE_R2_P0:
574      is_applicable = true;
575      break;
576    default:
577      assert( 0 );
578      break;
579  }
580
581  return is_applicable;
582}
583
584static bool l2c_310_cache_errata_is_applicable_729806(
585  cache_l2c_310_rtl_release rtl_release
586)
587{
588  bool is_applicable = false;
589
590  switch ( rtl_release ) {
591    case L2C_310_RTL_RELEASE_R3_P3:
592    case L2C_310_RTL_RELEASE_R3_P2:
593    case L2C_310_RTL_RELEASE_R2_P0:
594    case L2C_310_RTL_RELEASE_R1_P0:
595    case L2C_310_RTL_RELEASE_R0_P0:
596      is_applicable = false;
597      break;
598    case L2C_310_RTL_RELEASE_R3_P1:
599    case L2C_310_RTL_RELEASE_R3_P0:
600      is_applicable = true;
601      break;
602    default:
603      assert( 0 );
604      break;
605  }
606
607  return is_applicable;
608}
609
610static bool l2c_310_cache_errata_is_applicable_729815(
611  cache_l2c_310_rtl_release rtl_release
612)
613{
614  bool is_applicable = false;
615
616  switch ( rtl_release ) {
617    case L2C_310_RTL_RELEASE_R3_P3:
618    case L2C_310_RTL_RELEASE_R1_P0:
619    case L2C_310_RTL_RELEASE_R0_P0:
620      is_applicable = false;
621      break;
622    case L2C_310_RTL_RELEASE_R3_P2:
623    case L2C_310_RTL_RELEASE_R3_P1:
624    case L2C_310_RTL_RELEASE_R3_P0:
625    case L2C_310_RTL_RELEASE_R2_P0:
626      is_applicable = true;
627      break;
628    default:
629      assert( 0 );
630      break;
631  }
632
633  return is_applicable;
634}
635
636static bool l2c_310_cache_errata_is_applicable_742884(
637  cache_l2c_310_rtl_release rtl_release
638)
639{
640  bool is_applicable = false;
641
642  switch ( rtl_release ) {
643    case L2C_310_RTL_RELEASE_R3_P3:
644    case L2C_310_RTL_RELEASE_R3_P2:
645    case L2C_310_RTL_RELEASE_R3_P0:
646    case L2C_310_RTL_RELEASE_R2_P0:
647    case L2C_310_RTL_RELEASE_R1_P0:
648    case L2C_310_RTL_RELEASE_R0_P0:
649      is_applicable = false;
650      break;
651    case L2C_310_RTL_RELEASE_R3_P1:
652      is_applicable = true;
653      break;
654    default:
655      assert( 0 );
656      break;
657  }
658
659  return is_applicable;
660}
661
662static bool l2c_310_cache_errata_is_applicable_752271(
663  cache_l2c_310_rtl_release rtl_release
664)
665{
666  bool is_applicable = false;
667
668  switch ( rtl_release ) {
669    case L2C_310_RTL_RELEASE_R3_P3:
670    case L2C_310_RTL_RELEASE_R3_P2:
671    case L2C_310_RTL_RELEASE_R2_P0:
672    case L2C_310_RTL_RELEASE_R1_P0:
673    case L2C_310_RTL_RELEASE_R0_P0:
674      is_applicable = false;
675      break;
676    case L2C_310_RTL_RELEASE_R3_P1:
677    case L2C_310_RTL_RELEASE_R3_P0:
678      is_applicable = true;
679      break;
680    default:
681      assert( 0 );
682      break;
683  }
684
685  return is_applicable;
686}
687
688static bool l2c_310_cache_errata_is_applicable_765569(
689  cache_l2c_310_rtl_release rtl_release
690)
691{
692  bool is_applicable = false;
693
694  switch ( rtl_release ) {
695    case L2C_310_RTL_RELEASE_R3_P3:
696    case L2C_310_RTL_RELEASE_R3_P2:
697    case L2C_310_RTL_RELEASE_R3_P1:
698    case L2C_310_RTL_RELEASE_R3_P0:
699    case L2C_310_RTL_RELEASE_R2_P0:
700    case L2C_310_RTL_RELEASE_R1_P0:
701    case L2C_310_RTL_RELEASE_R0_P0:
702      is_applicable = true;
703      break;
704    default:
705      assert( 0 );
706      break;
707  }
708
709  return is_applicable;
710}
711
712static bool l2c_310_cache_errata_is_applicable_769419(
713  cache_l2c_310_rtl_release rtl_release
714)
715{
716  bool is_applicable = false;
717
718  switch ( rtl_release ) {
719    case L2C_310_RTL_RELEASE_R3_P3:
720    case L2C_310_RTL_RELEASE_R3_P2:
721      is_applicable = false;
722      break;
723    case L2C_310_RTL_RELEASE_R3_P1:
724    case L2C_310_RTL_RELEASE_R3_P0:
725    case L2C_310_RTL_RELEASE_R2_P0:
726    case L2C_310_RTL_RELEASE_R1_P0:
727    case L2C_310_RTL_RELEASE_R0_P0:
728      is_applicable = true;
729      break;
730    default:
731      assert( 0 );
732      break;
733  }
734
735  return is_applicable;
736}
737
738static bool l2c_310_cache_errata_is_applicable_588369(
739  cache_l2c_310_rtl_release rtl_release
740)
741{
742  bool is_applicable = false;
743
744  switch ( rtl_release ) {
745    case L2C_310_RTL_RELEASE_R3_P3:
746    case L2C_310_RTL_RELEASE_R3_P2:
747    case L2C_310_RTL_RELEASE_R3_P1:
748    case L2C_310_RTL_RELEASE_R3_P0:
749    case L2C_310_RTL_RELEASE_R2_P0:
750      is_applicable = false;
751      break;
752    case L2C_310_RTL_RELEASE_R1_P0:
753    case L2C_310_RTL_RELEASE_R0_P0:
754      is_applicable = true;
755      break;
756    default:
757      assert( 0 );
758      break;
759  }
760
761  return is_applicable;
762}
763
#ifdef CACHE_ERRATA_CHECKS_FOR_IMPLEMENTED_ERRATAS
/* Erratum 754670 (continuous write flow can stall a read):
 * present in every known RTL release. */
static bool l2c_310_cache_errata_is_applicable_754670(
  cache_l2c_310_rtl_release rtl_release
)
{
  switch ( rtl_release ) {
    case L2C_310_RTL_RELEASE_R3_P3:
    case L2C_310_RTL_RELEASE_R3_P2:
    case L2C_310_RTL_RELEASE_R3_P1:
    case L2C_310_RTL_RELEASE_R3_P0:
    case L2C_310_RTL_RELEASE_R2_P0:
    case L2C_310_RTL_RELEASE_R1_P0:
    case L2C_310_RTL_RELEASE_R0_P0:
      return true;
    default:
      /* Unknown RTL release value */
      assert( 0 );
      return false;
  }
}
#endif /* CACHE_ERRATA_CHECKS_FOR_IMPLEMENTED_ERRATAS */
789
/* The common workaround for this erratum would be to add a
 * data synchronization barrier to the beginning of the abort handler.
 * But for RTEMS a call of the abort handler means a fatal condition anyway.
 * So there is no need to handle this erratum.
 *
 * The macro is wrapped in do { } while ( 0 ) so it expands to a single
 * statement at every use site (safe in unbraced if/else), and the stray
 * trailing line continuation of the previous version, which silently
 * swallowed the following source line, is gone.
 *
 * NOTE(review): the errata predicate is referenced, not called (no
 * parentheses), so the condition is its address; since the body is empty
 * this has no effect, but confirm the intent. */
#define CACHE_ARM_ERRATA_775420_HANDLER() \
  do { \
    if( arm_errata_is_applicable_processor_errata_775420 ) { \
      /* Intentionally empty, see above */ \
    } \
  } while ( 0 )
/*
 * Check the detected RTL release against the errata table above and stop
 * (via assert) if an erratum that this driver does not handle applies.
 * For two errata (729815, 765569) the configuration that triggers the
 * erratum is checked instead, since avoiding that configuration is the
 * documented workaround.
 */
static void l2c_310_cache_check_errata( cache_l2c_310_rtl_release rtl_release )
{
  /* This erratum gets handled within the sources */
  /* Unhandled erratum present: 588369 Errata 588369 says that clean + inv may
   * keep the cache line if it was clean. See ARMs documentation on the erratum
   * for a workaround */
  /* assert( ! l2c_310_cache_errata_is_applicable_588369( rtl_release ) ); */

  /* Unhandled erratum present: 727913 Prefetch dropping feature can cause
   * incorrect behavior when PL310 handles reads that cross cache line
   * boundary */
  assert( ! l2c_310_cache_errata_is_applicable_727913( rtl_release ) );

  /* Unhandled erratum present: 727914 Double linefill feature can cause
   * deadlock */
  assert( ! l2c_310_cache_errata_is_applicable_727914( rtl_release ) );

  /* Unhandled erratum present: 727915 Background Clean and Invalidate by Way
   * operation can cause data corruption */
  assert( ! l2c_310_cache_errata_is_applicable_727915( rtl_release ) );

  /* Unhandled erratum present: 729806 Speculative reads from the Cortex-A9
   * MPCore processor can cause deadlock */
  assert( ! l2c_310_cache_errata_is_applicable_729806( rtl_release ) );

  if( l2c_310_cache_errata_is_applicable_729815( rtl_release ) )
  {
    volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;

    /* Workaround: the HPSODRE feature must stay disabled (default) */
    assert( 0 == ( l2cc->aux_ctrl & L2C_310_AUX_HPSODRE_MASK ) );

    /* Erratum: 729815 The “High Priority for SO and Dev reads” feature can
     * cause Quality of Service issues to cacheable read transactions*/

    /* Conditions
       This problem occurs when the following conditions are met:
       1. Bit[10] “High Priority for SO and Dev reads enable” of the PL310
          Auxiliary Control Register is set to 1.
       2. PL310 receives a cacheable read that misses in the L2 cache.
       3. PL310 receives a continuous flow of Strongly Ordered or Device
          reads that take all address slots in the master interface.
       Workaround
       A workaround is only necessary in systems that are able to issue a
       continuous flow of Strongly Ordered or Device reads. In such a case,
       the workaround is to disable the “High Priority for SO and Dev reads”
       feature. This is the default behavior.*/
  }

  /* Unhandled erratum present: 742884 Double linefill feature might introduce
   * circular dependency and deadlock */
  assert( ! l2c_310_cache_errata_is_applicable_742884( rtl_release ) );

  /* Unhandled erratum present: 752271 Double linefill feature can cause data
   * corruption */
  assert( ! l2c_310_cache_errata_is_applicable_752271( rtl_release ) );

  /* This erratum can not be worked around: 754670 A continuous write flow can
   * stall a read targeting the same memory area
   * But this erratum does not lead to any data corruption */
  /* assert( ! l2c_310_cache_errata_is_applicable_754670() ); */

  if( l2c_310_cache_errata_is_applicable_765569( rtl_release ) )
  {
    volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;

    /* Workaround: with prefetching enabled the offset must not be 23 */
    assert( !( ( l2cc->aux_ctrl & L2C_310_AUX_IPFE_MASK
                 || l2cc->aux_ctrl & L2C_310_AUX_DPFE_MASK )
               && ( ( l2cc->prefetch_ctrl & L2C_310_PREFETCH_OFFSET_MASK )
                    == 23 ) ) );

    /* Unhandled erratum present: 765569 Prefetcher can cross 4KB boundary if
     * offset is programmed with value 23 */

    /* Conditions
       This problem occurs when the following conditions are met:
       1. One of the Prefetch Enable bits (bits [29:28] of the Auxiliary or
          Prefetch Control Register) is set HIGH.
       2. The prefetch offset bits are programmed with value 23 (5'b10111).
       Workaround
       A workaround for this erratum is to program the prefetch offset with any
       value except 23.*/
  }

  /* Unhandled erratum present: 769419 No automatic Store Buffer drain,
   * visibility of written data requires an explicit Cache */
  assert( ! l2c_310_cache_errata_is_applicable_769419( rtl_release ) );
}
885
886static inline void
887cache_l2c_310_sync( void )
888{
889  volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
890  cache_l2c_310_rtl_release rtl_release =
891    l2cc->cache_id & L2C_310_ID_RTL_MASK;
892
893  if( l2c_310_cache_errata_is_applicable_753970( rtl_release ) ) {
894    l2cc->dummy_cache_sync_reg = 0;
895  } else {
896    l2cc->cache_sync           = 0;
897  }
898}
899
900static inline void
901cache_l2c_310_flush_1_line(
902  const void *d_addr,
903  const bool  is_errata_588369applicable
904)
905{
906  volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
907
908  if( is_errata_588369applicable ) {
909    /*
910    * Errata 588369 says that clean + inv may keep the
911    * cache line if it was clean, the recommended
912    * workaround is to clean then invalidate the cache
913    * line, with write-back and cache linefill disabled.
914    */
915    l2cc->clean_pa     = (uint32_t) d_addr;
916    cache_l2c_310_sync();
917    l2cc->inv_pa       = (uint32_t) d_addr;
918  } else {
919    l2cc->clean_inv_pa = (uint32_t) d_addr;
920  }
921}
922
/*
 * Flush (clean and invalidate) the L2 cache lines covering
 * [d_addr, d_addr + n_bytes).  The range is processed in blocks of at
 * most L2C_310_MAX_LOCKING_BYTES; the interrupt lock is dropped and
 * re-acquired between blocks to bound interrupt latency.
 *
 * NOTE(review): for n_bytes == 0 the computation of ADDR_LAST wraps to
 * d_addr - 1, which terminates the loop immediately for any non-zero
 * aligned d_addr — confirm callers never pass n_bytes == 0 with d_addr
 * at address 0.
 */
static inline void
cache_l2c_310_flush_range( const void* d_addr, const size_t n_bytes )
{
  rtems_interrupt_lock_context lock_context;
  /* Back starting address up to start of a line and invalidate until ADDR_LAST */
  uint32_t       adx               = (uint32_t)d_addr
    & ~L2C_310_DATA_LINE_MASK;
  const uint32_t ADDR_LAST         =
    (uint32_t)( (size_t)d_addr + n_bytes - 1 );
  uint32_t       block_end         =
    L2C_310_MIN( ADDR_LAST, adx + L2C_310_MAX_LOCKING_BYTES );
  volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
  cache_l2c_310_rtl_release rtl_release =
    l2cc->cache_id & L2C_310_ID_RTL_MASK;
  bool is_errata_588369_applicable =
    l2c_310_cache_errata_is_applicable_588369( rtl_release );

  rtems_interrupt_lock_acquire( &l2c_310_cache_lock, &lock_context );

  /* Outer loop advances block by block; inner loop flushes one line at a
   * time within the current block. */
  for (;
       adx      <= ADDR_LAST;
       adx       = block_end + 1,
       block_end = L2C_310_MIN( ADDR_LAST, adx + L2C_310_MAX_LOCKING_BYTES )) {
    for (; adx <= block_end; adx += CPU_DATA_CACHE_ALIGNMENT ) {
      cache_l2c_310_flush_1_line( (void*)adx, is_errata_588369_applicable );
    }
    if( block_end < ADDR_LAST ) {
      /* Open an interrupt window between blocks to keep latency bounded */
      rtems_interrupt_lock_release( &l2c_310_cache_lock, &lock_context );
      rtems_interrupt_lock_acquire( &l2c_310_cache_lock, &lock_context );
    }
  }
  /* Drain the store buffer so all flushes take effect */
  cache_l2c_310_sync();
  rtems_interrupt_lock_release( &l2c_310_cache_lock, &lock_context );
}
957
/**
 * @brief Cleans and invalidates the entire L2C-310 cache via the
 *   clean-and-invalidate-by-way background operation.
 *
 * Does nothing when the controller is disabled.  The busy-wait spins
 * until the controller clears the way bits, then a sync drains any
 * buffered writes.  The whole operation holds the interrupt lock.
 */
static inline void
cache_l2c_310_flush_entire( void )
{
  volatile L2CC               *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
  rtems_interrupt_lock_context lock_context;

  /* Only flush if level 2 cache is active */
  if( ( l2cc->ctrl & L2C_310_ENABLE_MASK ) != 0 ) {

    /* ensure ordering with previous memory accesses */
    _ARM_Data_memory_barrier();

    rtems_interrupt_lock_acquire( &l2c_310_cache_lock, &lock_context );
    /* Kick off the background clean-and-invalidate on all ways. */
    l2cc->clean_inv_way = L2C_310_WAY_MASK;

    /* The controller clears the bits as the ways complete. */
    while ( l2cc->clean_inv_way & L2C_310_WAY_MASK ) {};

    /* Wait for the flush to complete */
    cache_l2c_310_sync();

    rtems_interrupt_lock_release( &l2c_310_cache_lock, &lock_context );
  }
}
981
/**
 * @brief Invalidates (without cleaning) the single L2 line containing
 *   @a d_addr, then syncs the controller.
 *
 * NOTE(review): no interrupt lock is taken here; callers are expected
 * to provide any serialization they need — confirm against call sites.
 */
static inline void
cache_l2c_310_invalidate_1_line( const void *d_addr )
{
  volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;


  /* Writing the physical address triggers the by-PA invalidate. */
  l2cc->inv_pa = (uint32_t) d_addr;
  cache_l2c_310_sync();
}
991
992static inline void
993cache_l2c_310_invalidate_range( uint32_t adx, const uint32_t ADDR_LAST )
994{
995  volatile L2CC               *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
996  rtems_interrupt_lock_context lock_context;
997
998  rtems_interrupt_lock_acquire( &l2c_310_cache_lock, &lock_context );
999  for (;
1000       adx <= ADDR_LAST;
1001       adx += CPU_INSTRUCTION_CACHE_ALIGNMENT ) {
1002    /* Invalidate L2 cache line */
1003    l2cc->inv_pa = adx;
1004  }
1005  cache_l2c_310_sync();
1006  rtems_interrupt_lock_release( &l2c_310_cache_lock, &lock_context );
1007}
1008
/**
 * @brief Invalidates the entire L2C-310 cache via the invalidate-by-way
 *   background operation, discarding any dirty lines.
 *
 * NOTE(review): unlike the flush/clean variants this takes no interrupt
 * lock; it is invoked from cache_l2c_310_enable() while the controller
 * is still disabled, where locking is unnecessary — confirm before
 * calling it from other contexts.
 */
static inline void
cache_l2c_310_invalidate_entire( void )
{
  volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;

  /* Invalidate the caches */

  /* ensure ordering with previous memory accesses */
  _ARM_Data_memory_barrier();

  /* Start the background invalidate on all ways. */
  l2cc->inv_way = L2C_310_WAY_MASK;

  /* The controller clears the way bits as they complete. */
  while ( l2cc->inv_way & L2C_310_WAY_MASK ) ;

  /* Wait for the invalidate to complete */
  cache_l2c_310_sync();
}
1026
/**
 * @brief Cleans and invalidates the entire L2C-310 cache, writing dirty
 *   lines back to memory before discarding them.
 *
 * Does nothing when the controller is disabled.  The interrupt lock is
 * held across the background operation and the final sync.
 */
static inline void
cache_l2c_310_clean_and_invalidate_entire( void )
{
  volatile L2CC               *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
  rtems_interrupt_lock_context lock_context;

  /* Only act while the level 2 cache is enabled. */
  if( ( l2cc->ctrl & L2C_310_ENABLE_MASK ) != 0 ) {
    /* Invalidate the caches */

    /* ensure ordering with previous memory accesses */
    _ARM_Data_memory_barrier();

    rtems_interrupt_lock_acquire( &l2c_310_cache_lock, &lock_context );
    /* Start the background clean-and-invalidate on all ways. */
    l2cc->clean_inv_way = L2C_310_WAY_MASK;

    /* The controller clears the way bits as they complete. */
    while ( l2cc->clean_inv_way & L2C_310_WAY_MASK ) ;

    /* Wait for the invalidate to complete */
    cache_l2c_310_sync();

    rtems_interrupt_lock_release( &l2c_310_cache_lock, &lock_context );
  }
}
1050
static inline void
cache_l2c_310_freeze( void )
{
  /* Intentionally empty.  A freeze operation would be added here if the
   * hardware turns out to support one and a use case arises. */
}
1057
static inline void
cache_l2c_310_unfreeze( void )
{
  /* Intentionally empty.  An unfreeze operation would be added here if
   * the hardware turns out to support one and a use case arises. */
}
1064
1065static inline size_t
1066cache_l2c_310_get_cache_size( void )
1067{
1068  size_t         size       = 0;
1069  volatile L2CC *l2cc       = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
1070  uint32_t       cache_type = l2cc->cache_type;
1071  uint32_t       way_size;
1072  uint32_t       num_ways;
1073
1074  way_size = (cache_type & L2C_310_TYPE_SIZE_D_WAYS_MASK)
1075    >> L2C_310_TYPE_SIZE_D_WAYS_SHIFT;
1076  num_ways = (cache_type & L2C_310_TYPE_NUM_D_WAYS_MASK)
1077    >> L2C_310_TYPE_NUM_D_WAYS_SHIFT;
1078
1079  assert( way_size <= 0x07 );
1080  assert( num_ways <= 0x01 );
1081  if(  way_size <= 0x07 && num_ways <= 0x01 ) {
1082    if( way_size == 0x00 ) {
1083      way_size = 16 * 1024;
1084    } else if( way_size == 0x07 ) {
1085      way_size = 512 * 1024;
1086    } else {
1087      way_size = (1 << (way_size - 1)) * 16 * 1024;
1088    }
1089    switch( num_ways ) {
1090      case 0:
1091        num_ways = 8;
1092        break;
1093      case 1:
1094        num_ways = 16;
1095        break;
1096      default:
1097        num_ways = 0;
1098        break;
1099    }
1100    size = way_size * num_ways;
1101  }
1102  return size;
1103}
1104
/**
 * @brief Clears all eight pairs of data and instruction lockdown
 *   registers so that no cache way remains locked.
 *
 * Called from cache_l2c_310_enable() before the controller is switched
 * on, to guarantee a clean lockdown state.
 */
static void cache_l2c_310_unlock( volatile L2CC *l2cc )
{
  l2cc->d_lockdown_0 = 0;
  l2cc->i_lockdown_0 = 0;
  l2cc->d_lockdown_1 = 0;
  l2cc->i_lockdown_1 = 0;
  l2cc->d_lockdown_2 = 0;
  l2cc->i_lockdown_2 = 0;
  l2cc->d_lockdown_3 = 0;
  l2cc->i_lockdown_3 = 0;
  l2cc->d_lockdown_4 = 0;
  l2cc->i_lockdown_4 = 0;
  l2cc->d_lockdown_5 = 0;
  l2cc->i_lockdown_5 = 0;
  l2cc->d_lockdown_6 = 0;
  l2cc->i_lockdown_6 = 0;
  l2cc->d_lockdown_7 = 0;
  l2cc->i_lockdown_7 = 0;
}
1124
/**
 * @brief Busy-waits until every background by-way maintenance operation
 *   (invalidate, clean, clean-and-invalidate) has finished.
 *
 * The controller clears the way bits of each register as the
 * corresponding operation completes.
 */
static void cache_l2c_310_wait_for_background_ops( volatile L2CC *l2cc )
{
  while ( l2cc->inv_way & L2C_310_WAY_MASK ) ;

  while ( l2cc->clean_way & L2C_310_WAY_MASK ) ;

  while ( l2cc->clean_inv_way & L2C_310_WAY_MASK ) ;
}
1133
/* We support only the L2C-310 revisions r3p2 and r3p3 cache controller */

/* Compile-time check: the configured ID must identify an L310 part. */
#if (BSP_ARM_L2C_310_ID & L2C_310_ID_PART_MASK) \
  != L2C_310_ID_PART_L310
#error "invalid L2-310 cache controller part number"
#endif

/* Compile-time check: RTL release 0x8 corresponds to r3p2 and 0x9 to
 * r3p3; any other revision is rejected. */
#if ((BSP_ARM_L2C_310_ID & L2C_310_ID_RTL_MASK) != 0x8) \
  && ((BSP_ARM_L2C_310_ID & L2C_310_ID_RTL_MASK) != 0x9)
#error "invalid L2-310 cache controller RTL revision"
#endif
1145
/**
 * @brief Verifies the controller identity and, if it is currently
 *   disabled, initializes and enables the L2C-310.
 *
 * Calls bsp_fatal() when the cache ID register does not match the
 * configured BSP_ARM_L2C_310_ID (wrong part, or an RTL release older
 * than the configured one), or when the way count read from hardware
 * does not match L2C_310_NUM_WAYS.
 */
static inline void
cache_l2c_310_enable( void )
{
  volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
  uint32_t cache_id = l2cc->cache_id;
  cache_l2c_310_rtl_release rtl_release =
    cache_id & L2C_310_ID_RTL_MASK;
  uint32_t id_mask =
    L2C_310_ID_IMPL_MASK | L2C_310_ID_PART_MASK;

  /*
   * Do we actually have an L2C-310 cache controller?  Has BSP_ARM_L2C_310_BASE
   * been configured correctly?
   */
  if (
    (BSP_ARM_L2C_310_ID & id_mask) != (cache_id & id_mask)
      || rtl_release < (BSP_ARM_L2C_310_ID & L2C_310_ID_RTL_MASK)
  ) {
    bsp_fatal( ARM_FATAL_L2C_310_UNEXPECTED_ID );
  }

  l2c_310_cache_check_errata( rtl_release );

  /* Only enable if L2CC is currently disabled */
  if( ( l2cc->ctrl & L2C_310_ENABLE_MASK ) == 0 ) {
    uint32_t aux_ctrl;
    int ways;

    /* Make sure that I&D is not locked down when starting */
    cache_l2c_310_unlock( l2cc );

    /* Let any in-flight by-way maintenance operations finish first. */
    cache_l2c_310_wait_for_background_ops( l2cc );

    aux_ctrl = l2cc->aux_ctrl;

    /* Bit 16 of the auxiliary control register selects 16-way mode. */
    if ( (aux_ctrl & ( 1 << 16 )) != 0 ) {
      ways = 16;
    } else {
      ways = 8;
    }

    if ( ways != L2C_310_NUM_WAYS ) {
      bsp_fatal( ARM_FATAL_L2C_310_UNEXPECTED_NUM_WAYS );
    }

    /* Set up the way size */
    aux_ctrl &= L2C_310_AUX_REG_ZERO_MASK; /* Set way_size to 0 */
    aux_ctrl |= L2C_310_AUX_REG_DEFAULT_MASK;

    l2cc->aux_ctrl = aux_ctrl;

    /* Set up the latencies */
    l2cc->tag_ram_ctrl  = L2C_310_TAG_RAM_DEFAULT_LAT;
    l2cc->data_ram_ctrl = L2C_310_DATA_RAM_DEFAULT_MASK;

    /* Start from a clean slate: discard whatever the cache held. */
    cache_l2c_310_invalidate_entire();

    /* Clear the pending interrupts */
    l2cc->int_clr = l2cc->int_raw_status;

    /* Enable the L2CC */
    l2cc->ctrl |= L2C_310_ENABLE_MASK;
  }
}
1210
/**
 * @brief Disables the L2C-310 after flushing its entire contents, so no
 *   dirty data is lost.
 *
 * Does nothing when the controller is already disabled.  The flush runs
 * outside the interrupt lock; the final wait and the disable itself run
 * under it.
 */
static inline void
cache_l2c_310_disable( void )
{
  volatile L2CC               *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
  rtems_interrupt_lock_context lock_context;

  if ( l2cc->ctrl & L2C_310_ENABLE_MASK ) {
    /* Clean and Invalidate L2 Cache */
    cache_l2c_310_flush_entire();
    rtems_interrupt_lock_acquire( &l2c_310_cache_lock, &lock_context );

    /* Make sure no background maintenance operation is still running. */
    cache_l2c_310_wait_for_background_ops( l2cc );

    /* Disable the L2 cache */
    l2cc->ctrl &= ~L2C_310_ENABLE_MASK;
    rtems_interrupt_lock_release( &l2c_310_cache_lock, &lock_context );
  }
}
1229
static inline void
_CPU_cache_enable_data( void )
{
  /* Enabling data caching at the outer level means enabling the
   * L2C-310 controller itself. */
  cache_l2c_310_enable();
}
1235
static inline void
_CPU_cache_disable_data( void )
{
  /* Shut down the inner (L1) data cache first, then the outer
   * L2C-310 controller. */
  arm_cache_l1_disable_data();
  cache_l2c_310_disable();
}
1242
static inline void
_CPU_cache_enable_instruction( void )
{
  /* Enabling instruction caching at the outer level also amounts to
   * enabling the L2C-310 controller. */
  cache_l2c_310_enable();
}
1248
static inline void
_CPU_cache_disable_instruction( void )
{
  /* Shut down the inner (L1) instruction cache first, then the outer
   * L2C-310 controller. */
  arm_cache_l1_disable_instruction();
  cache_l2c_310_disable();
}
1255
static inline void
_CPU_cache_flush_data_range(
  const void *d_addr,
  size_t      n_bytes
)
{
  /* Flush the inner (L1) data cache first so dirty lines reach the L2,
   * then flush the outer L2C-310 out to memory.  Empty ranges are a
   * no-op. */
  if ( n_bytes == 0 ) {
    return;
  }

  arm_cache_l1_flush_data_range( d_addr, n_bytes );
  cache_l2c_310_flush_range( d_addr, n_bytes );
}
1273
static inline void
_CPU_cache_flush_entire_data( void )
{
  /* Clean the inner (L1) data cache first, then push everything out of
   * the outer L2C-310. */
  arm_cache_l1_flush_entire_data();
  cache_l2c_310_flush_entire();
}
1280
/**
 * @brief Invalidates the data-cache lines covering
 *   [addr_first, addr_first + n_bytes - 1] at both cache levels.
 *
 * The L2-then-L1 invalidation is performed twice.  NOTE(review):
 * presumably the second pass catches lines that were refetched into L2
 * or migrated between levels while the first pass was in progress —
 * confirm the rationale against the L2C-310 documentation before
 * simplifying.
 */
static inline void
_CPU_cache_invalidate_data_range(
  const void *addr_first,
  size_t     n_bytes
)
{
  if ( n_bytes > 0 ) {
    /* Back starting address up to start of a line and invalidate until ADDR_LAST */
    uint32_t       adx       = (uint32_t) addr_first
      & ~L2C_310_DATA_LINE_MASK;
    const uint32_t ADDR_LAST =
      (uint32_t)( (size_t)addr_first + n_bytes - 1 );
    uint32_t       block_end =
      L2C_310_MIN( ADDR_LAST, adx + L2C_310_MAX_LOCKING_BYTES );

    /* We have to apply a lock. Thus we will operate only L2C_310_MAX_LOCKING_BYTES
     * at a time */
    for (;
         adx      <= ADDR_LAST;
         adx       = block_end + 1,
         block_end = L2C_310_MIN( ADDR_LAST, adx + L2C_310_MAX_LOCKING_BYTES )) {
      cache_l2c_310_invalidate_range(
        adx,
        block_end
      );
    }
    arm_cache_l1_invalidate_data_range(
      addr_first,
      n_bytes
    );

    /* Second pass: restart from the first line and repeat the chunked
     * L2 invalidation followed by the L1 invalidation. */
    adx       = (uint32_t)addr_first & ~L2C_310_DATA_LINE_MASK;
    block_end = L2C_310_MIN( ADDR_LAST, adx + L2C_310_MAX_LOCKING_BYTES );
    for (;
         adx      <= ADDR_LAST;
         adx       = block_end + 1,
         block_end = L2C_310_MIN( ADDR_LAST, adx + L2C_310_MAX_LOCKING_BYTES )) {
      cache_l2c_310_invalidate_range(
        adx,
        block_end
      );
    }
    arm_cache_l1_invalidate_data_range(
      addr_first,
      n_bytes
    );
  }
}
1329
/**
 * @brief Invalidates the entire data cache hierarchy.
 *
 * Note: despite the name, clean-and-invalidate operations are used at
 * every step so that no dirty line is silently discarded.
 */
static inline void
_CPU_cache_invalidate_entire_data( void )
{
  /* This is broadcast within the cluster */
  arm_cache_l1_flush_entire_data();

  /* forces the address out past level 2 */
  cache_l2c_310_clean_and_invalidate_entire();

  /* This is broadcast within the cluster */
  arm_cache_l1_clean_and_invalidate_entire_data();
}
1342
static inline void
_CPU_cache_freeze_data( void )
{
  /* Freeze the inner (L1) data cache, then the outer cache (which is
   * currently a no-op for the L2C-310). */
  arm_cache_l1_freeze_data();
  cache_l2c_310_freeze();
}
1349
static inline void
_CPU_cache_unfreeze_data( void )
{
  /* Unfreeze the inner (L1) data cache, then the outer cache (which is
   * currently a no-op for the L2C-310). */
  arm_cache_l1_unfreeze_data();
  cache_l2c_310_unfreeze();
}
1356
static inline void
_CPU_cache_invalidate_instruction_range( const void *i_addr, size_t n_bytes)
{
  /* Only the L1 instruction cache is invalidated here; this driver has
   * no separate L2 instruction-side maintenance path. */
  arm_cache_l1_invalidate_instruction_range( i_addr, n_bytes );
}
1362
static inline void
_CPU_cache_invalidate_entire_instruction( void )
{
  /* Only the L1 instruction cache has a dedicated invalidate-all in
   * this driver. */
  arm_cache_l1_invalidate_entire_instruction();
}
1368
static inline void
_CPU_cache_freeze_instruction( void )
{
  /* Freeze the inner (L1) instruction cache, then the outer cache
   * (which is currently a no-op for the L2C-310). */
  arm_cache_l1_freeze_instruction();
  cache_l2c_310_freeze();
}
1375
static inline void
_CPU_cache_unfreeze_instruction( void )
{
  /* Unfreeze the inner (L1) instruction cache, then the outer cache
   * (which is currently a no-op for the L2C-310). */
  arm_cache_l1_unfreeze_instruction();
  cache_l2c_310_unfreeze();
}
1382
/**
 * @brief Returns the data cache size in bytes for the requested level:
 *   1 selects the inner (L1) cache, 0 and 2 both select the outer
 *   L2C-310, any other level yields 0.
 */
static inline size_t
_CPU_cache_get_data_cache_size( const uint32_t level )
{
  if ( level == 1 ) {
    return arm_cache_l1_get_data_cache_size();
  }

  if ( level == 0 || level == 2 ) {
    return cache_l2c_310_get_cache_size();
  }

  return 0;
}
1403
/**
 * @brief Returns the instruction cache size in bytes for the requested
 *   level: 1 selects the inner (L1) cache, 0 and 2 both select the
 *   outer L2C-310, any other level yields 0.
 */
static inline size_t
_CPU_cache_get_instruction_cache_size( const uint32_t level )
{
  if ( level == 1 ) {
    return arm_cache_l1_get_instruction_cache_size();
  }

  if ( level == 0 || level == 2 ) {
    return cache_l2c_310_get_cache_size();
  }

  return 0;
}
1424
1425
1426/** @} */
1427
1428#ifdef __cplusplus
1429}
1430#endif /* __cplusplus */
1431
1432#endif /* LIBBSP_ARM_SHARED_L2C_310_CACHE_H */
Note: See TracBrowser for help on using the repository browser.