source: rtems/c/src/lib/libbsp/arm/shared/arm-l2c-310/cache_.h @ e492e7f8

Last change on this file was e492e7f8, checked in by Sebastian Huber <sebastian.huber@…> on 11/19/14 at 13:37:11

bsps/arm: L2C 310 compile-time errata 588369

1/**
2 * @file cache_.h
3 *
4 * @ingroup L2C-310_cache
5 *
6 * @brief Cache definitions and functions.
7 *
8 * This file implements handling for the ARM L2C-310 cache controller
9 */
10
11/*
12 * Authorship
13 * ----------
14 * This software was created by
15 *     R. Claus <claus@slac.stanford.edu>, 2013,
16 *       Stanford Linear Accelerator Center, Stanford University.
17 *
18 * Acknowledgement of sponsorship
19 * ------------------------------
20 * This software was produced by
21 *     the Stanford Linear Accelerator Center, Stanford University,
22 *     under Contract DE-AC03-76SFO0515 with the Department of Energy.
23 *
24 * Government disclaimer of liability
25 * ----------------------------------
26 * Neither the United States nor the United States Department of Energy,
27 * nor any of their employees, makes any warranty, express or implied, or
28 * assumes any legal liability or responsibility for the accuracy,
29 * completeness, or usefulness of any data, apparatus, product, or process
30 * disclosed, or represents that its use would not infringe privately owned
31 * rights.
32 *
33 * Stanford disclaimer of liability
34 * --------------------------------
35 * Stanford University makes no representations or warranties, express or
36 * implied, nor assumes any liability for the use of this software.
37 *
38 * Stanford disclaimer of copyright
39 * --------------------------------
40 * Stanford University, owner of the copyright, hereby disclaims its
41 * copyright and all other rights in this software.  Hence, anyone may
42 * freely use it for any purpose without restriction.
43 *
44 * Maintenance of notices
45 * ----------------------
46 * In the interest of clarity regarding the origin and status of this
47 * SLAC software, this and all the preceding Stanford University notices
48 * are to remain affixed to any copy or derivative of this software made
49 * or distributed by the recipient and are to be affixed to any copy of
50 * software made or distributed by the recipient that contains a copy or
51 * derivative of this software.
52 *
53 * ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03
54 */
55
56#ifndef LIBBSP_ARM_SHARED_L2C_310_CACHE_H
57#define LIBBSP_ARM_SHARED_L2C_310_CACHE_H
58
59#include <assert.h>
60#include <bsp.h>
61#include <bsp/fatal.h>
62#include <libcpu/arm-cp15.h>
63#include <rtems/rtems/intr.h>
64#include <bsp/arm-release-id.h>
65#include <bsp/arm-errata.h>
66#include "../include/arm-cache-l1.h"
67
68#ifdef __cplusplus
69extern "C" {
70#endif /* __cplusplus */
71
72/* These two defines also ensure that the rtems_cache_* functions have bodies */
73#define CPU_DATA_CACHE_ALIGNMENT ARM_CACHE_L1_CPU_DATA_ALIGNMENT
74#define CPU_INSTRUCTION_CACHE_ALIGNMENT ARM_CACHE_L1_CPU_INSTRUCTION_ALIGNMENT
75#define CPU_CACHE_SUPPORT_PROVIDES_RANGE_FUNCTIONS \
76  ARM_CACHE_L1_CPU_SUPPORT_PROVIDES_RANGE_FUNCTIONS
77#define CPU_CACHE_SUPPORT_PROVIDES_CACHE_SIZE_FUNCTIONS
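/*
 * Illustrative use, assuming a buffer buf of len bytes: with the definitions
 * above, the shared cache manager provides the usual RTEMS cache API, which
 * ends up in the _CPU_cache_* functions defined further below, e.g.
 *
 *   rtems_cache_flush_multiple_data_lines( buf, len );
 *   rtems_cache_invalidate_multiple_data_lines( buf, len );
 *   size_t l2_size = rtems_cache_get_data_cache_size( 2 );
 */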
78
79#define L2C_310_DATA_LINE_MASK ( CPU_DATA_CACHE_ALIGNMENT - 1 )
80#define L2C_310_INSTRUCTION_LINE_MASK \
81  ( CPU_INSTRUCTION_CACHE_ALIGNMENT \
82    - 1 )
83#define L2C_310_NUM_WAYS 8
84#define L2C_310_WAY_MASK ( ( 1 << L2C_310_NUM_WAYS ) - 1 )
85
86#define L2C_310_MIN( a, b ) \
87  ( ( ( a ) < ( b ) ) ? ( a ) : ( b ) )
88
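/* Range operations below work on at most this many bytes at a time so that
 * the interrupt lock can be released between blocks and the interrupt latency
 * of large flush or invalidate requests stays bounded. */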
89#define L2C_310_MAX_LOCKING_BYTES (4 * 1024)
90
91
92/* RTL release number as can be read from cache_id register */
93#define L2C_310_RTL_RELEASE_R0_P0 0x0
94#define L2C_310_RTL_RELEASE_R1_P0 0x2
95#define L2C_310_RTL_RELEASE_R2_P0 0x4
96#define L2C_310_RTL_RELEASE_R3_P0 0x5
97#define L2C_310_RTL_RELEASE_R3_P1 0x6
98#define L2C_310_RTL_RELEASE_R3_P2 0x8
99#define L2C_310_RTL_RELEASE_R3_P3 0x9
100
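/* Expected RTL release of the controller, derived from the BSP-provided
 * BSP_ARM_L2C_310_ID (see <bsp.h>); it is checked against the release read
 * from the hardware's cache_id register in l2c_310_enable() below. */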
101#define BSP_ARM_L2C_310_RTL_RELEASE (BSP_ARM_L2C_310_ID & L2C_310_ID_RTL_MASK)
102
103/**
104 * @defgroup L2C-310_cache Cache Support
105 * @ingroup arm_shared
106 * @brief Cache Functions and Definitions
107 * @{
108 */
109
110
111/**
112 * @brief L2CC Register Offsets
113 */
114typedef struct {
115  /** @brief Cache ID */
116  uint32_t cache_id;
117#define L2C_310_ID_RTL_MASK 0x3f
118#define L2C_310_ID_PART_MASK ( 0xf << 6 )
119#define L2C_310_ID_PART_L210 ( 1 << 6 )
120#define L2C_310_ID_PART_L310 ( 3 << 6 )
121#define L2C_310_ID_IMPL_MASK ( 0xff << 24 )
122  /** @brief Cache type */
123  uint32_t cache_type;
124/** @brief 1 if data banking implemented, 0 if not */
125#define L2C_310_TYPE_DATA_BANKING_MASK 0x80000000
126/** @brief 11xy, where: x=1 if pl310_LOCKDOWN_BY_MASTER is defined, otherwise 0 */
127#define L2C_310_TYPE_CTYPE_MASK 0x1E000000
128/** @brief y=1 if pl310_LOCKDOWN_BY_LINE is defined, otherwise 0. */
129#define L2C_310_TYPE_CTYPE_SHIFT 25
130/** @brief 1 for Harvard architecture, 0 for unified architecture */
131#define L2C_310_TYPE_HARVARD_MASK 0x01000000
132/** @brief Data cache way size = 2 Exp(value + 2) KB */
133#define L2C_310_TYPE_SIZE_D_WAYS_MASK 0x00700000
134#define L2C_310_TYPE_SIZE_D_WAYS_SHIFT 20
135/** @brief Associativity, i.e. number of data ways = (value * 8) + 8 */
136#define L2C_310_TYPE_NUM_D_WAYS_MASK 0x00040000
137#define L2C_310_TYPE_NUM_D_WAYS_SHIFT 18
138/** @brief Data cache line length, field value 0b00 = 32 bytes */
139#define L2C_310_TYPE_LENGTH_D_LINE_MASK 0x00003000
140#define L2C_310_TYPE_LENGTH_D_LINE_SHIFT 12
141#define L2C_310_TYPE_LENGTH_D_LINE_VAL_32 0x0
142/** @brief Instruction cache way size = 2 Exp(value + 2) KB */
143#define L2C_310_TYPE_SIZE_I_WAYS_MASK 0x00000700
144#define L2C_310_TYPE_SIZE_I_WAYS_SHIFT 8
145/** @brief Associativity, i.e. number of instruction ways = (value * 8) + 8 */
146#define L2C_310_TYPE_NUM_I_WAYS_MASK 0x00000040
147#define L2C_310_TYPE_NUM_I_WAYS_SHIFT 6
148/** @brief Instruction cache line length, field value 0b00 = 32 bytes */
149#define L2C_310_TYPE_LENGTH_I_LINE_MASK 0x00000003
150#define L2C_310_TYPE_LENGTH_I_LINE_SHIFT 0
151#define L2C_310_TYPE_LENGTH_I_LINE_VAL_32 0x0
152
153  uint8_t reserved_8[0x100 - 8];
154  uint32_t ctrl; /* Control */
155/** @brief Enables the L2CC */
156#define L2C_310_CTRL_ENABLE 0x00000001
157
158#define L2C_310_CTRL_EXCL_CONFIG (1 << 12)
159
160  /** @brief Auxiliary control */
161  uint32_t aux_ctrl;
162
163/** @brief Early BRESP Enable */
164#define L2C_310_AUX_EBRESPE_MASK 0x40000000
165
166/** @brief Instruction Prefetch Enable */
167#define L2C_310_AUX_IPFE_MASK 0x20000000
168
169/** @brief Data Prefetch Enable */
170#define L2C_310_AUX_DPFE_MASK 0x10000000
171
172/** @brief Non-secure interrupt access control */
173#define L2C_310_AUX_NSIC_MASK 0x08000000
174
175/** @brief Non-secure lockdown enable */
176#define L2C_310_AUX_NSLE_MASK 0x04000000
177
178/** @brief Cache replacement policy */
179#define L2C_310_AUX_CRP_MASK 0x02000000
180
181/** @brief Force write allocate */
182#define L2C_310_AUX_FWE_MASK 0x01800000
183
184/** @brief Shared attribute override enable */
185#define L2C_310_AUX_SAOE_MASK 0x00400000
186
187/** @brief Parity enable */
188#define L2C_310_AUX_PE_MASK 0x00200000
189
190/** @brief Event monitor bus enable */
191#define L2C_310_AUX_EMBE_MASK 0x00100000
192
193/** @brief Way-size */
194#define L2C_310_AUX_WAY_SIZE_MASK 0x000E0000
195#define L2C_310_AUX_WAY_SIZE_SHIFT 17
196
197/** @brief Associativity */
198#define L2C_310_AUX_ASSOC_MASK 0x00010000
199
200/** @brief Shared attribute invalidate enable */
201#define L2C_310_AUX_SAIE_MASK 0x00002000
202
203/** @brief Exclusive cache configuration */
204#define L2C_310_AUX_EXCL_CACHE_MASK 0x00001000
205
206/** @brief Store buffer device limitation Enable */
207#define L2C_310_AUX_SBDLE_MASK 0x00000800
208
209/** @brief High Priority for SO and Dev Reads Enable */
210#define L2C_310_AUX_HPSODRE_MASK 0x00000400
211
212/** @brief Full line of zero enable */
213#define L2C_310_AUX_FLZE_MASK 0x00000001
214
215/** @brief Default auxiliary control register value: way-size field 0x3 plus parity, prefetch and other feature enables */
216#define L2C_310_AUX_REG_DEFAULT_MASK \
217  ( L2C_310_AUX_WAY_SIZE_MASK & ( 0x3 << L2C_310_AUX_WAY_SIZE_SHIFT ) ) \
218  | L2C_310_AUX_PE_MASK      /* Parity enable */ \
219  | L2C_310_AUX_SAOE_MASK    /* Shared attribute override enable */ \
220  | L2C_310_AUX_CRP_MASK     /* Cache replacement policy */ \
221  | L2C_310_AUX_DPFE_MASK    /* Data prefetch enable */ \
222  | L2C_310_AUX_IPFE_MASK    /* Instruction prefetch enable */ \
223  | L2C_310_AUX_EBRESPE_MASK /* Early BRESP enable */
224
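/** @brief Clears the way-size field, bits [19:17], of the auxiliary control register */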
225#define L2C_310_AUX_REG_ZERO_MASK 0xFFF1FFFF
226
227/** @brief 1 cycle of latency, there is no additional latency for tag RAM */
228#define L2C_310_RAM_1_CYCLE_LAT_VAL 0x00000000
229/** @brief 2 cycles of latency for tag RAM */
230#define L2C_310_RAM_2_CYCLE_LAT_VAL 0x00000001
231/** @brief 3 cycles of latency for tag RAM */
232#define L2C_310_RAM_3_CYCLE_LAT_VAL 0x00000002
233/** @brief 4 cycles of latency for tag RAM */
234#define L2C_310_RAM_4_CYCLE_LAT_VAL 0x00000003
235/** @brief 5 cycles of latency for tag RAM */
236#define L2C_310_RAM_5_CYCLE_LAT_VAL 0x00000004
237/** @brief 6 cycles of latency for tag RAM */
238#define L2C_310_RAM_6_CYCLE_LAT_VAL 0x00000005
239/** @brief 7 cycles of latency for tag RAM */
240#define L2C_310_RAM_7_CYCLE_LAT_VAL 0x00000006
241/** @brief 8 cycles of latency for tag RAM */
242#define L2C_310_RAM_8_CYCLE_LAT_VAL 0x00000007
243/** @brief Shift left setup latency values by this value */
244#define L2C_310_RAM_SETUP_SHIFT 0x00000000
245/** @brief Shift left read latency values by this value */
246#define L2C_310_RAM_READ_SHIFT 0x00000004
247/** @brief Shift left write latency values by this value */
248#define L2C_310_RAM_WRITE_SHIFT 0x00000008
249/** @brief Mask for RAM setup latency */
250#define L2C_310_RAM_SETUP_LAT_MASK 0x00000007
251/** @brief Mask for RAM read latency */
252#define L2C_310_RAM_READ_LAT_MASK 0x00000070
253/** @brief Mask for RAM write latency */
254#define L2C_310_RAM_WRITE_LAT_MASK 0x00000700
255  /** @brief Latency for tag RAM */
256  uint32_t tag_ram_ctrl;
257/** @brief Default latency for the tag RAM */
258#define L2C_310_TAG_RAM_DEFAULT_LAT \
259  ( ( L2C_310_RAM_2_CYCLE_LAT_VAL << L2C_310_RAM_SETUP_SHIFT ) \
260    | ( L2C_310_RAM_2_CYCLE_LAT_VAL << L2C_310_RAM_READ_SHIFT ) \
261    | ( L2C_310_RAM_2_CYCLE_LAT_VAL << L2C_310_RAM_WRITE_SHIFT ) )
262  /** @brief Latency for data RAM */
263  uint32_t data_ram_ctrl;
264/** @brief Default latency for the data RAM */
265#define L2C_310_DATA_RAM_DEFAULT_MASK \
266  ( ( L2C_310_RAM_2_CYCLE_LAT_VAL << L2C_310_RAM_SETUP_SHIFT ) \
267    | ( L2C_310_RAM_3_CYCLE_LAT_VAL << L2C_310_RAM_READ_SHIFT ) \
268    | ( L2C_310_RAM_2_CYCLE_LAT_VAL << L2C_310_RAM_WRITE_SHIFT ) )
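/* For reference, these defaults encode as register values 0x00000111 for the
 * tag RAM (2/2/2 cycles setup/read/write) and 0x00000121 for the data RAM
 * (2/3/2 cycles setup/read/write). */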
269
270  uint8_t reserved_110[0x200 - 0x110];
271
272  /** @brief Event counter control */
273  uint32_t ev_ctrl;
274
275  /** @brief Event counter 1 configuration */
276  uint32_t ev_cnt1_cfg;
277
278  /** @brief Event counter 0 configuration */
279  uint32_t ev_cnt0_cfg;
280
281  /** @brief Event counter 1 value */
282  uint32_t ev_cnt1;
283
284  /** @brief Event counter 0 value */
285  uint32_t ev_cnt0;
286
287  /** @brief Interrupt enable mask */
288  uint32_t int_mask;
289
290  /** @brief Masked interrupt status (read-only) */
291  uint32_t int_mask_status;
292
293  /** @brief Unmasked interrupt status */
294  uint32_t int_raw_status;
295
296  /** @brief Interrupt clear */
297  uint32_t int_clr;
298
299/**
300 * @name Interrupt bit masks
301 *
302 * @{
303 */
304
305/** @brief DECERR from L3 */
306#define L2C_310_INT_DECERR_MASK 0x00000100
307
308/** @brief SLVERR from L3 */
309#define L2C_310_INT_SLVERR_MASK 0x00000080
310
311/** @brief Error on L2 data RAM (Read) */
312#define L2C_310_INT_ERRRD_MASK 0x00000040
313
314/** @brief Error on L2 tag RAM (Read) */
315#define L2C_310_INT_ERRRT_MASK 0x00000020
316
317/** @brief Error on L2 data RAM (Write) */
318#define L2C_310_INT_ERRWD_MASK 0x00000010
319
320/** @brief Error on L2 tag RAM (Write) */
321#define L2C_310_INT_ERRWT_MASK 0x00000008
322
323/** @brief Parity Error on L2 data RAM (Read) */
324#define L2C_310_INT_PARRD_MASK 0x00000004
325
326/** @brief Parity Error on L2 tag RAM (Read) */
327#define L2C_310_INT_PARRT_MASK 0x00000002
328
329/** @brief Event Counter1/0 Overflow Increment */
330#define L2C_310_INT_ECNTR_MASK 0x00000001
331
332/** @} */
333
334  uint8_t reserved_224[0x730 - 0x224];
335
336  /** @brief Drain the STB */
337  uint32_t cache_sync;
338  uint8_t reserved_734[0x740 - 0x734];
339  /** @brief ARM Errata 753970 for pl310-r3p0 */
340  uint32_t dummy_cache_sync_reg;
341  uint8_t reserved_744[0x770 - 0x744];
342
343  /** @brief Invalidate line by PA */
344  uint32_t inv_pa;
345  uint8_t reserved_774[0x77c - 0x774];
346
347  /** @brief Invalidate by Way */
348  uint32_t inv_way;
349  uint8_t reserved_780[0x7b0 - 0x780];
350
351  /** @brief Clean Line by PA */
352  uint32_t clean_pa;
353  uint8_t reserved_7b4[0x7b8 - 0x7b4];
354
355  /** @brief Clean Line by Set/Way */
356  uint32_t clean_index;
357
358  /** @brief Clean by Way */
359  uint32_t clean_way;
360  uint8_t reserved_7c0[0x7f0 - 0x7c0];
361
362  /** @brief Clean and Invalidate Line by PA */
363  uint32_t clean_inv_pa;
364  uint8_t reserved_7f4[0x7f8 - 0x7f4];
365
366  /** @brief Clean and Invalidate Line by Set/Way */
367  uint32_t clean_inv_indx;
368
369  /** @brief Clean and Invalidate by Way */
370  uint32_t clean_inv_way;
371
372  /** @brief Data        lock down 0 */
373  uint32_t d_lockdown_0;
374
375  /** @brief Instruction lock down 0 */
376  uint32_t i_lockdown_0;
377
378  /** @brief Data        lock down 1 */
379  uint32_t d_lockdown_1;
380
381  /** @brief Instruction lock down 1 */
382  uint32_t i_lockdown_1;
383
384  /** @brief Data        lock down 2 */
385  uint32_t d_lockdown_2;
386
387  /** @brief Instruction lock down 2 */
388  uint32_t i_lockdown_2;
389
390  /** @brief Data        lock down 3 */
391  uint32_t d_lockdown_3;
392
393  /** @brief Instruction lock down 3 */
394  uint32_t i_lockdown_3;
395
396  /** @brief Data        lock down 4 */
397  uint32_t d_lockdown_4;
398
399  /** @brief Instruction lock down 4 */
400  uint32_t i_lockdown_4;
401
402  /** @brief Data        lock down 5 */
403  uint32_t d_lockdown_5;
404
405  /** @brief Instruction lock down 5 */
406  uint32_t i_lockdown_5;
407
408  /** @brief Data        lock down 6 */
409  uint32_t d_lockdown_6;
410
411  /** @brief Instruction lock down 6 */
412  uint32_t i_lockdown_6;
413
414  /** @brief Data        lock down 7 */
415  uint32_t d_lockdown_7;
416
417  /** @brief Instruction lock down 7 */
418  uint32_t i_lockdown_7;
419
420  uint8_t reserved_940[0x950 - 0x940];
421
422  /** @brief Lockdown by Line Enable */
423  uint32_t lock_line_en;
424
425  /** @brief Cache lockdown by way */
426  uint32_t unlock_way;
427
428  uint8_t reserved_958[0xc00 - 0x958];
429
430  /** @brief Address range redirect, part 1 */
431  uint32_t addr_filtering_start;
432
433  /** @brief Address range redirect, part 2 */
434  uint32_t addr_filtering_end;
435
436/** @brief Address filtering valid bits*/
437#define L2C_310_ADDR_FILTER_VALID_MASK 0xFFF00000
438
439/** @brief Address filtering enable bit*/
440#define L2C_310_ADDR_FILTER_ENABLE_MASK 0x00000001
441
442  uint8_t reserved_c08[0xf40 - 0xc08];
443
444  /** @brief Debug control */
445  uint32_t debug_ctrl;
446
447/** @brief Debug SPIDEN bit */
448#define L2C_310_DEBUG_SPIDEN_MASK 0x00000004
449
450/** @brief Debug DWB bit, forces write through */
451#define L2C_310_DEBUG_DWB_MASK 0x00000002
452
453/** @brief Debug DCL bit, disables cache line fill */
454#define L2C_310_DEBUG_DCL_MASK 0x00000001
455
456  uint8_t reserved_f44[0xf60 - 0xf44];
457
458  /** @brief Prefetch control */
459  uint32_t prefetch_ctrl;
460/** @brief Prefetch offset */
461#define L2C_310_PREFETCH_OFFSET_MASK 0x0000001F
462  uint8_t reserved_f64[0xf80 - 0xf64];
463
464  /** @brief Power control */
465  uint32_t power_ctrl;
466} L2CC;
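/* Typical access pattern used throughout this file:
 *
 *   volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
 *
 *   l2cc->clean_inv_pa = some_physical_address;
 *   l2c_310_sync( l2cc );
 */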
467
468rtems_interrupt_lock l2c_310_lock = RTEMS_INTERRUPT_LOCK_INITIALIZER(
469  "cache"
470);
471
472/* Errata table for the L2C-310 Level 2 cache from ARM.
473 * Information taken from ARM's
474 * "CoreLink controllers and peripherals
475 * - System controllers
476 * - L2C-310 Level 2 Cache Controller
477 * - Revision r3p3
478 * - Software Developer Errata Notice
479 * - ARM CoreLink Level 2 Cache Controller (L2C-310 or PL310),
480 *   r3 releases Software Developers Errata Notice"
481 * Please see this document for more information on these errata. */
482#if BSP_ARM_L2C_310_RTL_RELEASE == L2C_310_RTL_RELEASE_R3_P0
483#define L2C_310_ERRATA_IS_APPLICABLE_753970
484#endif
485
486static bool l2c_310_errata_is_applicable_727913(
487  uint32_t rtl_release
488)
489{
490  bool is_applicable = false;
491
492  switch ( rtl_release ) {
493    case L2C_310_RTL_RELEASE_R3_P3:
494    case L2C_310_RTL_RELEASE_R3_P2:
495    case L2C_310_RTL_RELEASE_R3_P1:
496    case L2C_310_RTL_RELEASE_R2_P0:
497    case L2C_310_RTL_RELEASE_R1_P0:
498    case L2C_310_RTL_RELEASE_R0_P0:
499      is_applicable = false;
500      break;
501    case L2C_310_RTL_RELEASE_R3_P0:
502      is_applicable = true;
503      break;
504    default:
505      assert( 0 );
506      break;
507  }
508
509  return is_applicable;
510}
511
512static bool l2c_310_errata_is_applicable_727914(
513  uint32_t rtl_release
514)
515{
516  bool is_applicable = false;
517
518  switch ( rtl_release ) {
519    case L2C_310_RTL_RELEASE_R3_P3:
520    case L2C_310_RTL_RELEASE_R3_P2:
521    case L2C_310_RTL_RELEASE_R3_P1:
522    case L2C_310_RTL_RELEASE_R2_P0:
523    case L2C_310_RTL_RELEASE_R1_P0:
524    case L2C_310_RTL_RELEASE_R0_P0:
525      is_applicable = false;
526      break;
527    case L2C_310_RTL_RELEASE_R3_P0:
528      is_applicable = true;
529      break;
530    default:
531      assert( 0 );
532      break;
533  }
534
535  return is_applicable;
536}
537
538static bool l2c_310_errata_is_applicable_727915(
539  uint32_t rtl_release
540)
541{
542  bool is_applicable = false;
543
544  switch ( rtl_release ) {
545    case L2C_310_RTL_RELEASE_R3_P3:
546    case L2C_310_RTL_RELEASE_R3_P2:
547    case L2C_310_RTL_RELEASE_R3_P1:
548    case L2C_310_RTL_RELEASE_R1_P0:
549    case L2C_310_RTL_RELEASE_R0_P0:
550      is_applicable = false;
551      break;
552    case L2C_310_RTL_RELEASE_R3_P0:
553    case L2C_310_RTL_RELEASE_R2_P0:
554      is_applicable = true;
555      break;
556    default:
557      assert( 0 );
558      break;
559  }
560
561  return is_applicable;
562}
563
564static bool l2c_310_errata_is_applicable_729806(
565  uint32_t rtl_release
566)
567{
568  bool is_applicable = false;
569
570  switch ( rtl_release ) {
571    case L2C_310_RTL_RELEASE_R3_P3:
572    case L2C_310_RTL_RELEASE_R3_P2:
573    case L2C_310_RTL_RELEASE_R2_P0:
574    case L2C_310_RTL_RELEASE_R1_P0:
575    case L2C_310_RTL_RELEASE_R0_P0:
576      is_applicable = false;
577      break;
578    case L2C_310_RTL_RELEASE_R3_P1:
579    case L2C_310_RTL_RELEASE_R3_P0:
580      is_applicable = true;
581      break;
582    default:
583      assert( 0 );
584      break;
585  }
586
587  return is_applicable;
588}
589
590static bool l2c_310_errata_is_applicable_729815(
591  uint32_t rtl_release
592)
593{
594  bool is_applicable = false;
595
596  switch ( rtl_release ) {
597    case L2C_310_RTL_RELEASE_R3_P3:
598    case L2C_310_RTL_RELEASE_R1_P0:
599    case L2C_310_RTL_RELEASE_R0_P0:
600      is_applicable = false;
601      break;
602    case L2C_310_RTL_RELEASE_R3_P2:
603    case L2C_310_RTL_RELEASE_R3_P1:
604    case L2C_310_RTL_RELEASE_R3_P0:
605    case L2C_310_RTL_RELEASE_R2_P0:
606      is_applicable = true;
607      break;
608    default:
609      assert( 0 );
610      break;
611  }
612
613  return is_applicable;
614}
615
616static bool l2c_310_errata_is_applicable_742884(
617  uint32_t rtl_release
618)
619{
620  bool is_applicable = false;
621
622  switch ( rtl_release ) {
623    case L2C_310_RTL_RELEASE_R3_P3:
624    case L2C_310_RTL_RELEASE_R3_P2:
625    case L2C_310_RTL_RELEASE_R3_P0:
626    case L2C_310_RTL_RELEASE_R2_P0:
627    case L2C_310_RTL_RELEASE_R1_P0:
628    case L2C_310_RTL_RELEASE_R0_P0:
629      is_applicable = false;
630      break;
631    case L2C_310_RTL_RELEASE_R3_P1:
632      is_applicable = true;
633      break;
634    default:
635      assert( 0 );
636      break;
637  }
638
639  return is_applicable;
640}
641
642static bool l2c_310_errata_is_applicable_752271(
643  uint32_t rtl_release
644)
645{
646  bool is_applicable = false;
647
648  switch ( rtl_release ) {
649    case L2C_310_RTL_RELEASE_R3_P3:
650    case L2C_310_RTL_RELEASE_R3_P2:
651    case L2C_310_RTL_RELEASE_R2_P0:
652    case L2C_310_RTL_RELEASE_R1_P0:
653    case L2C_310_RTL_RELEASE_R0_P0:
654      is_applicable = false;
655      break;
656    case L2C_310_RTL_RELEASE_R3_P1:
657    case L2C_310_RTL_RELEASE_R3_P0:
658      is_applicable = true;
659      break;
660    default:
661      assert( 0 );
662      break;
663  }
664
665  return is_applicable;
666}
667
668static bool l2c_310_errata_is_applicable_765569(
669  uint32_t rtl_release
670)
671{
672  bool is_applicable = false;
673
674  switch ( rtl_release ) {
675    case L2C_310_RTL_RELEASE_R3_P3:
676    case L2C_310_RTL_RELEASE_R3_P2:
677    case L2C_310_RTL_RELEASE_R3_P1:
678    case L2C_310_RTL_RELEASE_R3_P0:
679    case L2C_310_RTL_RELEASE_R2_P0:
680    case L2C_310_RTL_RELEASE_R1_P0:
681    case L2C_310_RTL_RELEASE_R0_P0:
682      is_applicable = true;
683      break;
684    default:
685      assert( 0 );
686      break;
687  }
688
689  return is_applicable;
690}
691
692static bool l2c_310_errata_is_applicable_769419(
693  uint32_t rtl_release
694)
695{
696  bool is_applicable = false;
697
698  switch ( rtl_release ) {
699    case L2C_310_RTL_RELEASE_R3_P3:
700    case L2C_310_RTL_RELEASE_R3_P2:
701      is_applicable = false;
702      break;
703    case L2C_310_RTL_RELEASE_R3_P1:
704    case L2C_310_RTL_RELEASE_R3_P0:
705    case L2C_310_RTL_RELEASE_R2_P0:
706    case L2C_310_RTL_RELEASE_R1_P0:
707    case L2C_310_RTL_RELEASE_R0_P0:
708      is_applicable = true;
709      break;
710    default:
711      assert( 0 );
712      break;
713  }
714
715  return is_applicable;
716}
717
718#if BSP_ARM_L2C_310_RTL_RELEASE == L2C_310_RTL_RELEASE_R0_P0 \
719   || BSP_ARM_L2C_310_RTL_RELEASE == L2C_310_RTL_RELEASE_R1_P0
720#define L2C_310_ERRATA_IS_APPLICABLE_588369
721#endif
722
723#ifdef CACHE_ERRATA_CHECKS_FOR_IMPLEMENTED_ERRATAS
724static bool l2c_310_errata_is_applicable_754670(
725  uint32_t rtl_release
726)
727{
728  bool is_applicable = false;
729
730  switch ( rtl_release ) {
731    case L2C_310_RTL_RELEASE_R3_P3:
732    case L2C_310_RTL_RELEASE_R3_P2:
733    case L2C_310_RTL_RELEASE_R3_P1:
734    case L2C_310_RTL_RELEASE_R3_P0:
735    case L2C_310_RTL_RELEASE_R2_P0:
736    case L2C_310_RTL_RELEASE_R1_P0:
737    case L2C_310_RTL_RELEASE_R0_P0:
738      is_applicable = true;
739      break;
740    default:
741      assert( 0 );
742      break;
743  }
744
745  return is_applicable;
746}
747#endif /* CACHE_ERRATA_CHECKS_FOR_IMPLEMENTED_ERRATAS */
748
749/* The common workaround for this erratum would be to add a
750 * data synchronization barrier to the beginning of the abort handler.
751 * But for RTEMS a call of the abort handler means a fatal condition anyway.
752 * So there is no need to handle this erratum */
753#define CACHE_ARM_ERRATA_775420_HANDLER()                   \
754  if( arm_errata_is_applicable_processor_errata_775420 ) {  \
755  }
756
757static void l2c_310_check_errata( uint32_t rtl_release )
758{
759  /* Erratum 588369 is handled within this file, see l2c_310_flush_1_line().
760   * The erratum says that a clean + invalidate by PA may keep the cache line
761   * if it was clean.  See ARM's documentation on the erratum for the
762   * recommended workaround. */
763  /* assert( ! l2c_310_errata_is_applicable_588369( rtl_release ) ); */
764
765  /* Unhandled erratum present: 727913 Prefetch dropping feature can cause
766   * incorrect behavior when PL310 handles reads that cross cache line
767   * boundary */
768  assert( ! l2c_310_errata_is_applicable_727913( rtl_release ) );
769
770  /* Unhandled erratum present: 727914 Double linefill feature can cause
771   * deadlock */
772  assert( ! l2c_310_errata_is_applicable_727914( rtl_release ) );
773
774  /* Unhandled erratum present: 727915 Background Clean and Invalidate by Way
775   * operation can cause data corruption */
776  assert( ! l2c_310_errata_is_applicable_727915( rtl_release ) );
777
778  /* Unhandled erratum present: 729806 Speculative reads from the Cortex-A9
779   * MPCore processor can cause deadlock */
780  assert( ! l2c_310_errata_is_applicable_729806( rtl_release ) );
781
782  if( l2c_310_errata_is_applicable_729815( rtl_release ) )
783  {
784    volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
785
786    assert( 0 == ( l2cc->aux_ctrl & L2C_310_AUX_HPSODRE_MASK ) );
787
788    /* Erratum: 729815 The “High Priority for SO and Dev reads” feature can
789     * cause Quality of Service issues to cacheable read transactions*/
790
791    /* Conditions
792       This problem occurs when the following conditions are met:
793       1. Bit[10] “High Priority for SO and Dev reads enable” of the PL310
794          Auxiliary Control Register is set to 1.
795       2. PL310 receives a cacheable read that misses in the L2 cache.
796       3. PL310 receives a continuous flow of Strongly Ordered or Device
797          reads that take all address slots in the master interface.
798       Workaround
799       A workaround is only necessary in systems that are able to issue a
800       continuous flow of Strongly Ordered or Device reads. In such a case,
801       the workaround is to disable the “High Priority for SO and Dev reads”
802       feature. This is the default behavior.*/
803  }
804
805  /* Unhandled erratum present: 742884 Double linefill feature might introduce
806   * circular dependency and deadlock */
807  assert( ! l2c_310_errata_is_applicable_742884( rtl_release ) );
808
809  /* Unhandled erratum present: 752271 Double linefill feature can cause data
810   * corruption */
811  assert( ! l2c_310_errata_is_applicable_752271( rtl_release ) );
812
813  /* This erratum can not be worked around: 754670 A continuous write flow can
814   * stall a read targeting the same memory area
815   * But this erratum does not lead to any data corruption */
816  /* assert( ! l2c_310_errata_is_applicable_754670() ); */
817
818  if( l2c_310_errata_is_applicable_765569( rtl_release ) )
819  {
820    volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
821
822    assert( !( ( l2cc->aux_ctrl & L2C_310_AUX_IPFE_MASK
823                 || l2cc->aux_ctrl & L2C_310_AUX_DPFE_MASK )
824               && ( ( l2cc->prefetch_ctrl & L2C_310_PREFETCH_OFFSET_MASK )
825                    == 23 ) ) );
826
827    /* Unhandled erratum present: 765569 Prefetcher can cross 4KB boundary if
828     * offset is programmed with value 23 */
829
830    /* Conditions
831       This problem occurs when the following conditions are met:
832       1. One of the Prefetch Enable bits (bits [29:28] of the Auxiliary or
833          Prefetch Control Register) is set HIGH.
834       2. The prefetch offset bits are programmed with value 23 (5'b10111).
835       Workaround
836       A workaround for this erratum is to program the prefetch offset with any
837       value except 23.*/
838  }
839
840  /* Unhandled erratum present: 769419 No automatic Store Buffer drain,
841   * visibility of written data requires an explicit Cache Sync operation */
842  assert( ! l2c_310_errata_is_applicable_769419( rtl_release ) );
843}
844
845static inline void
846l2c_310_sync( volatile L2CC *l2cc )
847{
848#ifdef L2C_310_ERRATA_IS_APPLICABLE_753970
849  l2cc->dummy_cache_sync_reg = 0;
850#else
851  l2cc->cache_sync = 0;
852#endif
853}
854
855static inline void
856l2c_310_flush_1_line( volatile L2CC *l2cc, uint32_t d_addr )
857{
858#ifdef L2C_310_ERRATA_IS_APPLICABLE_588369
859  /*
860  * Errata 588369 says that clean + inv may keep the
861  * cache line if it was clean, the recommended
862  * workaround is to clean then invalidate the cache
863  * line, with write-back and cache linefill disabled.
864  */
865  l2cc->clean_pa     = d_addr;
866  l2c_310_sync( l2cc );
867  l2cc->inv_pa       = d_addr;
868#else
869  l2cc->clean_inv_pa = d_addr;
870#endif
871}
872
873static inline void
874l2c_310_flush_range( const void* d_addr, const size_t n_bytes )
875{
876  rtems_interrupt_lock_context lock_context;
877  /* Back the starting address up to the start of a line and flush up to ADDR_LAST */
878  uint32_t       adx               = (uint32_t)d_addr
879    & ~L2C_310_DATA_LINE_MASK;
880  const uint32_t ADDR_LAST         =
881    (uint32_t)( (size_t)d_addr + n_bytes - 1 );
882  uint32_t       block_end         =
883    L2C_310_MIN( ADDR_LAST, adx + L2C_310_MAX_LOCKING_BYTES );
884  volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
885
886  rtems_interrupt_lock_acquire( &l2c_310_lock, &lock_context );
887
888  for (;
889       adx      <= ADDR_LAST;
890       adx       = block_end + 1,
891       block_end = L2C_310_MIN( ADDR_LAST, adx + L2C_310_MAX_LOCKING_BYTES )) {
892    for (; adx <= block_end; adx += CPU_DATA_CACHE_ALIGNMENT ) {
893      l2c_310_flush_1_line( l2cc, adx );
894    }
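    /* Release and re-acquire the lock between blocks so that pending interrupts can be serviced */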
895    if( block_end < ADDR_LAST ) {
896      rtems_interrupt_lock_release( &l2c_310_lock, &lock_context );
897      rtems_interrupt_lock_acquire( &l2c_310_lock, &lock_context );
898    }
899  }
900  l2c_310_sync( l2cc );
901  rtems_interrupt_lock_release( &l2c_310_lock, &lock_context );
902}
903
904static inline void
905l2c_310_flush_entire( void )
906{
907  volatile L2CC               *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
908  rtems_interrupt_lock_context lock_context;
909
910  /* Only flush if level 2 cache is active */
911  if( ( l2cc->ctrl & L2C_310_CTRL_ENABLE ) != 0 ) {
912
913    /* ensure ordering with previous memory accesses */
914    _ARM_Data_memory_barrier();
915
916    rtems_interrupt_lock_acquire( &l2c_310_lock, &lock_context );
917    l2cc->clean_inv_way = L2C_310_WAY_MASK;
918
919    while ( l2cc->clean_inv_way & L2C_310_WAY_MASK ) {};
920
921    /* Wait for the flush to complete */
922    l2c_310_sync( l2cc );
923
924    rtems_interrupt_lock_release( &l2c_310_lock, &lock_context );
925  }
926}
927
928static inline void
929l2c_310_invalidate_1_line( const void *d_addr )
930{
931  volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
932
933
934  l2cc->inv_pa = (uint32_t) d_addr;
935  l2c_310_sync( l2cc );
936}
937
938static inline void
939l2c_310_invalidate_range( uint32_t adx, const uint32_t ADDR_LAST )
940{
941  volatile L2CC               *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
942  rtems_interrupt_lock_context lock_context;
943
944  rtems_interrupt_lock_acquire( &l2c_310_lock, &lock_context );
945  for (;
946       adx <= ADDR_LAST;
947       adx += CPU_INSTRUCTION_CACHE_ALIGNMENT ) {
948    /* Invalidate L2 cache line */
949    l2cc->inv_pa = adx;
950  }
951  l2c_310_sync( l2cc );
952  rtems_interrupt_lock_release( &l2c_310_lock, &lock_context );
953}
954
955static inline void
956l2c_310_invalidate_entire( void )
957{
958  volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
959
960  /* Invalidate the caches */
961
962  /* ensure ordering with previous memory accesses */
963  _ARM_Data_memory_barrier();
964
965  l2cc->inv_way = L2C_310_WAY_MASK;
966
967  while ( l2cc->inv_way & L2C_310_WAY_MASK ) ;
968
969  /* Wait for the invalidate to complete */
970  l2c_310_sync( l2cc );
971}
972
973static inline void
974l2c_310_clean_and_invalidate_entire( void )
975{
976  volatile L2CC               *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
977  rtems_interrupt_lock_context lock_context;
978
979  if( ( l2cc->ctrl & L2C_310_CTRL_ENABLE ) != 0 ) {
980    /* Invalidate the caches */
981
982    /* ensure ordering with previous memory accesses */
983    _ARM_Data_memory_barrier();
984
985    rtems_interrupt_lock_acquire( &l2c_310_lock, &lock_context );
986    l2cc->clean_inv_way = L2C_310_WAY_MASK;
987
988    while ( l2cc->clean_inv_way & L2C_310_WAY_MASK ) ;
989
990    /* Wait for the invalidate to complete */
991    l2c_310_sync( l2cc );
992
993    rtems_interrupt_lock_release( &l2c_310_lock, &lock_context );
994  }
995}
996
997static inline void
998l2c_310_freeze( void )
999{
1000  /* To be implemented as needed, if supported
1001   by hardware at all */
1002}
1003
1004static inline void
1005l2c_310_unfreeze( void )
1006{
1007  /* To be implemented as needed, if supported
1008   by hardware at all */
1009}
1010
1011static inline size_t
1012l2c_310_get_cache_size( void )
1013{
1014  size_t         size       = 0;
1015  volatile L2CC *l2cc       = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
1016  uint32_t       cache_type = l2cc->cache_type;
1017  uint32_t       way_size;
1018  uint32_t       num_ways;
1019
1020  way_size = (cache_type & L2C_310_TYPE_SIZE_D_WAYS_MASK)
1021    >> L2C_310_TYPE_SIZE_D_WAYS_SHIFT;
1022  num_ways = (cache_type & L2C_310_TYPE_NUM_D_WAYS_MASK)
1023    >> L2C_310_TYPE_NUM_D_WAYS_SHIFT;
1024
1025  assert( way_size <= 0x07 );
1026  assert( num_ways <= 0x01 );
1027  if(  way_size <= 0x07 && num_ways <= 0x01 ) {
1028    if( way_size == 0x00 ) {
1029      way_size = 16 * 1024;
1030    } else if( way_size == 0x07 ) {
1031      way_size = 512 * 1024;
1032    } else {
1033      way_size = (1 << (way_size - 1)) * 16 * 1024;
1034    }
1035    switch( num_ways ) {
1036      case 0:
1037        num_ways = 8;
1038        break;
1039      case 1:
1040        num_ways = 16;
1041        break;
1042      default:
1043        num_ways = 0;
1044        break;
1045    }
1046    size = way_size * num_ways;
1047  }
1048  return size;
1049}
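/* Worked example: a way-size field of 0x3 and a num-ways field of 0 yield
 * (1 << 2) * 16 KiB = 64 KiB per way and 8 ways, i.e. a 512 KiB L2 cache. */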
1050
1051static void l2c_310_unlock( volatile L2CC *l2cc )
1052{
1053  l2cc->d_lockdown_0 = 0;
1054  l2cc->i_lockdown_0 = 0;
1055  l2cc->d_lockdown_1 = 0;
1056  l2cc->i_lockdown_1 = 0;
1057  l2cc->d_lockdown_2 = 0;
1058  l2cc->i_lockdown_2 = 0;
1059  l2cc->d_lockdown_3 = 0;
1060  l2cc->i_lockdown_3 = 0;
1061  l2cc->d_lockdown_4 = 0;
1062  l2cc->i_lockdown_4 = 0;
1063  l2cc->d_lockdown_5 = 0;
1064  l2cc->i_lockdown_5 = 0;
1065  l2cc->d_lockdown_6 = 0;
1066  l2cc->i_lockdown_6 = 0;
1067  l2cc->d_lockdown_7 = 0;
1068  l2cc->i_lockdown_7 = 0;
1069}
1070
1071static void l2c_310_wait_for_background_ops( volatile L2CC *l2cc )
1072{
1073  while ( l2cc->inv_way & L2C_310_WAY_MASK ) ;
1074
1075  while ( l2cc->clean_way & L2C_310_WAY_MASK ) ;
1076
1077  while ( l2cc->clean_inv_way & L2C_310_WAY_MASK ) ;
1078}
1079
1080/* Only the r3p2 and r3p3 revisions of the L2C-310 cache controller are supported */
1081
1082#if (BSP_ARM_L2C_310_ID & L2C_310_ID_PART_MASK) \
1083  != L2C_310_ID_PART_L310
1084#error "invalid L2C-310 cache controller part number"
1085#endif
1086
1087#if (BSP_ARM_L2C_310_RTL_RELEASE != L2C_310_RTL_RELEASE_R3_P2) \
1088  && (BSP_ARM_L2C_310_RTL_RELEASE != L2C_310_RTL_RELEASE_R3_P3)
1089#error "invalid L2C-310 cache controller RTL revision"
1090#endif
1091
1092static inline void
1093l2c_310_enable( void )
1094{
1095  volatile L2CC *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
1096  uint32_t cache_id = l2cc->cache_id;
1097  uint32_t rtl_release = cache_id & L2C_310_ID_RTL_MASK;
1098  uint32_t id_mask = L2C_310_ID_IMPL_MASK | L2C_310_ID_PART_MASK;
1099  uint32_t ctrl;
1100
1101  /*
1102   * Do we actually have an L2C-310 cache controller?  Has BSP_ARM_L2C_310_BASE
1103   * been configured correctly?
1104   */
1105  if (
1106    (BSP_ARM_L2C_310_ID & id_mask) != (cache_id & id_mask)
1107      || rtl_release < BSP_ARM_L2C_310_RTL_RELEASE
1108  ) {
1109    bsp_fatal( ARM_FATAL_L2C_310_UNEXPECTED_ID );
1110  }
1111
1112  l2c_310_check_errata( rtl_release );
1113
1114  ctrl = l2cc->ctrl;
1115
1116  if ( ( ctrl & L2C_310_CTRL_EXCL_CONFIG ) != 0 ) {
1117    bsp_fatal( ARM_FATAL_L2C_310_EXCLUSIVE_CONFIG );
1118  }
1119
1120  /* Only enable if L2CC is currently disabled */
1121  if( ( ctrl & L2C_310_CTRL_ENABLE ) == 0 ) {
1122    uint32_t aux_ctrl;
1123    int ways;
1124
1125    /* Make sure that I&D is not locked down when starting */
1126    l2c_310_unlock( l2cc );
1127
1128    l2c_310_wait_for_background_ops( l2cc );
1129
1130    aux_ctrl = l2cc->aux_ctrl;
1131
1132    if ( ( aux_ctrl & L2C_310_AUX_ASSOC_MASK ) != 0 ) {
1133      ways = 16;
1134    } else {
1135      ways = 8;
1136    }
1137
1138    if ( ways != L2C_310_NUM_WAYS ) {
1139      bsp_fatal( ARM_FATAL_L2C_310_UNEXPECTED_NUM_WAYS );
1140    }
1141
1142    /* Set up the way size */
1143    aux_ctrl &= L2C_310_AUX_REG_ZERO_MASK; /* Set way_size to 0 */
1144    aux_ctrl |= L2C_310_AUX_REG_DEFAULT_MASK;
1145
1146    l2cc->aux_ctrl = aux_ctrl;
1147
1148    /* Set up the latencies */
1149    l2cc->tag_ram_ctrl  = L2C_310_TAG_RAM_DEFAULT_LAT;
1150    l2cc->data_ram_ctrl = L2C_310_DATA_RAM_DEFAULT_MASK;
1151
1152    l2c_310_invalidate_entire();
1153
1154    /* Clear the pending interrupts */
1155    l2cc->int_clr = l2cc->int_raw_status;
1156
1157    /* Enable the L2CC */
1158    l2cc->ctrl = ctrl | L2C_310_CTRL_ENABLE;
1159  }
1160}
1161
1162static inline void
1163l2c_310_disable( void )
1164{
1165  volatile L2CC               *l2cc = (volatile L2CC *) BSP_ARM_L2C_310_BASE;
1166  rtems_interrupt_lock_context lock_context;
1167
1168  if ( l2cc->ctrl & L2C_310_CTRL_ENABLE ) {
1169    /* Clean and Invalidate L2 Cache */
1170    l2c_310_flush_entire();
1171    rtems_interrupt_lock_acquire( &l2c_310_lock, &lock_context );
1172
1173    l2c_310_wait_for_background_ops( l2cc );
1174
1175    /* Disable the L2 cache */
1176    l2cc->ctrl &= ~L2C_310_CTRL_ENABLE;
1177    rtems_interrupt_lock_release( &l2c_310_lock, &lock_context );
1178  }
1179}
1180
1181static inline void
1182_CPU_cache_enable_data( void )
1183{
1184  l2c_310_enable();
1185}
1186
1187static inline void
1188_CPU_cache_disable_data( void )
1189{
1190  arm_cache_l1_disable_data();
1191  l2c_310_disable();
1192}
1193
1194static inline void
1195_CPU_cache_enable_instruction( void )
1196{
1197  l2c_310_enable();
1198}
1199
1200static inline void
1201_CPU_cache_disable_instruction( void )
1202{
1203  arm_cache_l1_disable_instruction();
1204  l2c_310_disable();
1205}
1206
1207static inline void
1208_CPU_cache_flush_data_range(
1209  const void *d_addr,
1210  size_t      n_bytes
1211)
1212{
1213  if ( n_bytes != 0 ) {
1214    arm_cache_l1_flush_data_range(
1215      d_addr,
1216      n_bytes
1217    );
1218    l2c_310_flush_range(
1219      d_addr,
1220      n_bytes
1221    );
1222  }
1223}
1224
1225static inline void
1226_CPU_cache_flush_entire_data( void )
1227{
1228  arm_cache_l1_flush_entire_data();
1229  l2c_310_flush_entire();
1230}
1231
1232static inline void
1233_CPU_cache_invalidate_data_range(
1234  const void *addr_first,
1235  size_t     n_bytes
1236)
1237{
1238  if ( n_bytes > 0 ) {
1239    /* Back the starting address up to the start of a line and invalidate up to ADDR_LAST */
1240    uint32_t       adx       = (uint32_t) addr_first
1241      & ~L2C_310_DATA_LINE_MASK;
1242    const uint32_t ADDR_LAST =
1243      (uint32_t)( (size_t)addr_first + n_bytes - 1 );
1244    uint32_t       block_end =
1245      L2C_310_MIN( ADDR_LAST, adx + L2C_310_MAX_LOCKING_BYTES );
1246
1247    /* The interrupt lock is held while the lines are invalidated, so operate
1248     * on at most L2C_310_MAX_LOCKING_BYTES at a time */
1249    for (;
1250         adx      <= ADDR_LAST;
1251         adx       = block_end + 1,
1252         block_end = L2C_310_MIN( ADDR_LAST, adx + L2C_310_MAX_LOCKING_BYTES )) {
1253      l2c_310_invalidate_range(
1254        adx,
1255        block_end
1256      );
1257    }
1258    arm_cache_l1_invalidate_data_range(
1259      addr_first,
1260      n_bytes
1261    );
1262
1263    adx       = (uint32_t)addr_first & ~L2C_310_DATA_LINE_MASK;
1264    block_end = L2C_310_MIN( ADDR_LAST, adx + L2C_310_MAX_LOCKING_BYTES );
1265    for (;
1266         adx      <= ADDR_LAST;
1267         adx       = block_end + 1,
1268         block_end = L2C_310_MIN( ADDR_LAST, adx + L2C_310_MAX_LOCKING_BYTES )) {
1269      l2c_310_invalidate_range(
1270        adx,
1271        block_end
1272      );
1273    }
1274    arm_cache_l1_invalidate_data_range(
1275      addr_first,
1276      n_bytes
1277    );
1278  }
1279}
1280
1281static inline void
1282_CPU_cache_invalidate_entire_data( void )
1283{
1284  /* This is broadcast within the cluster */
1285  arm_cache_l1_flush_entire_data();
1286
1287  /* forces the address out past level 2 */
1288  l2c_310_clean_and_invalidate_entire();
1289
1290  /* This is broadcast within the cluster */
1291  arm_cache_l1_clean_and_invalidate_entire_data();
1292}
1293
1294static inline void
1295_CPU_cache_freeze_data( void )
1296{
1297  arm_cache_l1_freeze_data();
1298  l2c_310_freeze();
1299}
1300
1301static inline void
1302_CPU_cache_unfreeze_data( void )
1303{
1304  arm_cache_l1_unfreeze_data();
1305  l2c_310_unfreeze();
1306}
1307
1308static inline void
1309_CPU_cache_invalidate_instruction_range( const void *i_addr, size_t n_bytes)
1310{
1311  arm_cache_l1_invalidate_instruction_range( i_addr, n_bytes );
1312}
1313
1314static inline void
1315_CPU_cache_invalidate_entire_instruction( void )
1316{
1317  arm_cache_l1_invalidate_entire_instruction();
1318}
1319
1320static inline void
1321_CPU_cache_freeze_instruction( void )
1322{
1323  arm_cache_l1_freeze_instruction();
1324  l2c_310_freeze();
1325}
1326
1327static inline void
1328_CPU_cache_unfreeze_instruction( void )
1329{
1330  arm_cache_l1_unfreeze_instruction();
1331  l2c_310_unfreeze();
1332}
1333
1334static inline size_t
1335_CPU_cache_get_data_cache_size( const uint32_t level )
1336{
1337  size_t size = 0;
1338
1339  switch( level )
1340  {
1341    case 1:
1342      size = arm_cache_l1_get_data_cache_size();
1343      break;
1344    case 0:
1345    case 2:
1346      size = l2c_310_get_cache_size();
1347      break;
1348    default:
1349      size = 0;
1350      break;
1351  }
1352  return size;
1353}
1354
1355static inline size_t
1356_CPU_cache_get_instruction_cache_size( const uint32_t level )
1357{
1358  size_t size = 0;
1359
1360  switch( level )
1361  {
1362    case 1:
1363      size = arm_cache_l1_get_instruction_cache_size();
1364      break;
1365    case 0:
1366    case 2:
1367      size = l2c_310_get_cache_size();
1368      break;
1369    default:
1370      size = 0;
1371      break;
1372  }
1373  return size;
1374}
1375
1376
1377/** @} */
1378
1379#ifdef __cplusplus
1380}
1381#endif /* __cplusplus */
1382
1383#endif /* LIBBSP_ARM_SHARED_L2C_310_CACHE_H */