source: rtems/c/src/lib/libbsp/arm/altera-cyclone-v/hwlib/include/alt_cache.h @ 76386c1

Last change on this file was 76386c1, checked in by Sebastian Huber <sebastian.huber@…> on 08/26/14 at 14:00:44

bsp/altera-cyclone-v: Add DMA support hwlib files

/******************************************************************************
 *
 * Copyright 2013 Altera Corporation. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice,
 * this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice,
 * this list of conditions and the following disclaimer in the documentation
 * and/or other materials provided with the distribution.
 *
 * 3. The name of the author may not be used to endorse or promote products
 * derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ARE DISCLAIMED. IN NO
 * EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
 * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY
 * OF SUCH DAMAGE.
 *
 ******************************************************************************/

#ifndef __ALT_CACHE_H__
#define __ALT_CACHE_H__

#include "hwlib.h"

#ifdef __cplusplus
extern "C"
{
#endif

/*!
 * \addtogroup CACHE_MGR Cache Management API
 *
 * This module defines the cache management API for enabling and disabling the
 * L1 data cache, L1 instruction cache, L1 dynamic branch prediction caches, L1
 * TLB cache, and L2 cache in the SoC. It also allows users to perform cache
 * maintenance operations on these caches. This includes the following
 * operations:
 *  * Invalidate: Marks the cache line as being invalid, freeing up the space
 *    to cache other data. All APIs which enable a cache invalidate its
 *    contents before enabling it.
 *  * Clean: If the cache line is dirty, it synchronizes the cache line data
 *    with the upper level memory system and marks that line as clean. All APIs
 *    which disable a cache clean its contents before disabling it.
 *  * Purge: A term used in this API as a short form for clean and invalidate.
 *    This operation cleans and invalidates a cache line in that order, as a
 *    single command to the cache controller.
 *
 * The following reference materials were used in the design of this API:
 *  * ARM&reg; Architecture Reference Manual, ARMv7-A and ARMv7-R edition
 *  * Cortex&trade;-A9 Technical Reference Manual
 *  * Cortex&trade;-A9 MPCore Technical Reference Manual
 *  * CoreLink&trade; Level 2 Cache Controller L2C-310 Technical Reference
 *    Manual
 *
 * @{
 */

/*!
 * \addtogroup CACHE_SYS System Level Cache Management API
 *
 * This API group provides cache maintenance operations which affect multiple
 * cache levels.
 *
 * The enable and disable functions enable and disable all caches in the
 * system, respectively. For caches shared by the CPU cores, particularly the
 * L2 cache, once that cache is enabled it will not be invalidated again, and
 * once it is disabled it will not be cleaned again. This allows the
 * system-wide enable and disable to be used safely in both single-core and
 * multi-core scenarios.
 *
 * For cache maintenance operations, this API implements the procedures
 * outlined in the L2C-310 Technical Reference Manual, section 3.3.10,
 * subsection "System cache maintenance considerations". This allows for a
 * convenient way to invalidate, clean, or clean and invalidate cache data from
 * the L1 to L2 to L3 while avoiding any potential race conditions in
 * multi-core or multi-master scenarios. It assumes that the L1 and L2 caches
 * are set to "non-exclusive" mode, which means a segment of data can reside in
 * both the L1 and L2 simultaneously. This is the default mode for caches in
 * the system.
 *
 * The current implementation of the system cache APIs assumes that the MMU is
 * configured with a flat memory mapping, that is, every virtual address
 * matches the physical address exactly. This restriction may be lifted in a
 * future release of the cache API implementation.
 *
 * @{
 */

/*!
 * Enables support for a non-flat virtual memory. A flat virtual memory is one
 * where every virtual address matches the physical address exactly, making
 * the virtual to physical translation trivial. Adding support for non-flat
 * memory adds some overhead for the VA to PA translation and error detection.
 *
 * To enable non-flat virtual memory support, define
 * ALT_CACHE_SUPPORT_NON_FLAT_VIRTUAL_MEMORY=1 in your Makefile when compiling
 * HWLibs.
 */
#ifndef ALT_CACHE_SUPPORT_NON_FLAT_VIRTUAL_MEMORY
#define ALT_CACHE_SUPPORT_NON_FLAT_VIRTUAL_MEMORY (0)
#endif

/*!
 * This is the system wide cache line size, given in bytes.
 */
#define ALT_CACHE_LINE_SIZE         32

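/*
 * Illustrative sketch (not part of the original header): the maintenance
 * functions below require cache line aligned addresses and lengths. A caller
 * with an arbitrarily aligned buffer can round the bounds outward to
 * ALT_CACHE_LINE_SIZE multiples as shown here. The buffer name and size are
 * hypothetical.
 *
 * \code
 * extern uint8_t rx_buffer[1000];
 *
 * uintptr_t begin = (uintptr_t)rx_buffer;
 * uintptr_t end   = begin + sizeof(rx_buffer);
 *
 * // Round the start down and the end up to cache line boundaries.
 * begin &= ~(uintptr_t)(ALT_CACHE_LINE_SIZE - 1);
 * end    = (end + ALT_CACHE_LINE_SIZE - 1) & ~(uintptr_t)(ALT_CACHE_LINE_SIZE - 1);
 *
 * // The aligned segment [begin, end) can now be passed to the maintenance
 * // functions. Note that widening an invalidate range can discard unrelated
 * // data sharing the boundary lines, so buffers meant for invalidation are
 * // normally allocated line aligned in the first place.
 * \endcode
 */
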
/*!
 * Enables all caches and features which improve reliability and speed on all
 * cache controllers visible to the current CPU core. This includes parity
 * error detection. Cache controllers visible to multiple CPU cores, for
 * example the L2, will first be checked to ensure they are disabled before
 * being enabled. All necessary cache maintenance operations will be done
 * automatically.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_system_enable(void);

/*!
 * Disables all cache controllers visible to the current CPU core. Cache
 * controllers visible to multiple CPU cores, for example the L2, will first
 * be checked to ensure they are enabled before being disabled. All necessary
 * cache maintenance operations will be done automatically.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_system_disable(void);
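
/*
 * Illustrative sketch (assumption, not from the original header): a typical
 * early boot sequence on the primary core might simply enable every cache
 * level visible to it and check the result. The error handling shown is
 * hypothetical.
 *
 * \code
 * ALT_STATUS_CODE status = alt_cache_system_enable();
 * if (status != ALT_E_SUCCESS) {
 *     // Caches stay in their previous state; report or handle the failure.
 * }
 *
 * // ... later, before handing the system off (for example prior to a warm
 * // reset), cleanly write back and turn off all visible caches:
 * status = alt_cache_system_disable();
 * \endcode
 */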

/*!
 * Invalidates the specified contents of all cache levels visible to the
 * current CPU core for the given memory segment.
 *
 * The memory segment address and length specified must align to the
 * characteristics of the cache line. This means the address and length must be
 * multiples of the cache line size. To determine the cache line size, use the
 * \b ALT_CACHE_LINE_SIZE macro.
 *
 * The following pseudocode outlines the operations carried out by this
 * function:
 *  -# L2 invalidate address(es)
 *  -# L2 cache sync
 *  -# L1 invalidate address(es)
 *  -# DSB instruction
 *
 * The current implementation of the system cache APIs assumes that the MMU is
 * configured with a flat memory mapping, that is, every virtual address
 * matches the physical address exactly. This restriction may be lifted in a
 * future release of the cache API implementation.
 *
 * \param       vaddress
 *              The virtual address of the memory segment to be invalidated.
 *
 * \param       length
 *              The length of the memory segment to be invalidated.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 * \retval      ALT_E_BAD_ARG   The memory segment is invalid.
 * \retval      ALT_E_TMO       The memory operation timed out.
 */
ALT_STATUS_CODE alt_cache_system_invalidate(void * vaddress, size_t length);
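
/*
 * Illustrative sketch (assumption): before the CPU reads a buffer that a DMA
 * master has just written to memory, the stale cached copies must be
 * invalidated through all levels. The buffer is hypothetical; with a flat
 * mapping the virtual address equals the physical address used by the DMA
 * engine.
 *
 * \code
 * static uint8_t dma_rx_buffer[4 * ALT_CACHE_LINE_SIZE]
 *     __attribute__((aligned(ALT_CACHE_LINE_SIZE)));
 *
 * // ... DMA transfer into dma_rx_buffer completes ...
 *
 * if (alt_cache_system_invalidate(dma_rx_buffer, sizeof(dma_rx_buffer))
 *     == ALT_E_SUCCESS) {
 *     // The CPU now observes the data written by the DMA master.
 * }
 * \endcode
 */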

/*!
 * Cleans the specified contents of all cache levels visible to the current
 * CPU core for the given memory segment.
 *
 * The memory segment address and length specified must align to the
 * characteristics of the cache line. This means the address and length must be
 * multiples of the cache line size. To determine the cache line size, use the
 * \b ALT_CACHE_LINE_SIZE macro.
 *
 * The following pseudocode outlines the operations carried out by this
 * function:
 *  -# L1 clean address(es)
 *  -# DSB instruction
 *  -# L2 clean address(es)
 *  -# L2 cache sync
 *
 * The current implementation of the system cache APIs assumes that the MMU is
 * configured with a flat memory mapping, that is, every virtual address
 * matches the physical address exactly. This restriction may be lifted in a
 * future release of the cache API implementation.
 *
 * \param       vaddress
 *              The virtual address of the memory segment to be cleaned.
 *
 * \param       length
 *              The length of the memory segment to be cleaned.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 * \retval      ALT_E_BAD_ARG   The memory segment is invalid.
 * \retval      ALT_E_TMO       The memory operation timed out.
 */
ALT_STATUS_CODE alt_cache_system_clean(void * vaddress, size_t length);
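
/*
 * Illustrative sketch (assumption): before starting a DMA transfer that reads
 * from memory, any dirty cache lines covering the source buffer must be
 * written back so the DMA master sees the CPU's latest data. The buffer name
 * and size are hypothetical.
 *
 * \code
 * static uint8_t dma_tx_buffer[4 * ALT_CACHE_LINE_SIZE]
 *     __attribute__((aligned(ALT_CACHE_LINE_SIZE)));
 *
 * // ... CPU fills dma_tx_buffer ...
 *
 * if (alt_cache_system_clean(dma_tx_buffer, sizeof(dma_tx_buffer))
 *     == ALT_E_SUCCESS) {
 *     // Safe to start the DMA transfer from dma_tx_buffer.
 * }
 * \endcode
 */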

/*!
 * Cleans and invalidates the specified contents of all cache levels visible
 * to the current CPU core for the given memory segment.
 *
 * The memory segment address and length specified must align to the
 * characteristics of the cache line. This means the address and length must be
 * multiples of the cache line size. To determine the cache line size, use the
 * \b ALT_CACHE_LINE_SIZE macro.
 *
 * The following pseudocode outlines the operations carried out by this
 * function:
 *  -# L1 clean address(es)
 *  -# DSB instruction
 *  -# L2 clean and invalidate address(es)
 *  -# L2 cache sync
 *  -# L1 invalidate address(es)
 *  -# DSB instruction
 *
 * The current implementation of the system cache APIs assumes that the MMU is
 * configured with a flat memory mapping, that is, every virtual address
 * matches the physical address exactly. This restriction may be lifted in a
 * future release of the cache API implementation.
 *
 * \param       vaddress
 *              The virtual address of the memory segment to be purged.
 *
 * \param       length
 *              The length of the memory segment to be purged.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 * \retval      ALT_E_BAD_ARG   The memory segment is invalid.
 * \retval      ALT_E_TMO       The memory operation timed out.
 */
ALT_STATUS_CODE alt_cache_system_purge(void * vaddress, size_t length);
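
/*
 * Illustrative sketch (assumption): a purge suits a buffer used in both
 * directions, for example a shared command block that the CPU writes and a
 * peripheral then updates in place. The structure shown is hypothetical.
 *
 * \code
 * static struct {
 *     uint32_t command;
 *     uint32_t status;
 *     uint8_t  payload[2 * ALT_CACHE_LINE_SIZE - 8];
 * } shared_block __attribute__((aligned(ALT_CACHE_LINE_SIZE)));
 *
 * // Publish the CPU's writes and drop the cached copy in one operation, so
 * // the next CPU read fetches whatever the peripheral wrote back.
 * ALT_STATUS_CODE status = alt_cache_system_purge(&shared_block,
 *                                                 sizeof(shared_block));
 * \endcode
 */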

/*!
 * @}
 */

/*!
 * \addtogroup CACHE_L1 L1 Cache Management API
 *
 * This API group provides functions to interact with various components of the
 * L1 cache on the SoCFPGA. This includes the following cache components:
 *  * Instruction Cache
 *  * Data Cache
 *  * Parity error detection
 *  * Dynamic branch prediction
 *  * Data prefetching
 *
 * The API within this group only affects the L1 cache on the current CPU. To
 * interact with the L1 cache on another CPU, the API must be called from that
 * other CPU.
 *
 * With respect to bring-up, the L1 and L2 cache controller setups are fully
 * independent. The L2 can be set up at any time, before or after the L1 is
 * set up.
 * \internal
 * Source: Cortex-A9 MPCore TRM, section 5.3.4 "Multiprocessor bring-up".
 * \endinternal
 *
 * @{
 */

/*!
 * Enables all L1 caches and features on the current CPU core. This includes
 * the instruction cache, data cache, parity error detection, branch target
 * address cache, global history buffer, and data prefetching. All necessary
 * maintenance tasks will be taken care of.
 *
 * This function should not be mixed with other L1 cache related functions
 * which enable or disable caches individually.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_enable_all(void);

/*!
 * Disables all L1 caches and features on the current CPU core. This includes
 * the instruction cache, data cache, parity error detection, branch target
 * address cache, global history buffer, and data prefetching. All necessary
 * maintenance tasks will be taken care of.
 *
 * This function should not be mixed with other L1 cache related functions
 * which enable or disable caches individually.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_disable_all(void);
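
/*
 * Illustrative sketch (assumption): because these functions only act on the
 * calling core's L1, a multi-core bring-up would run them once per CPU, for
 * example from each core's startup hook, while the shared L2 is handled
 * elsewhere. The startup function name is hypothetical.
 *
 * \code
 * void per_cpu_cache_startup(void)
 * {
 *     // Executed by every core for itself; covers I-cache, D-cache, parity,
 *     // branch prediction, and data prefetching on that core only.
 *     ALT_STATUS_CODE status = alt_cache_l1_enable_all();
 *     if (status != ALT_E_SUCCESS) {
 *         // handle per-core failure
 *     }
 * }
 * \endcode
 */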

/*!
 * Enables the L1 instruction cache on the current CPU core. If the cache is
 * already enabled, nothing is done. Otherwise the instruction cache is first
 * invalidated before being enabled.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_instruction_enable(void);

/*!
 * Disables the L1 instruction cache on the current CPU core.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_instruction_disable(void);

/*!
 * Returns \b true when the L1 instruction cache is enabled and \b false when
 * it is disabled on the current CPU core.
 *
 * \retval      true            The L1 instruction cache is enabled.
 * \retval      false           The L1 instruction cache is disabled.
 */
bool alt_cache_l1_instruction_is_enabled(void);

/*!
 * Invalidates the contents of the L1 instruction cache on the current CPU
 * core.
 *
 * Normally this is done automatically as part of
 * alt_cache_l1_instruction_enable(), but in certain circumstances it may be
 * necessary to invalidate it manually. An example of this situation is when
 * the address space is remapped and the processor executes instructions from
 * the new memory area.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_instruction_invalidate(void);
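
/*
 * Illustrative sketch (assumption): after copying or patching code in memory,
 * the new instructions must be pushed out of the data cache and the stale
 * instruction and branch predictor entries dropped before jumping to them.
 * The destination buffer and the entry point are hypothetical.
 *
 * \code
 * static uint8_t code_ram[8192]
 *     __attribute__((aligned(ALT_CACHE_LINE_SIZE)));   // copied code lives here
 *
 * // 1. Write back the newly written code so instruction fetches see it.
 * alt_cache_l1_data_clean(code_ram, sizeof(code_ram));
 *
 * // 2. Drop stale instruction cache and branch predictor contents.
 * alt_cache_l1_instruction_invalidate();
 * alt_cache_l1_branch_invalidate();
 *
 * // 3. Safe to branch into code_ram now.
 * \endcode
 */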

/*!
 * Enables the L1 data cache on the current CPU core.
 *
 * If the cache is already enabled nothing is done. Otherwise the data cache is
 * first invalidated before being enabled.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_data_enable(void);

/*!
 * Disables the L1 data cache on the current CPU core.
 *
 * If the cache is already disabled nothing is done. Otherwise the data cache
 * is first cleaned before being disabled.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_data_disable(void);

/*!
 * Returns \b true when the L1 data cache is enabled and \b false when it is
 * disabled on the current CPU core.
 *
 * \retval      true            The L1 data cache is enabled.
 * \retval      false           The L1 data cache is disabled.
 */
bool alt_cache_l1_data_is_enabled(void);

/*!
 * Invalidates the specified contents of the L1 data cache on the current CPU
 * core for the given memory segment.
 *
 * The memory segment address and length specified must align to the
 * characteristics of the cache line. This means the address and length must be
 * multiples of the cache line size. To determine the cache line size, use the
 * \b ALT_CACHE_LINE_SIZE macro.
 *
 * \param       vaddress
 *              The virtual address of the memory segment to be invalidated.
 *
 * \param       length
 *              The length of the memory segment to be invalidated.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 * \retval      ALT_E_BAD_ARG   The memory segment is invalid.
 */
ALT_STATUS_CODE alt_cache_l1_data_invalidate(void * vaddress, size_t length);

/*!
 * Invalidates the entire contents of the L1 data cache on the current CPU
 * core.
 *
 * Normally this is done automatically as part of alt_cache_l1_data_enable(),
 * but in certain circumstances it may be necessary to invalidate it manually.
 * An example of this situation is when the address space is remapped and the
 * processor accesses memory from the new memory area.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_data_invalidate_all(void);

/*!
 * Cleans the specified contents of the L1 data cache on the current CPU core
 * for the given memory segment.
 *
 * The memory segment address and length specified must align to the
 * characteristics of the cache line. This means the address and length must be
 * multiples of the cache line size. To determine the cache line size, use the
 * \b ALT_CACHE_LINE_SIZE macro.
 *
 * \param       vaddress
 *              The virtual address of the memory segment to be cleaned.
 *
 * \param       length
 *              The length of the memory segment to be cleaned.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 * \retval      ALT_E_BAD_ARG   The memory segment is invalid.
 */
ALT_STATUS_CODE alt_cache_l1_data_clean(void * vaddress, size_t length);
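
/*
 * Illustrative sketch (assumption): the L1 data operations only affect the
 * calling core, so they suit data handed between cores through memory that is
 * not kept coherent by hardware. Here one core cleans a descriptor it has
 * filled in before signalling another core; the descriptor type and the
 * signalling step are hypothetical.
 *
 * \code
 * typedef struct {
 *     uint32_t work_id;
 *     uint32_t argument;
 *     uint8_t  pad[ALT_CACHE_LINE_SIZE - 8];   // keep one line per descriptor
 * } work_descriptor_t;
 *
 * static work_descriptor_t descriptor
 *     __attribute__((aligned(ALT_CACHE_LINE_SIZE)));
 *
 * descriptor.work_id  = 42;
 * descriptor.argument = 7;
 *
 * // Push this core's writes out of its private L1 data cache.
 * alt_cache_l1_data_clean(&descriptor, sizeof(descriptor));
 *
 * // ... signal the consumer core, which would invalidate its own L1 copy
 * // with alt_cache_l1_data_invalidate() before reading ...
 * \endcode
 */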

/*!
 * Cleans the entire L1 data cache for the current CPU core.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_data_clean_all(void);

/*!
 * Cleans and invalidates the specified contents of the L1 data cache on the
 * current CPU core for the given memory segment.
 *
 * The memory segment address and length specified must align to the
 * characteristics of the cache line. This means the address and length must be
 * multiples of the cache line size. To determine the cache line size, use the
 * \b ALT_CACHE_LINE_SIZE macro.
 *
 * Normally this is done automatically as part of alt_cache_l1_data_disable(),
 * but in certain circumstances it may be necessary to purge it manually.
 * An example of this situation is when the address space is remapped and the
 * processor accesses memory from the new memory area.
 *
 * \param       vaddress
 *              The virtual address of the memory segment to be purged.
 *
 * \param       length
 *              The length of the memory segment to be purged.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 * \retval      ALT_E_BAD_ARG   The memory segment is invalid.
 */
ALT_STATUS_CODE alt_cache_l1_data_purge(void * vaddress, size_t length);

/*!
 * Cleans and invalidates the entire L1 data cache for the current CPU core.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_data_purge_all(void);

/*!
 * Enables the parity error detection feature in the L1 caches on the current
 * CPU core.
 *
 * Ideally parity should be enabled before any L1 caches are enabled. If the
 * instruction, data, and / or dynamic branch predictor caches are already
 * enabled, they will first be cleaned (if needed) and disabled before parity
 * is enabled in hardware. Afterwards, the affected caches will be invalidated
 * and enabled.
 *
 * Parity and TLB interaction deserves special attention. The TLB is considered
 * to be an L1 cache but is enabled when the MMU, which is grouped in another
 * API, is enabled. Due to the system-wide influence of the MMU, it cannot be
 * disabled and enabled with impunity like the other L1 caches, which are
 * designed to operate as transparently as possible. Thus parity error
 * detection must be enabled before the L1 TLB cache, and by extension the MMU,
 * is enabled.
 *
 * For a parity error to be reported, the appropriate CPU PARITYFAIL interrupt
 * for the current CPU core must be enabled using the interrupt controller API.
 * For CPU0, ALT_INT_INTERRUPT_CPU0_PARITYFAIL is asserted if any parity error
 * is detected, while the other PARITYFAIL interrupts are for parity errors in
 * a specific memory. Refer to the interrupt controller API for more details
 * about programming the interrupt controller.
 *
 * In the event a parity error is detected, the appropriate CPU parity
 * interrupt will be raised. CPU parity interrupts are all edge triggered and
 * are cleared by acknowledging them in the interrupt controller API.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_parity_enable(void);
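
/*
 * Illustrative sketch (assumption): the ordering requirement above suggests a
 * per-core boot flow where parity comes first, then the individual L1 caches,
 * and only afterwards the MMU / TLB. Enabling the PARITYFAIL interrupt is done
 * through the separate interrupt controller API and is only indicated by a
 * comment here.
 *
 * \code
 * // 1. Parity first, while the L1 caches and the MMU are still off.
 * alt_cache_l1_parity_enable();
 *
 * // 2. Then the L1 caches on this core.
 * alt_cache_l1_instruction_enable();
 * alt_cache_l1_data_enable();
 * alt_cache_l1_branch_enable();
 *
 * // 3. Enable ALT_INT_INTERRUPT_CPU0_PARITYFAIL (or the per-memory variants)
 * //    via the interrupt controller API so parity errors are reported.
 *
 * // 4. Finally the MMU, which brings the L1 TLB cache online.
 * \endcode
 */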

/*!
 * Disables parity error detection in the L1 caches.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_parity_disable(void);

/*!
 * Returns \b true when parity error detection is enabled and \b false when it
 * is disabled on the current CPU core.
 *
 * \retval      true            Parity error detection for L1 caches is
 *                              enabled.
 * \retval      false           Parity error detection for L1 caches is
 *                              disabled.
 */
bool alt_cache_l1_parity_is_enabled(void);

/*!
 * Enables the dynamic branch predictor features on the current CPU core.
 *
 * This operation enables both the Branch Target Address Cache (BTAC) and
 * the Global History Buffer (GHB). Affected caches are automatically
 * invalidated before use.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_branch_enable(void);

/*!
 * Disables the dynamic branch predictor features on the current CPU core.
 *
 * This operation disables both the Branch Target Address Cache (BTAC) and
 * the Global History Buffer (GHB).
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_branch_disable(void);

/*!
 * Returns \b true when both the dynamic predictor features are enabled and
 * \b false when they are disabled on the current CPU core.
 *
 * \retval      true            The L1 branch predictor caches are all enabled.
 * \retval      false           Some or all L1 branch predictor caches are
 *                              disabled.
 */
bool alt_cache_l1_branch_is_enabled(void);

/*!
 * Invalidates the dynamic branch predictor feature caches on the current CPU
 * core.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_branch_invalidate(void);

/*!
 * Enables the L1 cache data prefetch feature on the current CPU core.
 *
 * This allows data to be prefetched into the data cache before it is to be
 * used. For example, in a loop the current iteration may want to preload the
 * data which will be used in the next iteration. This is done by using the PLD
 * instruction.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_prefetch_enable(void);
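
/*
 * Illustrative sketch (assumption): with prefetching enabled, a loop can hint
 * the next iterations' data into the L1 data cache. __builtin_prefetch() is a
 * GCC extension that emits a PLD instruction on ARM; the array, loop, and
 * prefetch distance are hypothetical.
 *
 * \code
 * uint32_t sum_samples(const uint32_t * samples, size_t count)
 * {
 *     uint32_t sum = 0;
 *     for (size_t i = 0; i < count; ++i) {
 *         if (i + 8 < count) {
 *             // Hint the line holding samples[i + 8] into the data cache.
 *             __builtin_prefetch(&samples[i + 8]);
 *         }
 *         sum += samples[i];
 *     }
 *     return sum;
 * }
 * \endcode
 */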

/*!
 * Disables the L1 cache data prefetch feature on the current CPU core.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l1_prefetch_disable(void);

/*!
 * Returns \b true if the L1 cache data prefetch feature is enabled and
 * \b false if it is disabled on the current CPU core.
 *
 * \retval      true            The L1 data cache prefetch feature is enabled.
 * \retval      false           The L1 data cache prefetch feature is disabled.
 */
bool alt_cache_l1_prefetch_is_enabled(void);

/*!
 * @}
 */

/*!
 * \addtogroup CACHE_L2 L2 Cache Management API
 *
 * This API group provides functions to interact with various features of the
 * L2 cache on the SoCFPGA. This includes the following features:
 *  * L2 cache
 *  * Parity error detection
 *  * Data prefetching
 *  * Interrupt Management
 *
 * \internal
 * Additional features that may be implemented in the future:
 *  * Lockdown
 *  * Event counter
 * \endinternal
 *
 * The API within this group affects the L2 cache which is visible to all CPUs
 * on the system.
 *
 * With respect to bring-up, the L1 and L2 cache controller setups are fully
 * independent. The L2 can be set up at any time, before or after the L1 is
 * set up.
 * \internal
 * Source: Cortex-A9 MPCore TRM, section 5.3.4 "Multiprocessor bring-up".
 * \endinternal
 *
 * @{
 */

/*!
 * Initializes the L2 cache controller.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l2_init(void);

/*!
 * Uninitializes the L2 cache controller.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l2_uninit(void);

/*!
 * Enables the L2 cache features for data and instruction prefetching.
 *
 * Prefetching can be enabled or disabled while the L2 cache is enabled.
 * \internal
 * Source: Use the Prefetch Control Register.
 * \endinternal
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l2_prefetch_enable(void);

/*!
 * Disables the L2 cache features for data and instruction prefetching.
 *
 * Prefetching can be enabled or disabled while the L2 cache is enabled.
 * \internal
 * Source: Use the Prefetch Control Register.
 * \endinternal
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l2_prefetch_disable(void);

/*!
 * Returns \b true if both the L2 cache data and instruction prefetch features
 * are enabled and \b false if either prefetch feature is disabled.
 *
 * \retval      true            The L2 data and instruction prefetch features
 *                              are enabled.
 * \retval      false           Some L2 data and instruction prefetch features
 *                              are disabled.
 */
bool alt_cache_l2_prefetch_is_enabled(void);

/*!
 * Enables parity error detection in the L2 cache.
 *
 * Ideally parity should be enabled before the L2 cache is enabled. If the
 * cache is already enabled, it will first be cleaned and disabled before
 * parity is enabled in hardware. Afterwards, the cache will be invalidated and
 * enabled.
 *
 * For a parity error to be reported, the ALT_CACHE_L2_INTERRUPT_PARRD and / or
 * ALT_CACHE_L2_INTERRUPT_PARRT interrupt condition(s) must be enabled. This is
 * done by calling alt_cache_l2_int_enable(). As well, the L2 cache interrupt
 * must be enabled using the interrupt controller API. Refer to the interrupt
 * controller API for more details about programming the interrupt controller.
 *
 * In the event a parity error is detected, the appropriate L2 cache parity
 * interrupt will be raised. To clear the parity interrupt(s), the appropriate
 * L2 cache parity interrupt must be cleared by calling
 * alt_cache_l2_int_status_clear().
 *
 * For ECC support, refer to the ECC related API documentation for more
 * information.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l2_parity_enable(void);

/*!
 * Disables parity error detection in the L2 cache.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l2_parity_disable(void);

/*!
 * Returns \b true when parity error detection is enabled and \b false when it
 * is disabled.
 *
 * \retval      true            The L2 cache parity error detection feature is
 *                              enabled.
 * \retval      false           The L2 cache parity error detection feature is
 *                              disabled.
 */
bool alt_cache_l2_parity_is_enabled(void);

/*!
 * Enables the L2 cache.
 *
 * If the L2 cache is already enabled, nothing is done. Otherwise the entire
 * contents of the cache is first invalidated before being enabled.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l2_enable(void);

/*!
 * Disables the L2 cache.
 *
 * If the L2 cache is already disabled, nothing is done. Otherwise the entire
 * contents of the cache is first cleaned before being disabled.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l2_disable(void);

/*!
 * Returns \b true when the L2 cache is enabled and \b false when it is
 * disabled.
 *
 * \retval      true            The L2 cache is enabled.
 * \retval      false           The L2 cache is disabled.
 */
bool alt_cache_l2_is_enabled(void);
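
/*
 * Illustrative sketch (assumption): an L2 bring-up sequence run once, from a
 * single core, might initialize the controller, turn on the optional features
 * while the cache is still off, and then enable the cache itself. The ordering
 * is an assumption consistent with the descriptions above.
 *
 * \code
 * ALT_STATUS_CODE status;
 *
 * status = alt_cache_l2_init();              // program controller defaults
 * if (status == ALT_E_SUCCESS) {
 *     alt_cache_l2_parity_enable();          // simplest while L2 is disabled
 *     alt_cache_l2_prefetch_enable();        // may also be toggled later
 *     status = alt_cache_l2_enable();        // invalidates, then enables
 * }
 * \endcode
 */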

/*!
 * Flushes the L2 cache controller hardware buffers.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 * \retval      ALT_E_TMO       The memory operation timed out.
 */
ALT_STATUS_CODE alt_cache_l2_sync(void);

/*!
 * Invalidates the specified contents of the L2 cache for the given memory
 * segment.
 *
 * The memory segment address and length specified must align to the
 * characteristics of the cache line. This means the address and length must be
 * multiples of the cache line size. To determine the cache line size, use the
 * \b ALT_CACHE_LINE_SIZE macro.
 *
 * \param       paddress
 *              The physical address of the memory segment to be invalidated.
 *
 * \param       length
 *              The length of the memory segment to be invalidated.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 * \retval      ALT_E_BAD_ARG   The memory segment is invalid.
 * \retval      ALT_E_TMO       The memory operation timed out.
 */
ALT_STATUS_CODE alt_cache_l2_invalidate(void * paddress, size_t length);

/*!
 * Invalidates the entire contents of the L2 cache.
 *
 * Normally this is done automatically as part of alt_cache_l2_enable(), but
 * in certain circumstances it may be necessary to invalidate it manually. An
 * example of this situation is when the address space is remapped and the
 * processor accesses memory from the new memory area.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 * \retval      ALT_E_TMO       The memory operation timed out.
 */
ALT_STATUS_CODE alt_cache_l2_invalidate_all(void);

/*!
 * Cleans the specified contents of the L2 cache for the given memory segment.
 *
 * The memory segment address and length specified must align to the
 * characteristics of the cache line. This means the address and length must be
 * multiples of the cache line size. To determine the cache line size, use the
 * \b ALT_CACHE_LINE_SIZE macro.
 *
 * \param       paddress
 *              The physical address of the memory segment to be cleaned.
 *
 * \param       length
 *              The length of the memory segment to be cleaned.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 * \retval      ALT_E_BAD_ARG   The memory segment is invalid.
 * \retval      ALT_E_TMO       The memory operation timed out.
 */
ALT_STATUS_CODE alt_cache_l2_clean(void * paddress, size_t length);

/*!
 * Cleans the entire L2 cache. All L2 cache controller interrupts will be
 * temporarily disabled while the clean operation is in progress and restored
 * once it is finished.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 * \retval      ALT_E_TMO       The memory operation timed out.
 */
ALT_STATUS_CODE alt_cache_l2_clean_all(void);

/*!
 * Cleans and invalidates the specified contents of the L2 cache for the
 * given memory segment.
 *
 * The memory segment address and length specified must align to the
 * characteristics of the cache line. This means the address and length must be
 * multiples of the cache line size. To determine the cache line size, use the
 * \b ALT_CACHE_LINE_SIZE macro.
 *
 * \param       paddress
 *              The physical address of the memory segment to be purged.
 *
 * \param       length
 *              The length of the memory segment to be purged.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 * \retval      ALT_E_BAD_ARG   The memory segment is invalid.
 */
ALT_STATUS_CODE alt_cache_l2_purge(void * paddress, size_t length);

/*!
 * Cleans and invalidates the entire L2 cache. All L2 cache controller
 * interrupts will be temporarily disabled while the clean and invalidate
 * operation is in progress and restored once it is finished.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 * \retval      ALT_E_TMO       The memory operation timed out.
 */
ALT_STATUS_CODE alt_cache_l2_purge_all(void);

/*!
 * This type definition enumerates all the interrupt conditions that can be
 * generated by the L2 cache controller as register mask values.
 */
enum ALT_CACHE_L2_INTERRUPT_e
{
    /*! Decode error received on the master ports from L3. */
    ALT_CACHE_L2_INTERRUPT_DECERR = 1 << 8,

    /*! Slave error received on the master ports from L3.  */
    ALT_CACHE_L2_INTERRUPT_SLVERR = 1 << 7,

    /*! Error on the L2 data RAM read.                     */
    ALT_CACHE_L2_INTERRUPT_ERRRD  = 1 << 6,

    /*! Error on the L2 tag RAM read.                      */
    ALT_CACHE_L2_INTERRUPT_ERRRT  = 1 << 5,

    /*! Error on the L2 data RAM write.                    */
    ALT_CACHE_L2_INTERRUPT_ERRWD  = 1 << 4,

    /*! Error on the L2 tag RAM write.                     */
    ALT_CACHE_L2_INTERRUPT_ERRWT  = 1 << 3,

    /*! Parity error on the L2 data RAM read.              */
    ALT_CACHE_L2_INTERRUPT_PARRD  = 1 << 2,

    /*! Parity error on the L2 tag RAM read.               */
    ALT_CACHE_L2_INTERRUPT_PARRT  = 1 << 1,

    /*! Event counter overflow or increment.               */
    ALT_CACHE_L2_INTERRUPT_ECNTR  = 1 << 0
};
typedef enum ALT_CACHE_L2_INTERRUPT_e ALT_CACHE_L2_INTERRUPT_t;

/*!
 * Enables the L2 cache controller interrupts for the specified set of
 * condition(s).
 *
 * \param       interrupt
 *              A register mask of the selected L2 cache controller
 *              interrupting conditions.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l2_int_enable(uint32_t interrupt);

/*!
 * Disables the L2 cache controller interrupts for the specified set of
 * condition(s).
 *
 * \param       interrupt
 *              A register mask of the selected L2 cache controller
 *              interrupting conditions.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l2_int_disable(uint32_t interrupt);

/*!
 * Gets the condition(s) causing the L2 cache controller to interrupt as a
 * register mask.
 *
 * \returns     A register mask of the currently asserted and enabled
 *              conditions resulting in an interrupt being generated.
 */
uint32_t alt_cache_l2_int_status_get(void);

/*!
 * Clears the specified condition(s) causing the L2 cache controller to
 * interrupt as a mask. Condition(s) specified which are not causing an
 * interrupt or condition(s) specified which are not enabled are ignored.
 *
 * \param       interrupt
 *              A register mask of the selected L2 cache controller
 *              interrupting conditions.
 *
 * \retval      ALT_E_SUCCESS   The operation was successful.
 * \retval      ALT_E_ERROR     The operation failed.
 */
ALT_STATUS_CODE alt_cache_l2_int_status_clear(uint32_t interrupt);
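
/*
 * Illustrative sketch (assumption): enabling the L2 parity interrupt
 * conditions and servicing them from a handler. Registering the handler with
 * the interrupt controller is outside this API and only indicated by a
 * comment; the handler name is hypothetical.
 *
 * \code
 * // During setup, after enabling L2 parity error detection:
 * alt_cache_l2_int_enable(ALT_CACHE_L2_INTERRUPT_PARRD |
 *                         ALT_CACHE_L2_INTERRUPT_PARRT);
 * // ... also enable the L2 cache interrupt in the interrupt controller ...
 *
 * void l2_cache_irq_handler(void)
 * {
 *     uint32_t status = alt_cache_l2_int_status_get();
 *
 *     if (status & (ALT_CACHE_L2_INTERRUPT_PARRD |
 *                   ALT_CACHE_L2_INTERRUPT_PARRT)) {
 *         // ... log or recover from the parity error ...
 *     }
 *
 *     // Acknowledge the handled conditions in the L2 controller.
 *     alt_cache_l2_int_status_clear(status);
 * }
 * \endcode
 */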

/*!
 * @}
 */

/*!
 * @}
 */

#ifdef __cplusplus
}
#endif

#endif /* __ALT_CACHE_H__ */