/**
 * @file
 *
 * @ingroup ppc_exc
 *
 * @brief PowerPC Exceptions implementation.
 */

/*
 * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
 *                    Canon Centre Recherche France.
 *
 * Copyright (C) 2007 Till Straumann <strauman@slac.stanford.edu>
 *
 * Copyright (C) 2009 embedded brains GmbH.
 *
 * Derived from file "libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c".
 * Derived from file "libcpu/powerpc/new-exceptions/e500_raw_exc_init.c".
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.com/license/LICENSE.
 *
 * $Id$
 */

#include <rtems.h>

#include <bsp/vectors.h>

uint32_t ppc_exc_cache_wb_check = 1;

#define MTIVPR(prefix) asm volatile ("mtivpr %0" : : "r" (prefix))
#define MTIVOR(x, vec) asm volatile ("mtivor" #x " %0" : : "r" (vec))
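
/*
 * Note: Book E style CPUs locate their exception handlers through the IVPR
 * (interrupt vector prefix) and IVORn (interrupt vector offset) special
 * purpose registers instead of the classic fixed vector table.  The two
 * macros above are thin wrappers around the corresponding move-to-SPR
 * instructions; for example, MTIVOR(2, addr) expands to
 *
 *   asm volatile ("mtivor2 %0" : : "r" (addr));
 */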

static void ppc_exc_initialize_e500(void)
{
  /* Interrupt vector prefix register */
  MTIVPR(ppc_exc_vector_base);

  /* Interrupt vector offset registers */
  /* Critical input not (yet) supported; use reset vector */
  MTIVOR(0, ppc_exc_vector_address(ASM_BOOKE_CRIT_VECTOR));
  MTIVOR(1, ppc_exc_vector_address(ASM_MACH_VECTOR));
  MTIVOR(2, ppc_exc_vector_address(ASM_PROT_VECTOR));
  MTIVOR(3, ppc_exc_vector_address(ASM_ISI_VECTOR));
  MTIVOR(4, ppc_exc_vector_address(ASM_EXT_VECTOR));
  MTIVOR(5, ppc_exc_vector_address(ASM_ALIGN_VECTOR));
  MTIVOR(6, ppc_exc_vector_address(ASM_PROG_VECTOR));
  MTIVOR(7, ppc_exc_vector_address(ASM_FLOAT_VECTOR));
  MTIVOR(8, ppc_exc_vector_address(ASM_SYS_VECTOR));
  MTIVOR(9, ppc_exc_vector_address(0x0b));
  MTIVOR(10, ppc_exc_vector_address(ASM_BOOKE_DEC_VECTOR));
  MTIVOR(11, ppc_exc_vector_address(ASM_BOOKE_FIT_VECTOR));
  MTIVOR(12, ppc_exc_vector_address(ASM_BOOKE_WDOG_VECTOR));
  MTIVOR(13, ppc_exc_vector_address(ASM_60X_DSMISS_VECTOR));
  MTIVOR(14, ppc_exc_vector_address(ASM_60X_DLMISS_VECTOR));
  MTIVOR(15, ppc_exc_vector_address(ASM_TRACE_VECTOR));
  MTIVOR(32, ppc_exc_vector_address(ASM_60X_VEC_VECTOR));
  MTIVOR(33, ppc_exc_vector_address(0x16));
  MTIVOR(34, ppc_exc_vector_address(0x15));
  MTIVOR(35, ppc_exc_vector_address(ASM_60X_PERFMON_VECTOR));
}

static void ppc_exc_initialize_e200(void)
{
  /* Interrupt vector prefix register */
  MTIVPR(ppc_exc_vector_base);

  /* Interrupt vector offset registers */
  MTIVOR(0, 0); /* Critical input */
  MTIVOR(1, ppc_exc_vector_address(ASM_MACH_VECTOR));
  MTIVOR(2, ppc_exc_vector_address(ASM_PROT_VECTOR));
  MTIVOR(3, ppc_exc_vector_address(ASM_ISI_VECTOR));
  MTIVOR(4, ppc_exc_vector_address(ASM_EXT_VECTOR));
  MTIVOR(5, ppc_exc_vector_address(ASM_ALIGN_VECTOR));
  MTIVOR(6, ppc_exc_vector_address(ASM_PROG_VECTOR));
  MTIVOR(7, ppc_exc_vector_address(ASM_FLOAT_VECTOR));
  MTIVOR(8, ppc_exc_vector_address(ASM_SYS_VECTOR));
  MTIVOR(9, 0); /* APU unavailable */
  MTIVOR(10, ppc_exc_vector_address(ASM_BOOKE_DEC_VECTOR));
  MTIVOR(11, ppc_exc_vector_address(ASM_BOOKE_FIT_VECTOR));
  MTIVOR(12, ppc_exc_vector_address(ASM_BOOKE_WDOG_VECTOR));
  MTIVOR(13, ppc_exc_vector_address(ASM_BOOKE_ITLBMISS_VECTOR));
  MTIVOR(14, ppc_exc_vector_address(ASM_BOOKE_DTLBMISS_VECTOR));
  MTIVOR(15, ppc_exc_vector_address(ASM_TRACE_VECTOR));
  MTIVOR(32, ppc_exc_vector_address(ASM_E200_SPE_UNAVAILABLE_VECTOR));
  MTIVOR(33, ppc_exc_vector_address(ASM_E200_SPE_DATA_VECTOR));
  MTIVOR(34, ppc_exc_vector_address(ASM_E200_SPE_ROUND_VECTOR));
}

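/*
 * Sets up exception handling for the current CPU: checks that the small
 * data area pointer is in place, prepares the interrupt stack and stores
 * the stack pointer and stack begin address in SPRG1 and SPRG2, programs
 * the Book E vector registers where applicable, installs a prologue for
 * every valid exception vector, and finally probes (via dcbz) that the
 * stack is mapped write-back cacheable.
 */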
rtems_status_code ppc_exc_initialize(
  uint32_t interrupt_disable_mask,
  uintptr_t interrupt_stack_begin,
  uintptr_t interrupt_stack_size
)
{
  rtems_status_code sc = RTEMS_SUCCESSFUL;
  const ppc_exc_categories *const categories = ppc_exc_current_categories();
  uintptr_t const interrupt_stack_end = interrupt_stack_begin + interrupt_stack_size;
  uintptr_t interrupt_stack_pointer = interrupt_stack_end - PPC_MINIMUM_STACK_FRAME_SIZE;
  unsigned vector = 0;
  uint32_t sda_base = 0;
  uint32_t r13 = 0;

  if (categories == NULL) {
    return RTEMS_NOT_IMPLEMENTED;
  }

  /* Assembly code needs SDA_BASE in r13 (SVR4 or EABI). Make sure
   * early init code put it there.
   */
  asm volatile (
    "lis %0, _SDA_BASE_@h\n"
    "ori %0, %0, _SDA_BASE_@l\n"
    "mr %1, 13\n"
    : "=r" (sda_base), "=r" (r13)
  );
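  /*
   * Note: the lis/ori pair materializes the link-time address of
   * _SDA_BASE_ in sda_base, while "mr %1, 13" copies the current contents
   * of GPR13 into r13, so the comparison below detects whether the start
   * code loaded the small data area anchor as required.
   */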

  if (sda_base != r13) {
    return RTEMS_NOT_CONFIGURED;
  }

  /* Ensure proper interrupt stack alignment */
  interrupt_stack_pointer &= ~((uint32_t) CPU_STACK_ALIGNMENT - 1);

  /* Tag interrupt stack bottom */
  *(uint32_t *) interrupt_stack_pointer = 0;

  /* Move interrupt stack values to special purpose registers */
  PPC_SET_SPECIAL_PURPOSE_REGISTER(SPRG1, interrupt_stack_pointer);
  PPC_SET_SPECIAL_PURPOSE_REGISTER(SPRG2, interrupt_stack_begin);

  ppc_interrupt_set_disable_mask(interrupt_disable_mask);

  /* Use current MMU / RI settings when running C exception handlers */
  ppc_exc_msr_bits = ppc_machine_state_register() & (MSR_DR | MSR_IR | MSR_RI);

#ifdef __ALTIVEC__
  /* Need vector unit enabled to save/restore altivec context */
  ppc_exc_msr_bits |= MSR_VE;
#endif

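  /*
   * Note: classic (non-Book E) PowerPC CPUs use architecturally fixed
   * vector locations, so no vector register setup is required for them;
   * only Book E style cores such as the e200 and e500 route exceptions
   * through the IVPR/IVOR registers programmed by the routines below.
   */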
  if (ppc_cpu_is(PPC_e200z6)) {
    ppc_exc_initialize_e200();
  } else if (ppc_cpu_is_bookE() == PPC_BOOKE_STD || ppc_cpu_is_bookE() == PPC_BOOKE_E500) {
    ppc_exc_initialize_e500();
  }

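  /*
   * Install a prologue for every exception vector with a valid category.
   * The prologue code is generated by ppc_exc_make_prologue() according to
   * the vector and its category and is copied to the vector location; at
   * run time it transfers control to the common exception handling code of
   * this module.
   */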
  for (vector = 0; vector <= LAST_VALID_EXC; ++vector) {
    ppc_exc_category category = ppc_exc_category_for_vector(categories, vector);

    if (category != PPC_EXC_INVALID) {
      void *const vector_address = ppc_exc_vector_address(vector);
      uint32_t prologue[16];
      size_t prologue_size = sizeof(prologue);

      sc = ppc_exc_make_prologue(vector, category, prologue, &prologue_size);
      if (sc != RTEMS_SUCCESSFUL) {
        return RTEMS_INTERNAL_ERROR;
      }

      ppc_code_copy(vector_address, prologue, prologue_size);
    }
  }

  /* If we are on a classic PPC with MSR_DR enabled, then assert that the
   * mapping for at least this task's stack is write-back cacheable (see
   * README/CAVEATS).  Do this only if the cache is physically enabled.
   * Since it is not easy to determine that in a generic way, we need help
   * from the BSP: BSPs which run entirely without the cache may set
   * ppc_exc_cache_wb_check to zero prior to calling this routine.
   *
   * We run this check only after exception handling is initialized so
   * that we have some chance of getting information printed if it fails.
   *
   * Note that it is unsafe to ignore this issue; if the check fails, do
   * NOT disable it unless caches are always physically disabled.
   */
  if (ppc_exc_cache_wb_check && (MSR_DR & ppc_exc_msr_bits)) {
    /* The size of 63 assumes cache lines are at most 32 bytes */
    uint8_t dummy[63];
    uintptr_t p = (uintptr_t) dummy;
    /* If the dcbz instruction raises an alignment exception, then the
     * stack is mapped write-through or caching-disabled.  The low-level
     * code is not capable of dealing with this at the moment.
     */
    p = (p + 31U) & ~31U;
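    /*
     * Note: dcbz zeroes one whole cache line.  Rounding p up to a 32-byte
     * boundary keeps that line entirely inside dummy[] for line sizes of
     * up to 32 bytes, so the probe cannot touch neighbouring stack data.
     */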
    asm volatile ("dcbz 0, %0" : : "b" (p));
    /* If we make it through here then things seem to be OK */
  }

  return RTEMS_SUCCESSFUL;
}
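
/*
 * Usage sketch (not part of this file; the names are placeholders): a
 * BSP's start-up code would typically call this routine once, before it
 * enables or uses interrupts, along the lines of
 *
 *   rtems_status_code sc = ppc_exc_initialize(
 *     interrupt_disable_mask,              // BSP-specific MSR mask
 *     (uintptr_t) interrupt_stack_area,    // BSP-provided stack area
 *     interrupt_stack_area_size
 *   );
 *   if (sc != RTEMS_SUCCESSFUL) {
 *     // treat as a fatal error during system start-up
 *   }
 */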