source: rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_initialize.c @ ade27c6

Last change on this file since ade27c6 was ade27c6, checked in by Sebastian Huber <sebastian.huber@…>, on 06/20/13 at 09:44:04

bsps: Move bsp_generic_fatal_code to new file

Add bsp_generic_fatal().

/**
 * @file
 *
 * @ingroup ppc_exc
 *
 * @brief PowerPC Exceptions implementation.
 */

/*
 * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
 *                    Canon Centre Recherche France.
 *
 * Copyright (C) 2007 Till Straumann <strauman@slac.stanford.edu>
 *
 * Copyright (C) 2009-2012 embedded brains GmbH.
 *
 * Derived from file "libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c".
 * Derived from file "libcpu/powerpc/new-exceptions/e500_raw_exc_init.c".
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.com/license/LICENSE.
 */

#include <rtems.h>

#include <bsp/vectors.h>
#include <bsp/generic-fatal.h>

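/*
 * Compile-time layout checks: the register offsets used by the exception
 * handler assembly code must match the C layout of CPU_Exception_frame.
 * A mismatch breaks the build instead of silently corrupting the saved
 * context at run-time.
 */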
#define PPC_EXC_ASSERT_OFFSET(field, off) \
  RTEMS_STATIC_ASSERT( \
    offsetof(CPU_Exception_frame, field) + FRAME_LINK_SPACE == off, \
    CPU_Exception_frame_offset_ ## field \
  )

#define PPC_EXC_ASSERT_CANONIC_OFFSET(field) \
  PPC_EXC_ASSERT_OFFSET(field, field ## _OFFSET)

PPC_EXC_ASSERT_OFFSET(EXC_SRR0, SRR0_FRAME_OFFSET);
PPC_EXC_ASSERT_OFFSET(EXC_SRR1, SRR1_FRAME_OFFSET);
PPC_EXC_ASSERT_OFFSET(_EXC_number, EXCEPTION_NUMBER_OFFSET);
PPC_EXC_ASSERT_CANONIC_OFFSET(EXC_CR);
PPC_EXC_ASSERT_CANONIC_OFFSET(EXC_CTR);
PPC_EXC_ASSERT_CANONIC_OFFSET(EXC_XER);
PPC_EXC_ASSERT_CANONIC_OFFSET(EXC_LR);
#ifdef __SPE__
  PPC_EXC_ASSERT_OFFSET(EXC_SPEFSCR, PPC_EXC_SPEFSCR_OFFSET);
  PPC_EXC_ASSERT_OFFSET(EXC_ACC, PPC_EXC_ACC_OFFSET);
#endif
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR0);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR1);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR2);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR3);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR4);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR5);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR6);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR7);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR8);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR9);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR10);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR11);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR12);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR13);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR14);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR15);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR16);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR17);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR18);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR19);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR20);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR21);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR22);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR23);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR24);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR25);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR26);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR27);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR28);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR29);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR30);
PPC_EXC_ASSERT_CANONIC_OFFSET(GPR31);

RTEMS_STATIC_ASSERT(
  PPC_EXC_MINIMAL_FRAME_SIZE % CPU_STACK_ALIGNMENT == 0,
  PPC_EXC_MINIMAL_FRAME_SIZE
);

RTEMS_STATIC_ASSERT(
  PPC_EXC_FRAME_SIZE % CPU_STACK_ALIGNMENT == 0,
  PPC_EXC_FRAME_SIZE
);

RTEMS_STATIC_ASSERT(
  sizeof(CPU_Exception_frame) + FRAME_LINK_SPACE <= PPC_EXC_FRAME_SIZE,
  CPU_Exception_frame
);

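/*
 * Controls the write-back cache check at the end of
 * ppc_exc_initialize_with_vector_base().  BSPs which run entirely without
 * the cache may set this to zero before exception initialization, see the
 * detailed comment ahead of the check.
 */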
uint32_t ppc_exc_cache_wb_check = 1;

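/*
 * Wrappers for the Book E mtivpr/mtivor extended mnemonics, which load
 * the Interrupt Vector Prefix Register (IVPR) and the Interrupt Vector
 * Offset Registers (IVORn).
 */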
#define MTIVPR(prefix) __asm__ volatile ("mtivpr %0" : : "r" (prefix))
#define MTIVOR(x, vec) __asm__ volatile ("mtivor"#x" %0" : : "r" (vec))

static void ppc_exc_initialize_booke(void *vector_base)
{
  /* Interrupt vector prefix register */
  MTIVPR((uint32_t) vector_base);

  if (
    ppc_cpu_is_specific_e200(PPC_e200z0)
      || ppc_cpu_is_specific_e200(PPC_e200z1)
  ) {
    /*
     * These cores have hard wired IVOR registers.  An access will cause a
     * program exception.
     */
    return;
  }

  /* Interrupt vector offset registers */
  MTIVOR(0,  ppc_exc_vector_address(ASM_BOOKE_CRIT_VECTOR, vector_base));
  MTIVOR(1,  ppc_exc_vector_address(ASM_MACH_VECTOR, vector_base));
  MTIVOR(2,  ppc_exc_vector_address(ASM_PROT_VECTOR, vector_base));
  MTIVOR(3,  ppc_exc_vector_address(ASM_ISI_VECTOR, vector_base));
  MTIVOR(4,  ppc_exc_vector_address(ASM_EXT_VECTOR, vector_base));
  MTIVOR(5,  ppc_exc_vector_address(ASM_ALIGN_VECTOR, vector_base));
  MTIVOR(6,  ppc_exc_vector_address(ASM_PROG_VECTOR, vector_base));
  MTIVOR(7,  ppc_exc_vector_address(ASM_FLOAT_VECTOR, vector_base));
  MTIVOR(8,  ppc_exc_vector_address(ASM_SYS_VECTOR, vector_base));
  MTIVOR(9,  ppc_exc_vector_address(ASM_BOOKE_APU_VECTOR, vector_base));
  MTIVOR(10, ppc_exc_vector_address(ASM_BOOKE_DEC_VECTOR, vector_base));
  MTIVOR(11, ppc_exc_vector_address(ASM_BOOKE_FIT_VECTOR, vector_base));
  MTIVOR(12, ppc_exc_vector_address(ASM_BOOKE_WDOG_VECTOR, vector_base));
  MTIVOR(13, ppc_exc_vector_address(ASM_BOOKE_DTLBMISS_VECTOR, vector_base));
  MTIVOR(14, ppc_exc_vector_address(ASM_BOOKE_ITLBMISS_VECTOR, vector_base));
  MTIVOR(15, ppc_exc_vector_address(ASM_BOOKE_DEBUG_VECTOR, vector_base));
  if (ppc_cpu_is_e200() || ppc_cpu_is_e500()) {
    MTIVOR(32, ppc_exc_vector_address(ASM_E500_SPE_UNAVAILABLE_VECTOR, vector_base));
    MTIVOR(33, ppc_exc_vector_address(ASM_E500_EMB_FP_DATA_VECTOR, vector_base));
    MTIVOR(34, ppc_exc_vector_address(ASM_E500_EMB_FP_ROUND_VECTOR, vector_base));
  }
  if (ppc_cpu_is_specific_e200(PPC_e200z7) || ppc_cpu_is_e500()) {
    MTIVOR(35, ppc_exc_vector_address(ASM_E500_PERFMON_VECTOR, vector_base));
  }
}

static void ppc_exc_fatal_error(void)
{
  bsp_generic_fatal(BSP_GENERIC_FATAL_EXCEPTION_INITIALIZATION);
}

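/*
 * Usage sketch (the BSP-side names below are placeholders, not symbols
 * defined by this file): a BSP would typically reserve a suitably aligned
 * stack area and then call
 *
 *   ppc_exc_initialize_with_vector_base(
 *     bsp_interrupt_disable_mask,
 *     (uintptr_t) bsp_interrupt_stack_area,
 *     sizeof(bsp_interrupt_stack_area),
 *     bsp_vector_base
 *   );
 *
 * The vector base must match the location where the hardware expects the
 * exception prologues installed below (e.g. the IVPR value on Book E).
 */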
void ppc_exc_initialize_with_vector_base(
  uint32_t interrupt_disable_mask,
  uintptr_t interrupt_stack_begin,
  uintptr_t interrupt_stack_size,
  void *vector_base
)
{
  rtems_status_code sc = RTEMS_SUCCESSFUL;
  const ppc_exc_categories *const categories = ppc_exc_current_categories();
  uintptr_t const interrupt_stack_end = interrupt_stack_begin + interrupt_stack_size;
  uintptr_t interrupt_stack_pointer = interrupt_stack_end - PPC_MINIMUM_STACK_FRAME_SIZE;
  unsigned vector = 0;
  uint32_t sda_base = 0;
  uint32_t r13 = 0;

  if (categories == NULL) {
    ppc_exc_fatal_error();
  }

  /* Assembly code needs SDA_BASE in r13 (SVR4 or EABI). Make sure the
   * early init code put it there.
   */
  __asm__ volatile (
    "lis %0, _SDA_BASE_@h\n"
    "ori %0, %0, _SDA_BASE_@l\n"
    "mr  %1, 13\n"
    : "=r" (sda_base), "=r"(r13)
  );

  if (sda_base != r13) {
    ppc_exc_fatal_error();
  }

  /* Ensure proper interrupt stack alignment */
  interrupt_stack_pointer &= ~((uintptr_t) CPU_STACK_ALIGNMENT - 1);

  /* Tag interrupt stack bottom */
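  /* The zero acts as a NULL back chain word and terminates stack
   * unwinding for frames on the interrupt stack (EABI/SVR4 stack frames
   * begin with a back chain pointer).
   */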
  *(uint32_t *) interrupt_stack_pointer = 0;

  /* Move interrupt stack values to special purpose registers */
  PPC_SET_SPECIAL_PURPOSE_REGISTER(SPRG1, interrupt_stack_pointer);
  PPC_SET_SPECIAL_PURPOSE_REGISTER(SPRG2, interrupt_stack_begin);

  ppc_interrupt_set_disable_mask(interrupt_disable_mask);

#ifndef PPC_EXC_CONFIG_BOOKE_ONLY

  /* Use current MMU / RI settings when running C exception handlers */
  ppc_exc_msr_bits = ppc_machine_state_register() & (MSR_DR | MSR_IR | MSR_RI);

#ifdef __ALTIVEC__
  /* Need vector unit enabled to save/restore altivec context */
  ppc_exc_msr_bits |= MSR_VE;
#endif

#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */

  if (ppc_cpu_is_bookE() == PPC_BOOKE_STD || ppc_cpu_is_bookE() == PPC_BOOKE_E500) {
    ppc_exc_initialize_booke(vector_base);
  }

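  /*
   * Install a prologue stub at each valid vector location.  The stub is
   * generated by ppc_exc_make_prologue() according to the vector's
   * category and copied into place with ppc_code_copy(); vectors with
   * category PPC_EXC_INVALID are skipped.
   */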
  for (vector = 0; vector <= LAST_VALID_EXC; ++vector) {
    ppc_exc_category category = ppc_exc_category_for_vector(categories, vector);

    if (category != PPC_EXC_INVALID) {
      void *const vector_address = ppc_exc_vector_address(vector, vector_base);
      uint32_t prologue [16];
      size_t prologue_size = sizeof(prologue);

      sc = ppc_exc_make_prologue(
        vector,
        vector_base,
        category,
        prologue,
        &prologue_size
      );
      if (sc != RTEMS_SUCCESSFUL) {
        ppc_exc_fatal_error();
      }

      ppc_code_copy(vector_address, prologue, prologue_size);
    }
  }

#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
  /* If we are on a classic PPC with MSR_DR enabled then
   * assert that the mapping for at least this task's
   * stack is write-back-caching enabled (see README/CAVEATS).
   * Do this only if the cache is physically enabled.
   * Since it is not easy to figure that out in a
   * generic way we need help from the BSP: BSPs
   * which run entirely w/o the cache may set
   * ppc_exc_cache_wb_check to zero prior to calling
   * this routine.
   *
   * We run this check only after exception handling is
   * initialized so that we have some chance to get
   * information printed if it fails.
   *
   * Note that it is unsafe to ignore this issue; if
   * the check fails, do NOT disable it unless caches
   * are always physically disabled.
   */
  if (ppc_exc_cache_wb_check && (MSR_DR & ppc_exc_msr_bits)) {
    /* The size of 63 assumes cache lines are at most 32 bytes */
    uint8_t dummy[63];
    uintptr_t p = (uintptr_t) dummy;
    /* If the dcbz instruction raises an alignment exception
     * then the stack is mapped as write-thru or caching-disabled.
     * The low-level code is not capable of dealing with this
     * ATM.
     */
    p = (p + 31U) & ~31U;
    __asm__ volatile ("dcbz 0, %0"::"b" (p));
    /* If we make it thru here then things seem to be OK */
  }
#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
}