source: rtems/bsps/powerpc/shared/exceptions/ppc_exc_initialize.c @ bd150801

5
Last change on this file since bd150801 was bd150801, checked in by Sebastian Huber <sebastian.huber@…>, on 03/13/18 at 15:24:16

bsps/powerpc: Move exceptions support to bsps

This patch is a part of the BSP source reorganization.

Update #3285.

  • Property mode set to 100644
File size: 6.1 KB
Line 
1/**
2 * @file
3 *
4 * @ingroup ppc_exc
5 *
6 * @brief PowerPC Exceptions implementation.
7 */
8
9/*
10 * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
11 *                    Canon Centre Recherche France.
12 *
13 * Copyright (C) 2007 Till Straumann <strauman@slac.stanford.edu>
14 *
15 * Copyright (C) 2009-2012 embedded brains GmbH.
16 *
17 * Derived from file "libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c".
18 * Derived from file "libcpu/powerpc/new-exceptions/e500_raw_exc_init.c".
19 *
20 * The license and distribution terms for this file may be
21 * found in the file LICENSE in this distribution or at
22 * http://www.rtems.org/license/LICENSE.
23 */
24
25#include <rtems.h>
26
27#include <bsp/vectors.h>
28#include <bsp/fatal.h>
29
/*
 * Nonzero enables the write-back cache sanity check performed at the end of
 * ppc_exc_initialize_with_vector_base().  BSPs which run entirely without
 * the cache may set this to zero before exception initialization to skip
 * that check.
 */
uint32_t ppc_exc_cache_wb_check = 1;

/* Write the Book E Interrupt Vector Prefix Register (IVPR) */
#define MTIVPR(prefix) __asm__ volatile ("mtivpr %0" : : "r" (prefix))
/* Write Book E Interrupt Vector Offset Register x; x is pasted into the mnemonic (mtivor<x>) */
#define MTIVOR(x, vec) __asm__ volatile ("mtivor"#x" %0" : : "r" (vec))
34
/*
 * Program the Book E vectoring registers so that exceptions dispatch into
 * the exception prologues placed relative to vector_base: the IVPR holds
 * the common high-order vector prefix and each IVORn holds the per-vector
 * offset.  Cores with hard-wired IVORs (e200z0/e200z1) get only the IVPR
 * write; optional IVOR32-35 are set for the core families that have them.
 */
static void ppc_exc_initialize_booke(void *vector_base)
{
  /* Interrupt vector prefix register */
  MTIVPR((uint32_t) vector_base);

  if (
    ppc_cpu_is_specific_e200(PPC_e200z0)
      || ppc_cpu_is_specific_e200(PPC_e200z1)
  ) {
    /*
     * These cores have hard wired IVOR registers.  An access will cause a
     * program exception.
     */
    return;
  }

  /* Interrupt vector offset registers */
  MTIVOR(0,  ppc_exc_vector_address(ASM_BOOKE_CRIT_VECTOR, vector_base));
  MTIVOR(1,  ppc_exc_vector_address(ASM_MACH_VECTOR, vector_base));
  MTIVOR(2,  ppc_exc_vector_address(ASM_PROT_VECTOR, vector_base));
  MTIVOR(3,  ppc_exc_vector_address(ASM_ISI_VECTOR, vector_base));
  MTIVOR(4,  ppc_exc_vector_address(ASM_EXT_VECTOR, vector_base));
  MTIVOR(5,  ppc_exc_vector_address(ASM_ALIGN_VECTOR, vector_base));
  MTIVOR(6,  ppc_exc_vector_address(ASM_PROG_VECTOR, vector_base));
  MTIVOR(7,  ppc_exc_vector_address(ASM_FLOAT_VECTOR, vector_base));
  MTIVOR(8,  ppc_exc_vector_address(ASM_SYS_VECTOR, vector_base));
  MTIVOR(9,  ppc_exc_vector_address(ASM_BOOKE_APU_VECTOR, vector_base));
  MTIVOR(10, ppc_exc_vector_address(ASM_BOOKE_DEC_VECTOR, vector_base));
  MTIVOR(11, ppc_exc_vector_address(ASM_BOOKE_FIT_VECTOR, vector_base));
  MTIVOR(12, ppc_exc_vector_address(ASM_BOOKE_WDOG_VECTOR, vector_base));
  MTIVOR(13, ppc_exc_vector_address(ASM_BOOKE_DTLBMISS_VECTOR, vector_base));
  MTIVOR(14, ppc_exc_vector_address(ASM_BOOKE_ITLBMISS_VECTOR, vector_base));
  MTIVOR(15, ppc_exc_vector_address(ASM_BOOKE_DEBUG_VECTOR, vector_base));
  /* IVOR32-34 (SPE/embedded FP) exist only on e200 and e500 cores */
  if (ppc_cpu_is_e200() || ppc_cpu_is_e500()) {
    MTIVOR(32, ppc_exc_vector_address(ASM_E500_SPE_UNAVAILABLE_VECTOR, vector_base));
    MTIVOR(33, ppc_exc_vector_address(ASM_E500_EMB_FP_DATA_VECTOR, vector_base));
    MTIVOR(34, ppc_exc_vector_address(ASM_E500_EMB_FP_ROUND_VECTOR, vector_base));
  }
  /* IVOR35 (performance monitor) exists only on e200z7 and e500 cores */
  if (ppc_cpu_is_specific_e200(PPC_e200z7) || ppc_cpu_is_e500()) {
    MTIVOR(35, ppc_exc_vector_address(ASM_E500_PERFMON_VECTOR, vector_base));
  }
}
77
/*
 * Terminate the system with a BSP fatal error; called when exception
 * support cannot be initialized and continuing would be unsafe.
 * bsp_fatal() does not return.
 */
static void ppc_exc_fatal_error(void)
{
  bsp_fatal(PPC_FATAL_EXCEPTION_INITIALIZATION);
}
82
/*
 * Initialize PowerPC exception handling with the exception prologues placed
 * relative to vector_base.
 *
 * Steps, in order:
 *  1. Verify r13 holds _SDA_BASE_ (SVR4/EABI small-data convention the
 *     low-level exception code relies on); terminate via bsp_fatal()
 *     otherwise.
 *  2. Set up the interrupt stack.
 *  3. On non Book E-only configurations, capture the current MSR MMU/RI
 *     bits (and MSR_VE for AltiVec) for use while running C handlers.
 *  4. On Book E cores, program the IVPR/IVOR vectoring registers.
 *  5. Generate and copy an exception prologue for every valid vector.
 *  6. Optionally verify the stack is mapped write-back cacheable.
 *
 * Any failure ends in ppc_exc_fatal_error(), which does not return.
 */
void ppc_exc_initialize_with_vector_base(
  uintptr_t interrupt_stack_begin,
  uintptr_t interrupt_stack_size,
  void *vector_base
)
{
  rtems_status_code sc = RTEMS_SUCCESSFUL;
  const ppc_exc_categories *const categories = ppc_exc_current_categories();
  unsigned vector = 0;
  uint32_t sda_base = 0;
  uint32_t r13 = 0;

  /* No category table for this CPU model: cannot install handlers */
  if (categories == NULL) {
    ppc_exc_fatal_error();
  }

  /* Assembly code needs SDA_BASE in r13 (SVR4 or EABI). Make sure
   * early init code put it there.
   */
  __asm__ volatile (
    "lis %0, _SDA_BASE_@h\n"
    "ori %0, %0, _SDA_BASE_@l\n"
    "mr  %1, 13\n"
    : "=r" (sda_base), "=r"(r13)
  );

  if (sda_base != r13) {
    ppc_exc_fatal_error();
  }

  ppc_exc_initialize_interrupt_stack(interrupt_stack_begin, interrupt_stack_size);

#ifndef PPC_EXC_CONFIG_BOOKE_ONLY

  /* Use current MMU / RI settings when running C exception handlers */
  ppc_exc_msr_bits = ppc_machine_state_register() & (MSR_DR | MSR_IR | MSR_RI);

#ifdef __ALTIVEC__
  /* Need vector unit enabled to save/restore altivec context */
  ppc_exc_msr_bits |= MSR_VE;
#endif

#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */

  /* Book E cores vector through IVPR/IVOR registers instead of fixed
   * addresses, so they need extra register setup.
   */
  if (ppc_cpu_is_bookE() == PPC_BOOKE_STD || ppc_cpu_is_bookE() == PPC_BOOKE_E500) {
    ppc_exc_initialize_booke(vector_base);
  }

  /* Generate a minimal prologue for each valid vector and copy it to the
   * vector address; the prologue jumps into the common exception code.
   */
  for (vector = 0; vector <= LAST_VALID_EXC; ++vector) {
    ppc_exc_category category = ppc_exc_category_for_vector(categories, vector);

    if (category != PPC_EXC_INVALID) {
      void *const vector_address = ppc_exc_vector_address(vector, vector_base);
      uint32_t prologue [16];
      size_t prologue_size = sizeof(prologue);

      sc = ppc_exc_make_prologue(
        vector,
        vector_base,
        category,
        prologue,
        &prologue_size
      );
      if (sc != RTEMS_SUCCESSFUL) {
        ppc_exc_fatal_error();
      }

      /* ppc_code_copy() also takes care of instruction cache coherency
       * for the freshly written code (presumably — defined elsewhere).
       */
      ppc_code_copy(vector_address, prologue, prologue_size);
    }
  }

#ifndef PPC_EXC_CONFIG_BOOKE_ONLY
  /* If we are on a classic PPC with MSR_DR enabled then
   * assert that the mapping for at least this task's
   * stack is write-back-caching enabled (see README/CAVEATS)
   * Do this only if the cache is physically enabled.
   * Since it is not easy to figure that out in a
   * generic way we need help from the BSP: BSPs
   * which run entirely w/o the cache may set
   * ppc_exc_cache_wb_check to zero prior to calling
   * this routine.
   *
   * We run this check only after exception handling is
   * initialized so that we have some chance to get
   * information printed if it fails.
   *
   * Note that it is unsafe to ignore this issue; if
   * the check fails, do NOT disable it unless caches
   * are always physically disabled.
   */
  if (ppc_exc_cache_wb_check && (MSR_DR & ppc_exc_msr_bits)) {
    /* The size of 63 assumes cache lines are at most 32 bytes */
    uint8_t dummy[63];
    uintptr_t p = (uintptr_t) dummy;
    /* If the dcbz instruction raises an alignment exception
     * then the stack is mapped as write-thru or caching-disabled.
     * The low-level code is not capable of dealing with this
     * ATM.
     */
    /* Round up to a 32-byte (cache line) boundary inside dummy[] so dcbz
     * touches a full line that is guaranteed to lie within the buffer.
     */
    p = (p + 31U) & ~31U;
    __asm__ volatile ("dcbz 0, %0"::"b" (p));
    /* If we make it thru here then things seem to be OK */
  }
#endif /* PPC_EXC_CONFIG_BOOKE_ONLY */
}
Note: See TracBrowser for help on using the repository browser.