source: rtems/c/src/lib/libcpu/powerpc/new-exceptions/bspsupport/ppc_exc_initialize.c @ 9b4422a2

Last change on this file since 9b4422a2 was 9b4422a2, checked in by Joel Sherrill <joel.sherrill@…>, on May 3, 2012 at 3:09:24 PM

Remove All CVS Id Strings Possible Using a Script

Script does what is expected and tries to do it as smartly as possible.

+ remove occurrences of two blank comment lines next to each other after
  the Id string line was removed
+ remove entire comment blocks which only existed to contain CVS Ids
+ If the processing left a blank line at the top of a file, it was removed.

/**
 * @file
 *
 * @ingroup ppc_exc
 *
 * @brief PowerPC Exceptions implementation.
 */

/*
 * Copyright (C) 1999 Eric Valette (valette@crf.canon.fr)
 *                    Canon Centre Recherche France.
 *
 * Copyright (C) 2007 Till Straumann <strauman@slac.stanford.edu>
 *
 * Copyright (C) 2009 embedded brains GmbH.
 *
 * Derived from file "libcpu/powerpc/new-exceptions/bspsupport/vectors_init.c".
 * Derived from file "libcpu/powerpc/new-exceptions/e500_raw_exc_init.c".
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.com/license/LICENSE.
 */

#include <rtems.h>

#include <bsp/vectors.h>

uint32_t ppc_exc_cache_wb_check = 1;

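/*
 * These helper macros wrap single Book E instructions: MTIVPR writes the
 * Interrupt Vector Prefix Register and MTIVOR(x, vec) writes the numbered
 * Interrupt Vector Offset Register IVORx.
 */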
#define MTIVPR(prefix) __asm__ volatile ("mtivpr %0" : : "r" (prefix))
#define MTIVOR(x, vec) __asm__ volatile ("mtivor"#x" %0" : : "r" (vec))

static void ppc_exc_initialize_booke(void)
{
  /* Interrupt vector prefix register */
  MTIVPR(ppc_exc_vector_base);

  if (ppc_cpu_is(PPC_e200z0) || ppc_cpu_is(PPC_e200z1)) {
    /*
     * These cores have hard-wired IVOR registers.  An access will cause a
     * program exception.
     */
    return;
  }

  /* Interrupt vector offset registers */
  MTIVOR(0,  ppc_exc_vector_address(ASM_BOOKE_CRIT_VECTOR));
  MTIVOR(1,  ppc_exc_vector_address(ASM_MACH_VECTOR));
  MTIVOR(2,  ppc_exc_vector_address(ASM_PROT_VECTOR));
  MTIVOR(3,  ppc_exc_vector_address(ASM_ISI_VECTOR));
  MTIVOR(4,  ppc_exc_vector_address(ASM_EXT_VECTOR));
  MTIVOR(5,  ppc_exc_vector_address(ASM_ALIGN_VECTOR));
  MTIVOR(6,  ppc_exc_vector_address(ASM_PROG_VECTOR));
  MTIVOR(7,  ppc_exc_vector_address(ASM_FLOAT_VECTOR));
  MTIVOR(8,  ppc_exc_vector_address(ASM_SYS_VECTOR));
  MTIVOR(9,  ppc_exc_vector_address(ASM_BOOKE_APU_VECTOR));
  MTIVOR(10, ppc_exc_vector_address(ASM_BOOKE_DEC_VECTOR));
  MTIVOR(11, ppc_exc_vector_address(ASM_BOOKE_FIT_VECTOR));
  MTIVOR(12, ppc_exc_vector_address(ASM_BOOKE_WDOG_VECTOR));
  MTIVOR(13, ppc_exc_vector_address(ASM_BOOKE_DTLBMISS_VECTOR));
  MTIVOR(14, ppc_exc_vector_address(ASM_BOOKE_ITLBMISS_VECTOR));
  MTIVOR(15, ppc_exc_vector_address(ASM_BOOKE_DEBUG_VECTOR));
  if (ppc_cpu_is_e200() || ppc_cpu_is_e500()) {
    MTIVOR(32, ppc_exc_vector_address(ASM_E500_SPE_UNAVAILABLE_VECTOR));
    MTIVOR(33, ppc_exc_vector_address(ASM_E500_EMB_FP_DATA_VECTOR));
    MTIVOR(34, ppc_exc_vector_address(ASM_E500_EMB_FP_ROUND_VECTOR));
  }
  if (ppc_cpu_is_e500()) {
    MTIVOR(35, ppc_exc_vector_address(ASM_E500_PERFMON_VECTOR));
  }
}

rtems_status_code ppc_exc_initialize(
  uint32_t interrupt_disable_mask,
  uintptr_t interrupt_stack_begin,
  uintptr_t interrupt_stack_size
)
{
  rtems_status_code sc = RTEMS_SUCCESSFUL;
  const ppc_exc_categories *const categories = ppc_exc_current_categories();
  uintptr_t const interrupt_stack_end = interrupt_stack_begin + interrupt_stack_size;
  uintptr_t interrupt_stack_pointer = interrupt_stack_end - PPC_MINIMUM_STACK_FRAME_SIZE;
  unsigned vector = 0;
  uint32_t sda_base = 0;
  uint32_t r13 = 0;

  if (categories == NULL) {
    return RTEMS_NOT_IMPLEMENTED;
  }

  /* Assembly code needs SDA_BASE in r13 (SVR4 or EABI). Make sure
   * early init code put it there.
   */
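  /* The lis/ori pair materializes the link-time address of _SDA_BASE_,
   * and the mr instruction captures the current contents of r13 for the
   * comparison below.
   */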
  __asm__ volatile (
    "lis %0, _SDA_BASE_@h\n"
    "ori %0, %0, _SDA_BASE_@l\n"
    "mr  %1, 13\n"
    : "=r" (sda_base), "=r"(r13)
  );

  if (sda_base != r13) {
    return RTEMS_NOT_CONFIGURED;
  }

  /* Ensure proper interrupt stack alignment */
  interrupt_stack_pointer &= ~((uintptr_t) CPU_STACK_ALIGNMENT - 1);

  /* Tag interrupt stack bottom: a zero back chain word terminates stack
   * unwinding */
  *(uint32_t *) interrupt_stack_pointer = 0;

  /* Move interrupt stack values to special purpose registers */
  PPC_SET_SPECIAL_PURPOSE_REGISTER(SPRG1, interrupt_stack_pointer);
  PPC_SET_SPECIAL_PURPOSE_REGISTER(SPRG2, interrupt_stack_begin);

  ppc_interrupt_set_disable_mask(interrupt_disable_mask);

  /* Use current MMU / RI settings when running C exception handlers */
  ppc_exc_msr_bits = ppc_machine_state_register() & (MSR_DR | MSR_IR | MSR_RI);

#ifdef __ALTIVEC__
  /* Need vector unit enabled to save/restore altivec context */
  ppc_exc_msr_bits |= MSR_VE;
#endif

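  /*
   * Book E cores dispatch exceptions through the IVPR/IVOR registers
   * rather than through fixed vector offsets, so program these registers
   * before the prologues are installed.
   */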
  if (ppc_cpu_is_bookE() == PPC_BOOKE_STD || ppc_cpu_is_bookE() == PPC_BOOKE_E500) {
    ppc_exc_initialize_booke();
  }

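  /*
   * Generate a prologue for each valid exception vector and copy it to
   * the vector address; the prologue transfers control to the common
   * exception handling code.
   */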
  for (vector = 0; vector <= LAST_VALID_EXC; ++vector) {
    ppc_exc_category category = ppc_exc_category_for_vector(categories, vector);

    if (category != PPC_EXC_INVALID) {
      void *const vector_address = ppc_exc_vector_address(vector);
      uint32_t prologue [16];
      size_t prologue_size = sizeof(prologue);

      sc = ppc_exc_make_prologue(vector, category, prologue, &prologue_size);
      if (sc != RTEMS_SUCCESSFUL) {
        return RTEMS_INTERNAL_ERROR;
      }

      ppc_code_copy(vector_address, prologue, prologue_size);
    }
  }

  /* If we are on a classic PPC with MSR_DR enabled then
   * assert that the mapping for at least this task's
   * stack is write-back-caching enabled (see README/CAVEATS)
   * Do this only if the cache is physically enabled.
   * Since it is not easy to figure that out in a
   * generic way we need help from the BSP: BSPs
   * which run entirely w/o the cache may set
   * ppc_exc_cache_wb_check to zero prior to calling
   * this routine.
   *
   * We run this check only after exception handling is
   * initialized so that we have some chance to get
   * information printed if it fails.
   *
   * Note that it is unsafe to ignore this issue; if
   * the check fails, do NOT disable it unless caches
   * are always physically disabled.
   */
  if (ppc_exc_cache_wb_check && (MSR_DR & ppc_exc_msr_bits)) {
    /* A 63 byte buffer is large enough to contain a fully aligned cache
     * line, assuming cache lines are at most 32 bytes */
    uint8_t dummy[63];
    uintptr_t p = (uintptr_t) dummy;
    /* If the dcbz instruction raises an alignment exception
     * then the stack is mapped as write-through or caching-disabled.
     * The low-level code is not capable of dealing with this
     * at the moment.
     */
    p = (p + 31U) & ~31U;
    __asm__ volatile ("dcbz 0, %0"::"b" (p));
    /* If we make it through here then things seem to be OK */
  }

  return RTEMS_SUCCESSFUL;
}