source: rtems/bsps/powerpc/shared/altivec/vec_sup.c @ 9964895

Last change on this file since 9964895 was 4fd1ff0f, checked in by Sebastian Huber <sebastian.huber@…>, on 03/26/18 at 04:57:10

bsps/powerpc: Move AltiVec support to bsps

This patch is a part of the BSP source reorganization.

Update #3285.

/* Altivec support for RTEMS; vector register context management.  */

/*
 * Authorship
 * ----------
 * This software was created by
 *     Till Straumann <strauman@slac.stanford.edu>, 2009,
 *         Stanford Linear Accelerator Center, Stanford University.
 *
 * Acknowledgement of sponsorship
 * ------------------------------
 * This software was produced by
 *     the Stanford Linear Accelerator Center, Stanford University,
 *         under Contract DE-AC03-76SFO0515 with the Department of Energy.
 *
 * Government disclaimer of liability
 * ----------------------------------
 * Neither the United States nor the United States Department of Energy,
 * nor any of their employees, makes any warranty, express or implied, or
 * assumes any legal liability or responsibility for the accuracy,
 * completeness, or usefulness of any data, apparatus, product, or process
 * disclosed, or represents that its use would not infringe privately owned
 * rights.
 *
 * Stanford disclaimer of liability
 * --------------------------------
 * Stanford University makes no representations or warranties, express or
 * implied, nor assumes any liability for the use of this software.
 *
 * Stanford disclaimer of copyright
 * --------------------------------
 * Stanford University, owner of the copyright, hereby disclaims its
 * copyright and all other rights in this software.  Hence, anyone may
 * freely use it for any purpose without restriction.
 *
 * Maintenance of notices
 * ----------------------
 * In the interest of clarity regarding the origin and status of this
 * SLAC software, this and all the preceding Stanford University notices
 * are to remain affixed to any copy or derivative of this software made
 * or distributed by the recipient and are to be affixed to any copy of
 * software made or distributed by the recipient that contains a copy or
 * derivative of this software.
 *
 * ------------------ SLAC Software Notices, Set 4 OTT.002a, 2004 FEB 03
 */

#ifdef __ALTIVEC__

#include <rtems.h>
#include <libcpu/cpuIdent.h>
#include <rtems/bspIo.h>
#include <rtems/error.h>
#include <rtems/score/cpu.h>
#include <rtems/powerpc/powerpc.h>

#define STATIC static

#define VEC_ALIGNMENT   16

#define NAM                             "AltiVec Support"
#define ERRID(a,b,c,d)  (((a)<<24) | ((b)<<16) | ((c)<<8) | (d))

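/* A vector of four 32-bit words; used below to probe how gcc handles VRSAVE. */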
typedef uint32_t _vu32 __attribute__((vector_size(VEC_ALIGNMENT)));

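/* Fallback definition: MSR[VE] (AltiVec vector enable) is bit 6, counting
 * from the most-significant bit of the 32-bit MSR.
 */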
#ifndef MSR_VE
#define MSR_VE  (1<<(31-6))
#endif

/* NOTE: These two variables are accessed by assembly code
 *       which assumes 32-bit data!
 */
uint32_t _CPU_altivec_ctxt_off = 0;
uint32_t _CPU_altivec_psim_cpu = 0;

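/* Small wrappers around the Machine State Register: read (mfmsr), write
 * (mtmsr) and instruction synchronization (isync) so that MSR changes take
 * effect before subsequent instructions.
 */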
static inline uint32_t
mfmsr(void)
{
uint32_t v;
        _CPU_MSR_GET(v);
        return v;
}

static inline void
mtmsr(uint32_t v)
{
        _CPU_MSR_SET(v);
}

static inline void
isync(void)
{
        asm volatile("isync");
}

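/* Stop all AltiVec data streams before the vector unit is turned off.
 * PSIM implements AltiVec but not the streaming instructions, so the
 * 'dssall' is skipped there.
 */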
static inline void
dssall(void)
{
        if ( !_CPU_altivec_psim_cpu)
                asm volatile("dssall");
}

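/* Enable, disable and restore the MSR vector-enable bit.  set_MSR_VE()
 * returns the previous MSR value so that rst_MSR_VE() can put things back
 * exactly as they were.
 */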
static inline uint32_t
set_MSR_VE(void)
{
uint32_t rval;
        rval=mfmsr();
        if ( ! (MSR_VE & rval ) ) {
                mtmsr(rval | MSR_VE);
                isync();
        }
        return rval;
}

static inline void
clr_MSR_VE(void)
{
        dssall();
        mtmsr(mfmsr() & ~MSR_VE);
        isync();
}

static inline void
rst_MSR_VE(uint32_t old)
{
        if ( ! ( MSR_VE & old ) ) {
                dssall();
                mtmsr(old);
                isync();
        }
}


/* Code to probe the compiler's stack alignment (PowerPC);
 * The routine determines at run-time if the compiler generated
 * 8 or 16-byte aligned code.
 *
 * Till Straumann <strauman@slac.stanford.edu>, 2005
 */

static void dummy(void) __attribute__((noinline));
/* add (empty) asm-statement to make sure this isn't optimized away */
static void dummy(void) { __asm__ volatile(""); }

static unsigned probe_r1(void) __attribute__((noinline));
static unsigned probe_r1(void)
{
unsigned r1;
        /* call something to enforce creation of a minimal stack frame;
         * (8 bytes: r1 and lr space for 'dummy' callee). If compiled
         * with -meabi -mno-altivec gcc allocates 8 bytes, if -mno-eabi
         * or -maltivec / -mabi=altivec then gcc allocates 16 bytes
         * according to the sysv / altivec ABI specs.
         */
        dummy();
        /* return stack pointer */
        asm volatile("mr %0,1":"=r"(r1));
        return r1;
}

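/* Subtracting the stack pointer seen inside probe_r1() from ours yields the
 * size of the frame gcc generated for probe_r1(); rounded down to a multiple
 * of 16 this reveals the effective stack alignment.
 */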
static unsigned
probe_ppc_stack_alignment(void)
{
unsigned r1;
        asm volatile("mr %0,1":"=r"(r1));
        return (r1 - probe_r1()) & ~ 0xf;
}

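/* Verify that both the configured ABI (compile-time) and the generated code
 * (run-time) provide the 16-byte stack alignment AltiVec needs.  Returns a
 * bitmask: 1 -> PPC_STACK_ALIGNMENT too small, 2 -> run-time probe failed.
 */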
STATIC int check_stack_alignment(void)
{
int rval = 0;
        if ( VEC_ALIGNMENT > PPC_STACK_ALIGNMENT ) {
                printk(NAM": CPU support has insufficient stack alignment;\n");
                printk("modify 'cpukit/score/cpu/powerpc/rtems/score/powerpc.h'\n");
                printk("and choose PPC_ABI_SVR4. I'll enable a workaround for now.\n");
                rval |= 1;
        }
        /* Run-time check; should compile with -mabi=altivec */
        if ( probe_ppc_stack_alignment() < VEC_ALIGNMENT ) {
                printk(NAM": run-time stack alignment insufficient; make sure you compile with -mabi=altivec\n");
                rval |= 2;
        }
        return rval;
}


static uint32_t probe_vrsave(_vu32 *p_v) __attribute__((noinline));

/* Check if this code was compiled with -mvrsave=yes or no
 * so that we can set the default/init value accordingly.
 */
static uint32_t probe_vrsave(_vu32 *p_v)
{
_vu32     x;
uint32_t vrsave;
        /* Explicitly clobber a volatile vector reg (0) that is
         * not used to pass return values.
         * If -mvrsave=yes was used this should cause gcc to
         * set bit 0 in vrsave. OTOH this bit cannot be set
         * because v0 is volatile and not used to pass a value
         * to the caller...
         */
        asm volatile("vxor %0, 0, 0; mfvrsave %1":"=v"(x),"=r"(vrsave)::"v0");
        if ( p_v ) {
                *p_v = x;
        }
        return vrsave;
}

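/* Compare VRSAVE before and after probe_vrsave() touches v0: if gcc
 * maintains VRSAVE (-mvrsave=yes) the register changes, otherwise it is
 * left alone.  If bit 0 is already set we cannot tell and assume 'no'.
 */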
static int vrsave_yes(void) __attribute__((noinline));

static int vrsave_yes(void)
{
uint32_t vrsave_pre;
        asm volatile("mfvrsave %0":"=r"(vrsave_pre));
        if ( (vrsave_pre & 0x80000000) ) {
                printk(NAM": WARNING - unable to determine whether -mvrsave was used; assuming NO\n");
                return 0;
        }
        return probe_vrsave(0) != vrsave_pre;
}

extern void
_CPU_altivec_set_vrsave_initval(uint32_t);


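/* One-time AltiVec setup: compute the offset of the AltiVec area inside the
 * thread context for use by the assembly-level context switch code, pick a
 * VRSAVE policy, verify stack alignment and make sure MSR_VE is set.
 */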
void
_CPU_Initialize_altivec(void)
{
unsigned          pvr;

        /* I don't like to have to #define the offset of the altivec area
         * for use by assembly code.
         * Therefore, we compute it here and store it in memory...
         */
        _CPU_altivec_ctxt_off  = offsetof(ppc_context, altivec);

        /*
         * See ppc_get_context() and PPC_CONTEXT_OFFSET_GPR1
         */
        _CPU_altivec_ctxt_off += PPC_DEFAULT_CACHE_LINE_SIZE;

        /*
         * Add space possibly needed for alignment
         */
        _CPU_altivec_ctxt_off += PPC_CACHE_ALIGNMENT - 1;

        if ( ! vrsave_yes() ) {
                /* The code seems to have been compiled with -mvrsave=no.
                 * Hence we must set VRSAVE so that all registers are
                 * saved/restored in case this support was not built with
                 * IGNORE_VRSAVE.
                 */
                _CPU_altivec_set_vrsave_initval( -1 );
        }

        if ( check_stack_alignment() & 2 )
                rtems_fatal_error_occurred(ERRID('V','E','C','1'));

        pvr                   = get_ppc_cpu_type();
        /* psim has altivec but lacks the streaming instructions :-( */
        _CPU_altivec_psim_cpu = (PPC_PSIM == pvr);

        if ( ! ppc_cpu_has_altivec() ) {
                printk(NAM": This CPU seems not to have AltiVec\n");
                rtems_panic("Unable to initialize AltiVec Support\n");
        }

        if ( ! (mfmsr() & MSR_VE) ) {
                printk(NAM": Warning: BSP should set MSR_VE early; doing it now...\n");
                set_MSR_VE();
        }
}
#endif