source: rtems/c/src/lib/libbsp/powerpc/motorola_powerpc/bootloader/head.S @ fcee56c0

4.104.114.84.95
Last change on this file since fcee56c0 was fcee56c0, checked in by Joel Sherrill <joel.sherrill@…>, on Jul 1, 1999 at 11:39:13 PM

Patch from Eric Valette <valette@…> to clean up the
previous submission.

  • Property mode set to 100644
File size: 8.6 KB
Line 
/*
 *  head.S -- Bootloader Entry point
 *
 *  Copyright (C) 1998, 1999 Gabriel Paubert, paubert@iram.es
 *
 *  Modified to compile in RTEMS development environment
 *  by Eric Valette
 *
 *  Copyright (C) 1999 Eric Valette. valette@crf.canon.fr
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.OARcorp.com/rtems/license.html.
 *
 * $Id$
 */
17
#include "bootldr.h"
#include <libcpu/cpu.h>
#include <rtems/score/targopts.h>
#include "asm.h"

#undef TEST_PPCBUG_CALLS	/* when defined, print a banner through PPCBug */
#define FRAME_SIZE 32		/* size in bytes of the initial stack frame */
/* Groups of HID0 cache-control bits (not present on the 601) */
#define LOCK_CACHES (HID0_DLOCK|HID0_ILOCK)
#define INVL_CACHES (HID0_DCI|HID0_ICFI)
#define ENBL_CACHES (HID0_DCE|HID0_ICE)

/* USE_PPCBUG: when defined, leave cache/MMU management to the PPCBug
 * firmware instead of doing it here.  Currently forced off.
 */
#define USE_PPCBUG
#undef  USE_PPCBUG
31       
/* Table of addresses resolved through the GOT at run time, so this code
 * keeps working wherever the firmware loads it and after it moves itself.
 */
	START_GOT
	GOT_ENTRY(_GOT2_TABLE_)
	GOT_ENTRY(_FIXUP_TABLE_)
	GOT_ENTRY(.bss)
	GOT_ENTRY(codemove)
	GOT_ENTRY(0)			/* our own current load address */
	GOT_ENTRY(__bd)			/* boot_data structure */
	GOT_ENTRY(moved)		/* resume point after self-relocation */
	GOT_ENTRY(_binary_rtems_gz_start)
	GOT_ENTRY(_binary_initrd_gz_start)
	GOT_ENTRY(_binary_initrd_gz_end)
#ifdef TEST_PPCBUG_CALLS
	GOT_ENTRY(banner_start)
	GOT_ENTRY(banner_end)
#endif
	END_GOT
	.globl	start
	.type	start,@function
/* Point the stack into the PreP partition header in the x86 reserved
 * code area, so that simple C routines can be called.
 */
start:	bl	1f			/* classic PC-discovery sequence... */
1:	mflr	r1			/* ...r1 = run-time address of 1: */
	li	r0,0
	/* NOTE(review): start-1b-0x400+0x1b0 assumes the standard PReP
	 * 0x41 partition layout (x86 reserved code area) -- confirm if
	 * the image builder ever changes the header.
	 */
	stwu	r0,start-1b-0x400+0x1b0-FRAME_SIZE(r1)
	stmw	r26,FRAME_SIZE-24(r1)	/* save non-volatile r26..r31 */
	GET_GOT				/* set up the GOT pointer (r30) */
	mfmsr	r28			/* Turn off interrupts */
	ori	r0,r28,MSR_EE
	xori	r0,r0,MSR_EE		/* clear only MSR_EE */
	mtmsr	r0

/* Enable the caches, from now on cr2.eq set means processor is 601 */
	mfpvr	r0
	mfspr	r29,HID0		/* keep the original HID0 in r29 */
	srwi	r0,r0,16		/* r0 = processor version number */
	cmplwi	cr2,r0,1		/* PVR version 1 is the MPC601 */
	beq	2,2f			/* 601: no HID0 cache-enable bits */
#ifndef USE_PPCBUG
	/* Enable and flash-invalidate both caches, leaving them unlocked:
	 * set ENBL|INVL|LOCK, then toggle INVL|LOCK back off in the write.
	 */
	ori	r0,r29,ENBL_CACHES|INVL_CACHES|LOCK_CACHES
	xori	r0,r0,INVL_CACHES|LOCK_CACHES
	sync
	isync
	mtspr	HID0,r0
#endif
2:	bl	reloc			/* fix up GOT2/fixup tables in place */

/* save all the parameters and the original msr/hid0/r31 */
	lwz	bd,GOT(__bd)
	stw	r3,0(bd)
	stw	r4,4(bd)
	stw	r5,8(bd)
	stw	r6,12(bd)
	lis	r3,__size@sectoff@ha
	stw	r7,16(bd)
	stw	r8,20(bd)
	addi	r3,r3,__size@sectoff@l	/* r3 = size of this bootloader */
	stw	r9,24(bd)
	stw	r10,28(bd)
	stw	r28,o_msr(bd)
	stw	r29,o_hid0(bd)
	stw	r31,o_r31(bd)

/* Call the routine to fill boot_data structure from residual data.
 * And to find where the code has to be moved.
 */
	bl	early_setup

/* Now we need to relocate ourselves, where we are told to. First put a
 * copy of the codemove routine to some place in memory.
 * (which may be where the 0x41 partition was loaded, so size is critical).
 */
	lwz	r4,GOT(codemove)
	li	r5,_size_codemove
	lwz	r3,mover(bd)		/* scratch area picked by early_setup */
	lwz	r6,cache_lsize(bd)
	bl	codemove
	mtctr	r3		# Where the temporary codemove is.
	lwz	r3,image(bd)		/* destination of the bootloader */
	lis	r5,_edata@sectoff@ha
	lwz	r4,GOT(0)	# Our own address
	addi	r5,r5,_edata@sectoff@l	/* r5 = bytes to move */
	lwz	r6,cache_lsize(bd)
	lwz	r8,GOT(moved)
	sub	r7,r3,r4	# Difference to adjust pointers.
	add	r8,r8,r7		/* `moved' label in the new copy */
	add	r30,r30,r7		/* GOT pointer in the new copy */
	add	bd,bd,r7		/* boot_data pointer in the new copy */
/* Call the copy routine but return to the new area. */
	mtlr	r8		# for the return address
	bctr			# returns to the moved instruction
/* Establish the new top stack frame. */
moved:	lwz	r1,stack(bd)
	li	r0,0
	stwu	r0,-16(r1)

/* relocate again */
	bl	reloc
/* Clear all of BSS */
	lwz	r10,GOT(.bss)
	li	r0,__bss_words@sectoff@l	/* word count of .bss */
	subi	r10,r10,4		/* pre-bias for the stwu update form */
	cmpwi	r0,0
	mtctr	r0
	li	r0,0
	beq	4f			/* skip the loop if .bss is empty */
3:	stwu	r0,4(r10)
	bdnz	3b

/* Final memory initialization. First switch to unmapped mode
 * in case the FW had set the MMU on, and flush the TLB to avoid
 * stale entries from interfering. No I/O access is allowed
 * during this time!
 */
#ifndef USE_PPCBUG
/* NOTE(review): the `4:' label only exists in this !USE_PPCBUG branch;
 * defining USE_PPCBUG would leave the `beq 4f' above without a target.
 */
4:	bl	MMUoff
#endif
	bl	flush_tlb
/* Some firmware versions leave stale values in the BATs, it's time
 * to invalidate them to avoid interferences with our own mappings.
 * But the 601 valid bit is in the BATL (IBAT only) and others are in
 * the [ID]BATU. Bloat, bloat.. fortunately thrown away later.
 */
	li	r3,0
	beq	cr2,5f			/* 601 has no DBATs */
	mtdbatu	0,r3
	mtdbatu	1,r3
	mtdbatu	2,r3
	mtdbatu	3,r3
5:	mtibatu	0,r3
	mtibatl	0,r3
	mtibatu	1,r3
	mtibatl	1,r3
	mtibatu	2,r3
	mtibatl	2,r3
	mtibatu	3,r3
	mtibatl	3,r3
	lis	r3,__size@sectoff@ha	/* r3 = bootloader size for mm_init */
	addi	r3,r3,__size@sectoff@l
	sync				# We are going to touch SDR1 !
	bl	mm_init
	bl	MMUon

/* Now we are mapped and can perform I/O if we want */
#ifdef TEST_PPCBUG_CALLS
/* Experience seems to show that PPCBug can only be called with the
 * data cache disabled and with MMU disabled. Bummer.
 */
	li	r10,0x22		# .OUTLN
	lwz	r3,GOT(banner_start)
	lwz	r4,GOT(banner_end)
	sc
#endif
	bl	setup_hw
	/* Load decompress_kernel's arguments: r3 = uncompressed size,
	 * r4/r5 = compressed kernel start/size, r6/r7 = initrd start/size.
	 */
	lwz	r4,GOT(_binary_rtems_gz_start)
	lis	r5,_rtems_gz_size@sectoff@ha
	lwz	r6,GOT(_binary_initrd_gz_start)
	lis	r3,_rtems_size@sectoff@ha
	lwz	r7,GOT(_binary_initrd_gz_end)
	addi	r5,r5,_rtems_gz_size@sectoff@l
	addi	r3,r3,_rtems_size@sectoff@l
	sub	r7,r7,r6		/* initrd size = end - start */
	bl	decompress_kernel

/* Back here we are unmapped and we start the kernel, passing up to eight
 * parameters just in case, only r3 to r7 used for now. Flush the tlb so
 * that the loaded image starts in a clean state.
 */
	bl	flush_tlb
	lwz	r3,0(bd)
	lwz	r4,4(bd)
	lwz	r5,8(bd)
	lwz	r6,12(bd)
	lwz	r7,16(bd)
	lwz	r8,20(bd)
	lwz	r9,24(bd)
	lwz	r10,28(bd)

	lwz	r30,0(0)	/* word at address 0 = kernel entry point */
	mtctr	r30
/*
 *	Linux code again
	lis	r30,0xdeadc0de@ha
	addi	r30,r30,0xdeadc0de@l
	stw	r30,0(0)
	li	r30,0
*/
	dcbst	0,r30	/* Make sure it's in memory ! */
/* We just flash invalidate and disable the dcache, unless it's a 601,
 * critical areas have been flushed and we don't care about the stack
 * and other scratch areas.
 */
	beq	cr2,1f
	mfspr	r0,HID0
	ori	r0,r0,HID0_DCI|HID0_DCE
	sync
	mtspr	HID0,r0			/* flash-invalidate... */
	xori	r0,r0,HID0_DCI|HID0_DCE
	mtspr	HID0,r0			/* ...then disable the dcache */
/* Provisional return to FW, works for PPCBug */
#if 0
1:	mfmsr	r10
	ori	r10,r10,MSR_IP		/* exceptions back to the ROM vectors */
	mtmsr	r10
	li	r10,0x63		/* return-to-firmware call (PPCBug) */
	sc
#else
1:	bctr				/* jump to the loaded kernel */
#endif
243
/* relocation function, r30 must point to got2+0x8000 */
reloc:
/* Adjust got2 pointers, no need to check for 0, this code already puts
 * a few entries in the table.
 */
	li	r0,__got2_entries@sectoff@l
	la	r12,GOT(_GOT2_TABLE_)	/* where the table is right now */
	lwz	r11,GOT(_GOT2_TABLE_)	/* where it was linked to be */
	mtctr	r0
	sub	r11,r12,r11		/* r11 = relocation offset */
	addi	r12,r12,-4		/* pre-bias for the lwzu update form */
1:	lwzu	r0,4(r12)		/* slide every GOT2 entry by r11 */
	add	r0,r0,r11
	stw	r0,0(r12)
	bdnz	1b

/* Now adjust the fixups and the pointers to the fixups in case we need
 * to move ourselves again.
 */
2:	li	r0,__fixup_entries@sectoff@l
	lwz	r12,GOT(_FIXUP_TABLE_)
	cmpwi	r0,0
	mtctr	r0
	addi	r12,r12,-4
	beqlr				/* nothing to fix up */
3:	lwzu	r10,4(r12)		/* r10 = location holding a pointer */
	lwzux	r0,r10,r11		/* relocate the location address... */
	add	r0,r0,r11		/* ...and the pointer stored there */
	stw	r10,0(r12)		/* remember the relocated location */
	stw	r0,0(r10)		/* store the adjusted pointer */
	bdnz	3b
	blr
276
/* Set the MMU on and off: code is always mapped 1:1 and does not need MMU,
 * but it does not cost so much to map it also and it catches calls through
 * NULL function pointers.
 */
	.globl	MMUon
	.type	MMUon,@function
/* Enable address translation (MSR_IR|MSR_DR set, MSR_IP cleared so
 * exception vectors are taken at low memory) and return to the caller
 * through rfi, which makes the MSR change take effect synchronously.
 * Clobbers r0 and r11.
 */
MMUon:	mfmsr	r0
	ori	r0,r0,MSR_IR|MSR_DR|MSR_IP	/* set IR/DR, preload IP */
	mflr	r11
	xori	r0,r0,MSR_IP			/* ...then clear only IP */
	mtsrr0	r11				/* rfi target = return address */
	mtsrr1	r0				/* rfi loads this as the new MSR */
	rfi
	.globl	MMUoff
	.type	MMUoff,@function
/* Disable address translation (MSR_IR|MSR_DR cleared) and send exceptions
 * to the ROM vectors (MSR_IP set); returns via rfi.  Clobbers r0 and r11.
 */
MMUoff:	mfmsr	r0
	ori	r0,r0,MSR_IR|MSR_DR|MSR_IP	/* set IP... */
	mflr	r11
	xori	r0,r0,MSR_IR|MSR_DR		/* ...then clear IR/DR */
	mtsrr0	r11				/* rfi target = return address */
	mtsrr1	r0				/* rfi loads this as the new MSR */
	rfi
299
/* Due to the PPC architecture (and according to the specifications), a
 * series of tlbie which goes through a whole 256 MB segment always flushes
 * the whole TLB. This is obviously overkill and slow, but who cares ?
 * It takes about 1 ms on a 200 MHz 603e and works even if residual data
 * get the number of TLB entries wrong.
 * Clobbers r11 and cr0.
 */
flush_tlb:
	lis	r11,0x1000		/* start at EA 0x10000000 (256 MB) */
1:	addic.	r11,r11,-0x1000		/* step down one 4 KB page */
	tlbie	r11
	bnl	1b			/* loop until the EA goes negative */
/* tlbsync is not implemented on 601, so use sync which seems to be a superset
 * of tlbsync in all cases and do not bother with CPU dependant code
 */
	sync
	blr
/* A few utility functions, some copied from arch/ppc/lib/string.S */

#if 0	/* currently unused; kept for reference */
	.globl	strnlen
	.type	strnlen,@function
/* size_t strnlen(const char *s [r3], size_t maxlen [r4])
 * Returns in r3 the length of s, limited to maxlen.
 * Clobbers r0, r4, ctr and cr0.
 */
strnlen:
	addi	r4,r4,1
	mtctr	r4			/* ctr = maxlen + 1 */
	addi	r4,r3,-1		/* pre-bias for the lbzu update form */
1:	lbzu	r0,1(r4)
	cmpwi	0,r0,0
	bdnzf	eq,1b			/* stop on NUL or when ctr runs out */
	subf	r3,r3,r4		/* length = final ptr - start ptr */
	blr
#endif
	.globl	codemove
codemove:
	.type	codemove,@function
/* r3 dest, r4 src, r5 length in bytes, r6 cachelinesize */
/* Copy r5 bytes (rounded up to a whole number of words) from r4 to r3,
 * choosing copy direction so overlapping moves are safe (like memmove),
 * then make the destination coherent for instruction fetch: dcbst every
 * line, sync, icbi every line, sync/isync.  A cache line size of 0 skips
 * the per-line flushing (sync/isync still run).
 * Clobbers r0, r4, r5, r7, r8, ctr, cr0 and cr1.
 */
	cmplw	cr1,r3,r4		/* cr1 = dest vs src */
	addi	r0,r5,3
	srwi.	r0,r0,2			/* r0 = word count, sets cr0 */
	beq	cr1,4f	/* In place copy is not necessary */
	beq	7f	/* Protect against 0 count */
	mtctr	r0
	bge	cr1,2f			/* dest > src: copy backwards */

	la	r8,-4(r4)		/* forward copy, update-form pointers */
	la	r7,-4(r3)
1:	lwzu	r0,4(r8)
	stwu	r0,4(r7)
	bdnz	1b
	b	4f

2:	slwi	r0,r0,2			/* backward copy: start past the end */
	add	r8,r4,r0
	add	r7,r3,r0
3:	lwzu	r0,-4(r8)
	stwu	r0,-4(r7)
	bdnz	3b

/* Now flush the cache: note that we must start from a cache aligned
 * address. Otherwise we might miss one cache line.
 */
4:	cmpwi	r6,0			/* line size 0 => skip line flushing */
	add	r5,r3,r5		/* r5 = end of destination */
	beq	7f	/* Always flush prefetch queue in any case */
	subi	r0,r6,1
	andc	r3,r3,r0		/* align start down to a line boundary */
	mr	r4,r3
5:	cmplw	r4,r5
	dcbst	0,r4			/* push dcache lines out to memory */
	add	r4,r4,r6
	blt	5b
	sync		/* Wait for all dcbst to complete on bus */
	mr	r4,r3
6:	cmplw	r4,r5
	icbi	0,r4			/* invalidate stale icache lines */
	add	r4,r4,r6
	blt	6b
7:	sync		/* Wait for all icbi to complete on bus */
	isync				/* discard prefetched instructions */
	blr
	.size	codemove,.-codemove
_size_codemove=.-codemove
381
	.section	".data"	# .rodata
	.align 2
#ifdef TEST_PPCBUG_CALLS
/* Banner printed through the PPCBug .OUTLN call above; the call takes
 * start and end pointers, so no terminating NUL is needed.
 */
banner_start:
	.ascii "This message was printed by PPCBug with MMU enabled"
banner_end:
#endif
Note: See TracBrowser for help on using the repository browser.