/* cpu_asm.c ===> cpu_asm.S or cpu_asm.s
 *
 * This file contains the basic algorithms for all assembly code used
 * in a specific CPU port of RTEMS.  These algorithms must be implemented
 * in assembly language.
 *
 * NOTE: This is supposed to be a .S or .s file, NOT a C file.
 *
 * COPYRIGHT (c) 1989-2008.
 * On-Line Applications Research Corporation (OAR).
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.com/license/LICENSE.
 *
 * $Id$
 */

/*
 * This is supposed to be an assembly file.  This means that system.h
 * and cpu.h should not be included in a "real" cpu_asm file.  An
 * implementation in assembly should include "cpu_asm.h".
 */
#include <avr/io.h>
#include <avr/sfr_defs.h>
#include <rtems/asm.h>

28 | |
---|
/* avr-gcc calling convention: first 16-bit argument in r25:r24,
 * second in r23:r22; 16-bit return value in r25:r24. */
#define jmpb_hi r25
#define jmpb_lo r24
#define val_hi  r23
#define val_lo  r22

#define ret_lo  r24
#define ret_hi  r25

PUBLIC( setjmp )

/*
 * int setjmp(jmp_buf env);
 *
 * env is passed in r25:r24 (jmpb_hi:jmpb_lo).  Layout written to env:
 *   r2..r17, r28, r29   call-saved registers and frame pointer (18 bytes)
 *   SPL, SPH            stack pointer value after popping the return address
 *   SREG                status register (including the I flag)
 *   return address      2 bytes, low byte first
 * Always returns 0 (via ijmp to the popped return address).
 *
 * NOTE(review): only a 2-byte return address is saved here, while
 * longjmp below has an __AVR_3_BYTE_PC__ path that reads a third byte.
 * Confirm this port is never built for 3-byte-PC devices.
 */
SYM( setjmp ):
	X_movw	XL, jmpb_lo
	/* save call-saved registers and frame pointer (Y = r28/r29) */
	.irp	.L_regno, 2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,28,29
	st	X+, r\.L_regno
	.endr
	/* pop the return address into Z */
	pop	ZH
	pop	ZL
	/* save stack pointer (value after the pops above) */
	in	ret_lo, AVR_STACK_POINTER_LO_ADDR
	st	X+, ret_lo
#ifdef _HAVE_AVR_STACK_POINTER_HI
	in	ret_lo, AVR_STACK_POINTER_HI_ADDR
	st	X+, ret_lo
#else
	/* no SPH on this device: store zero as the high byte.
	 * (fixed: was "in ret_lo, __zero_reg__" -- 'in' takes an I/O
	 * address, not a register name, so that line never assembled) */
	clr	ret_lo
	st	X+, ret_lo
#endif
	/* save status register (I flag) */
	in	ret_lo, AVR_STATUS_ADDR
	st	X+, ret_lo
	/* save return address */
	st	X+, ZL
	st	X+, ZH
	/* return zero, resuming at the popped return address */
	clr	ret_hi
	clr	ret_lo
	ijmp

	.size	_U(setjmp), . - _U(setjmp)
75 | |
---|
76 | |
---|
.global	_U(longjmp)
.type	_U(longjmp), @function

/*
 * void longjmp(jmp_buf env, int val);
 *
 * env in r25:r24, val in r23:r22.  Restores the context saved by
 * setjmp() above and returns val (forced to 1 if val == 0) from
 * that setjmp() call.
 */
_U(longjmp):
	X_movw	XL, jmpb_lo
	/* return value: val, changed to 1 if the caller passed 0
	 * (cpi/cpc borrow only when val == 0; adc then adds the carry) */
	X_movw	ret_lo, val_lo
	cpi	ret_lo, 1
	cpc	ret_hi, __zero_reg__
	adc	ret_lo, __zero_reg__
	/* restore call-saved registers and frame pointer */
	.irp	.L_regno, 2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,28,29
	ld	r\.L_regno, X+
	.endr
	/* restore stack pointer (SP value before the setjmp() call) and SREG */
	ld	ZL, X+
	ld	ZH, X+
	ld	__tmp_reg__, X+
#if defined (__AVR_XMEGA__) && __AVR_XMEGA__
	/* A write to SPL will automatically disable interrupts for up to 4
	   instructions or until the next I/O memory write. */
	out	AVR_STATUS_ADDR, __tmp_reg__
	out	AVR_STACK_POINTER_LO_ADDR, ZL
	out	AVR_STACK_POINTER_HI_ADDR, ZH
#else
# ifdef _HAVE_AVR_STACK_POINTER_HI
	/* interrupts disabled for shortest possible time (3 cycles) */
	cli
	out	AVR_STACK_POINTER_HI_ADDR, ZH
# endif
	/* Restore status register (including the interrupt enable flag).
	   Interrupts are re-enabled only after the next instruction. */
	out	AVR_STATUS_ADDR, __tmp_reg__
	out	AVR_STACK_POINTER_LO_ADDR, ZL
#endif
	/* get the saved return address and jump to it */
	ld	ZL, X+
	ld	ZH, X+
#if defined(__AVR_3_BYTE_PC__) && __AVR_3_BYTE_PC__
	ld	__tmp_reg__, X+
.L_jmp3:
	push	ZL
	push	ZH
	push	__tmp_reg__
	ret
#else
	ijmp
#endif
	.size	_U(longjmp), . - _U(longjmp)
127 | |
---|
128 | |
---|
129 | |
---|
130 | |
---|
131 | |
---|
132 | |
---|
133 | |
---|
/*
 * _CPU_Context_save_fp_context
 *
 * This routine is responsible for saving the FP context
 * at *fp_context_ptr.  If the pointer to load the FP context
 * from is changed then the pointer is modified by this routine.
 *
 * Sometimes a macro implementation of this is in cpu.h which dereferences
 * the ** and a similarly named routine in this file is passed something
 * like a (Context_Control_fp *).  The general rule on making this decision
 * is to avoid writing assembly language.
 *
 * NO_CPU Specific Information:
 *
 * XXX document implementation including references if appropriate
 *
 * void _CPU_Context_save_fp(
 *   Context_Control_fp **fp_context_ptr
 * )
 * {
 * }
 */
157 | |
---|
PUBLIC(_CPU_Context_save_fp)

/* Stub: the AVR has no floating-point hardware, so there is no FP
 * context to save -- the routine simply returns. */
SYM(_CPU_Context_save_fp):
	ret
162 | |
---|
163 | |
---|
164 | |
---|
165 | |
---|
166 | |
---|
167 | |
---|
168 | |
---|
/*
 * _CPU_Context_restore_fp_context
 *
 * This routine is responsible for restoring the FP context
 * at *fp_context_ptr.  If the pointer to load the FP context
 * from is changed then the pointer is modified by this routine.
 *
 * Sometimes a macro implementation of this is in cpu.h which dereferences
 * the ** and a similarly named routine in this file is passed something
 * like a (Context_Control_fp *).  The general rule on making this decision
 * is to avoid writing assembly language.
 *
 * NO_CPU Specific Information:
 *
 * XXX document implementation including references if appropriate
 *
 * void _CPU_Context_restore_fp(
 *   Context_Control_fp **fp_context_ptr
 * )
 * {
 * }
 */
192 | |
---|
193 | |
---|
PUBLIC(_CPU_Context_restore_fp)

/* Stub: the AVR has no floating-point hardware, so there is no FP
 * context to restore -- the routine simply returns. */
SYM(_CPU_Context_restore_fp):
	ret
198 | |
---|
199 | |
---|
200 | |
---|
/* _CPU_Context_switch
 *
 * This routine performs a normal non-FP context switch.
 *
 * NO_CPU Specific Information:
 *
 * XXX document implementation including references if appropriate
 */
210 | |
---|
211 | |
---|
212 | |
---|
213 | |
---|
214 | |
---|
PUBLIC(_CPU_Context_switch)

/*
 * _CPU_Context_switch(Context_Control *executing, Context_Control *heir)
 *
 * Per the avr-gcc calling convention, executing arrives in r25:r24 and
 * heir in r23:r22.  The executing context is saved through X, then the
 * code falls through to "restore", which loads the same layout from the
 * heir context.  The final "ret" pops the heir's saved return address
 * from the heir's (just-restored) stack.
 *
 * Context layout (21 bytes): r2..r17, r28, r29, SREG, SPL, SPH --
 * defined by the restore path below; the save path must match exactly.
 */
SYM(_CPU_Context_switch):
	/* X <- executing context */
	mov	r26, r24
	mov	r27, r25
	/* save call-saved registers and frame pointer (r28/r29) */
	st	X+, r2
	st	X+, r3
	st	X+, r4
	st	X+, r5
	st	X+, r6
	st	X+, r7
	st	X+, r8
	st	X+, r9
	st	X+, r10
	st	X+, r11
	st	X+, r12
	st	X+, r13
	st	X+, r14
	st	X+, r15
	st	X+, r16
	st	X+, r17
	st	X+, r28
	st	X+, r29
	/* (fixed: a stray duplicate "st X+, r29" here shifted every
	 * following slot by one byte relative to the restore path) */
	lds	r25, 0x5f		/* SREG (memory-mapped) */
	st	X+, r25
	lds	r25, 0x5d		/* SPL */
	st	X+, r25
	lds	r25, 0x5e		/* SPH */
	st	X+, r25			/* (fixed: SPH was loaded but never stored) */


restore:
	/* X <- heir context */
	mov	r26, r22
	mov	r27, r23
	ld	r2, X+
	ld	r3, X+
	ld	r4, X+
	ld	r5, X+
	ld	r6, X+
	ld	r7, X+
	ld	r8, X+
	ld	r9, X+
	ld	r10, X+
	ld	r11, X+
	ld	r12, X+
	ld	r13, X+
	ld	r14, X+
	ld	r15, X+
	ld	r16, X+
	ld	r17, X+
	ld	r28, X+
	ld	r29, X+
	ld	r25, X+
	sts	0x5f, r25		/* SREG */
	ld	r25, X+
	sts	0x5d, r25		/* SPL */
	ld	r25, X+
	sts	0x5e, r25		/* SPH */
	/* NOTE(review): SREG (possibly with I set) is written before
	 * SPL/SPH, so an interrupt could fire while SP is only half
	 * switched -- confirm callers run with interrupts disabled. */
	ret
274 | |
---|
275 | |
---|
PUBLIC(_CPU_Push)

/*
 * _CPU_Push(uint16_t stack, uint16_t value)
 *
 * stack arrives in r25:r24, value in r23:r22.  Temporarily switches
 * SP to "stack", pushes the two bytes of "value" (low byte first),
 * then restores the caller's SP.
 *
 * NOTE(review): SP is swapped with interrupts left enabled; an ISR
 * arriving between the sts instructions would run on a half-updated
 * stack pointer -- confirm this is only called with interrupts off.
 */
SYM(_CPU_Push):
	lds	r20, 0x5d		/* save caller's SPL */
	lds	r21, 0x5e		/* save caller's SPH */
	sts	0x5d, r24		/* SP <- target stack */
	sts	0x5e, r25
	push	r22
	push	r23
	sts	0x5d, r20		/* restore caller's SP */
	sts	0x5e, r21
	ret
287 | |
---|
/*
 * _CPU_Context_restore
 *
 * This routine is generally used only to restart self in an
 * efficient manner.  It may simply be a label in _CPU_Context_switch.
 *
 * NOTE: May be unnecessary to reload some registers.
 *
 * NO_CPU Specific Information:
 *
 * XXX document implementation including references if appropriate
 *
 * void _CPU_Context_restore(
 *   Context_Control *new_context
 * )
 * {
 *   printk( "AVR _CPU_Context_restore\n" );
 * }
 */
308 | |
---|
PUBLIC(_CPU_Context_restore)

/* Stub: returns without restoring anything.
 * NOTE(review): a full implementation would load X from r25:r24 and
 * jump to the "restore" path of _CPU_Context_switch -- confirm this
 * stub is intentional for this port. */
SYM(_CPU_Context_restore):
	ret
314 | |
---|
315 | |
---|
316 | |
---|
/* void __ISR_Handler()
 *
 * This routine provides the RTEMS interrupt management.
 *
 * NO_CPU Specific Information:
 *
 * XXX document implementation including references if appropriate
 *
 * void _ISR_Handler(void)
 * {
 */
/*
 * This discussion ignores a lot of the ugly details in a real
 * implementation such as saving enough registers/state to be
 * able to do something real.  Keep in mind that the goal is
 * to invoke a user's ISR handler which is written in C and
 * uses a certain set of registers.
 *
 * Also note that the exact order is to a large extent flexible.
 * Hardware will dictate a sequence for a certain subset of
 * _ISR_Handler while requirements for setting
 */

/*
 * At entry to "common" _ISR_Handler, the vector number must be
 * available.  On some CPUs the hardware puts either the vector
 * number or the offset into the vector table for this ISR in a
 * known place.  If the hardware does not give us this information,
 * then the assembly portion of RTEMS for this port will contain
 * a set of distinct interrupt entry points which somehow place
 * the vector number in a known place (which is safe if another
 * interrupt nests this one) and branches to _ISR_Handler.
 *
 *  save some or all context on stack
 *  may need to save some special interrupt information for exit
 *
 *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
 *    if ( _ISR_Nest_level == 0 )
 *      switch to software interrupt stack
 *  #endif
 *
 *  _ISR_Nest_level++;
 *
 *  _Thread_Dispatch_disable_level++;
 *
 *  (*_ISR_Vector_table[ vector ])( vector );
 *
 *  _Thread_Dispatch_disable_level--;
 *
 *  --_ISR_Nest_level;
 *
 *  if ( _ISR_Nest_level )
 *    goto the label "exit interrupt (simple case)"
 *
 *  if ( _Thread_Dispatch_disable_level ) {
 *    _ISR_Signals_to_thread_executing = FALSE;
 *    goto the label "exit interrupt (simple case)"
 *  }
 *
 *  if ( _Context_Switch_necessary || _ISR_Signals_to_thread_executing ) {
 *    _ISR_Signals_to_thread_executing = FALSE;
 *    call _Thread_Dispatch() or prepare to return to _ISR_Dispatch
 *    prepare to get out of interrupt
 *    return from interrupt (maybe to _ISR_Dispatch)
 *  }
 *
 *  LABEL "exit interrupt (simple case)":
 *  #if ( CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE )
 *    if outermost interrupt
 *      restore stack
 *  #endif
 *  prepare to get out of interrupt
 *  return from interrupt
 */
/* } */
PUBLIC(_ISR_Handler)

/* Stub interrupt handler: performs none of the RTEMS interrupt
 * management described in the comment above -- it simply returns. */
SYM(_ISR_Handler):
	ret
396 | |
---|