/* cpu_asm.s
 *
 *  This file contains the basic algorithms for all assembly code used
 *  in a specific CPU port of RTEMS.  These algorithms must be implemented
 *  in assembly language.
 *
 *  COPYRIGHT (c) 1989-1999.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.com/license/LICENSE.
 *
 *  Ported to ERC32 implementation of the SPARC by On-Line Applications
 *  Research Corporation (OAR) under contract to the European Space
 *  Agency (ESA).
 *
 *  ERC32 modifications of respective RTEMS file: COPYRIGHT (c) 1995.
 *  European Space Agency.
 *
 *  $Id$
 */
---|
| 23 | |
---|
| 24 | #include <asm.h> |
---|
| 25 | |
---|
#if (SPARC_HAS_FPU == 1)

/*
 *  void _CPU_Context_save_fp(
 *    void **fp_context_ptr
 *  )
 *
 *  This routine is responsible for saving the FP context
 *  at *fp_context_ptr.  If the point to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  NOTE: See the README in this directory for information on the
 *        management of the "EF" bit in the PSR.
 *
 *  NOTE(review): several offset names below spell the digit zero as the
 *        letter 'O' (FO_F1_OFFSET, F1O_F11_OFFSET, F2O_F21_OFFSET,
 *        F3O_F31_OFFSET).  These must match the definitions in the port's
 *        context header exactly -- verify against cpu.h before "fixing".
 */

        .align 4
        PUBLIC(_CPU_Context_save_fp)
SYM(_CPU_Context_save_fp):
        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         *  The following enables the floating point unit by setting the
         *  EF bit in the PSR.
         */

        mov     %psr, %l0
        sethi   %hi(SPARC_PSR_EF_MASK), %l1
        or      %l1, %lo(SPARC_PSR_EF_MASK), %l1
        or      %l0, %l1, %l0
        mov     %l0, %psr                  ! **** ENABLE FLOAT ACCESS ****
        nop; nop; nop;                     ! Need three nops before EF is
        ld      [%i0], %l0                 ! active due to pipeline delay!!!
        std     %f0, [%l0 + FO_F1_OFFSET]  ! save all 32 FP registers ...
        std     %f2, [%l0 + F2_F3_OFFSET]
        std     %f4, [%l0 + F4_F5_OFFSET]
        std     %f6, [%l0 + F6_F7_OFFSET]
        std     %f8, [%l0 + F8_F9_OFFSET]
        std     %f10, [%l0 + F1O_F11_OFFSET]
        std     %f12, [%l0 + F12_F13_OFFSET]
        std     %f14, [%l0 + F14_F15_OFFSET]
        std     %f16, [%l0 + F16_F17_OFFSET]
        std     %f18, [%l0 + F18_F19_OFFSET]
        std     %f20, [%l0 + F2O_F21_OFFSET]
        std     %f22, [%l0 + F22_F23_OFFSET]
        std     %f24, [%l0 + F24_F25_OFFSET]
        std     %f26, [%l0 + F26_F27_OFFSET]
        std     %f28, [%l0 + F28_F29_OFFSET]
        std     %f30, [%l0 + F3O_F31_OFFSET]
        st      %fsr, [%l0 + FSR_OFFSET]   ! ... and the FP status register
        ret
        restore
---|
/*
 *  void _CPU_Context_restore_fp(
 *    void **fp_context_ptr
 *  )
 *
 *  This routine is responsible for restoring the FP context
 *  at *fp_context_ptr.  If the point to load the FP context
 *  from is changed then the pointer is modified by this routine.
 *
 *  NOTE: See the README in this directory for information on the
 *        management of the "EF" bit in the PSR.
 */

        .align 4
        PUBLIC(_CPU_Context_restore_fp)
SYM(_CPU_Context_restore_fp):
        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE , %sp

        /*
         *  The following enables the floating point unit by setting the
         *  EF bit in the PSR.
         */

        mov     %psr, %l0
        sethi   %hi(SPARC_PSR_EF_MASK), %l1
        or      %l1, %lo(SPARC_PSR_EF_MASK), %l1
        or      %l0, %l1, %l0
        mov     %l0, %psr                  ! **** ENABLE FLOAT ACCESS ****
        nop; nop; nop;                     ! Need three nops before EF is
        ld      [%i0], %l0                 ! active due to pipeline delay!!!
        ldd     [%l0 + FO_F1_OFFSET], %f0  ! restore all 32 FP registers ...
        ldd     [%l0 + F2_F3_OFFSET], %f2
        ldd     [%l0 + F4_F5_OFFSET], %f4
        ldd     [%l0 + F6_F7_OFFSET], %f6
        ldd     [%l0 + F8_F9_OFFSET], %f8
        ldd     [%l0 + F1O_F11_OFFSET], %f10
        ldd     [%l0 + F12_F13_OFFSET], %f12
        ldd     [%l0 + F14_F15_OFFSET], %f14
        ldd     [%l0 + F16_F17_OFFSET], %f16
        ldd     [%l0 + F18_F19_OFFSET], %f18
        ldd     [%l0 + F2O_F21_OFFSET], %f20
        ldd     [%l0 + F22_F23_OFFSET], %f22
        ldd     [%l0 + F24_F25_OFFSET], %f24
        ldd     [%l0 + F26_F27_OFFSET], %f26
        ldd     [%l0 + F28_F29_OFFSET], %f28
        ldd     [%l0 + F3O_F31_OFFSET], %f30
        ld      [%l0 + FSR_OFFSET], %fsr   ! ... and the FP status register
        ret
        restore

#endif /* SPARC_HAS_FPU */
---|
| 127 | |
---|
/*
 *  void _CPU_Context_switch(
 *    Context_Control  *run,
 *    Context_Control  *heir
 *  )
 *
 *  This routine performs a normal non-FP context switch:
 *  it saves the integer state of the running task into *run (o0)
 *  and then falls through into _CPU_Context_restore_heir to
 *  restore the state of the heir task from *heir (o1).
 */

        .align 4
        PUBLIC(_CPU_Context_switch)
SYM(_CPU_Context_switch):
        ! skip g0 -- it always reads as zero
        st      %g1, [%o0 + G1_OFFSET]       ! save the global registers
        std     %g2, [%o0 + G2_OFFSET]
        std     %g4, [%o0 + G4_OFFSET]
        std     %g6, [%o0 + G6_OFFSET]

        std     %l0, [%o0 + L0_OFFSET]       ! save the local registers
        std     %l2, [%o0 + L2_OFFSET]
        std     %l4, [%o0 + L4_OFFSET]
        std     %l6, [%o0 + L6_OFFSET]

        std     %i0, [%o0 + I0_OFFSET]       ! save the input registers
        std     %i2, [%o0 + I2_OFFSET]
        std     %i4, [%o0 + I4_OFFSET]
        std     %i6, [%o0 + I6_FP_OFFSET]

        std     %o0, [%o0 + O0_OFFSET]       ! save the output registers
        std     %o2, [%o0 + O2_OFFSET]
        std     %o4, [%o0 + O4_OFFSET]
        std     %o6, [%o0 + O6_SP_OFFSET]

        rd      %psr, %o2
        st      %o2, [%o0 + PSR_OFFSET]      ! save status register

        /*
         *  This is entered from _CPU_Context_restore with:
         *    o1 = context to restore
         *    o2 = psr
         */

        PUBLIC(_CPU_Context_restore_heir)
SYM(_CPU_Context_restore_heir):
        /*
         *  Flush all windows with valid contents except the current one.
         *  In examining the set register windows, one may logically divide
         *  the windows into sets (some of which may be empty) based on their
         *  current status:
         *
         *    + current (i.e. in use),
         *    + used (i.e. a restore would not trap)
         *    + invalid (i.e. 1 in corresponding bit in WIM)
         *    + unused
         *
         *  Either the used or unused set of windows may be empty.
         *
         *  NOTE: We assume only one bit is set in the WIM at a time.
         *
         *  Given a CWP of 5 and a WIM of 0x1, the registers are divided
         *  into sets as follows:
         *
         *    + 0   - invalid
         *    + 1-4 - unused
         *    + 5   - current
         *    + 6-7 - used
         *
         *  In this case, we only would save the used windows -- 6 and 7.
         *
         *  Traps are disabled for the same logical period as in a
         *  flush all windows trap handler.
         *
         *    Register Usage while saving the windows:
         *      g1 = current PSR
         *      g2 = current wim
         *      g3 = CWP
         *      g4 = wim scratch
         *      g5 = scratch
         */

        ld      [%o1 + PSR_OFFSET], %g1      ! g1 = heir's saved psr

        and     %o2, SPARC_PSR_CWP_MASK, %g3 ! g3 = CWP (from current psr)
                                             ! g1 = psr w/o cwp, traps off
        andn    %g1, SPARC_PSR_ET_MASK | SPARC_PSR_CWP_MASK, %g1
        or      %g1, %g3, %g1                ! g1 = heir's psr w/ current CWP
        mov     %g1, %psr                    ! restore status register and
                                             ! **** DISABLE TRAPS ****
        mov     %wim, %g2                    ! g2 = wim
        mov     1, %g4
        sll     %g4, %g3, %g4                ! g4 = WIM mask for CW invalid

save_frame_loop:
        sll     %g4, 1, %g5                  ! rotate the "wim" left 1
        srl     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g4
        or      %g4, %g5, %g4                ! g4 = wim if we do one restore

        /*
         *  If a restore would not underflow, then continue flushing.
         */

        andcc   %g4, %g2, %g0                ! Next window invalid in WIM?
        bnz     done_flushing                ! Yes, no used windows remain
        nop

        restore                              ! back one window

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        ba      save_frame_loop
        nop

done_flushing:

        add     %g3, 1, %g3                  ! calculate desired WIM:
        and     %g3, SPARC_NUMBER_OF_REGISTER_WINDOWS - 1, %g3
        mov     1, %g4                       ! (CWP + 1) mod window count
        sll     %g4, %g3, %g4                ! g4 = new WIM
        mov     %g4, %wim

        or      %g1, SPARC_PSR_ET_MASK, %g1
        mov     %g1, %psr                    ! **** ENABLE TRAPS ****
                                             ! and restore CWP
        nop                                  ! PSR write needs three
        nop                                  ! delay cycles
        nop

        ! skip g0 -- it always reads as zero
        ld      [%o1 + G1_OFFSET], %g1       ! restore the global registers
        ldd     [%o1 + G2_OFFSET], %g2
        ldd     [%o1 + G4_OFFSET], %g4
        ldd     [%o1 + G6_OFFSET], %g6

        ldd     [%o1 + L0_OFFSET], %l0       ! restore the local registers
        ldd     [%o1 + L2_OFFSET], %l2
        ldd     [%o1 + L4_OFFSET], %l4
        ldd     [%o1 + L6_OFFSET], %l6

        ldd     [%o1 + I0_OFFSET], %i0       ! restore the input registers
        ldd     [%o1 + I2_OFFSET], %i2
        ldd     [%o1 + I4_OFFSET], %i4
        ldd     [%o1 + I6_FP_OFFSET], %i6

        ldd     [%o1 + O2_OFFSET], %o2       ! restore the output registers
        ldd     [%o1 + O4_OFFSET], %o4
        ldd     [%o1 + O6_SP_OFFSET], %o6
        ! do o0/o1 last to avoid destroying heir context pointer
        ldd     [%o1 + O0_OFFSET], %o0       ! overwrite heir pointer

        jmp     %o7 + 8                      ! return
        nop                                  ! delay slot
---|
[c62d36f] | 291 | |
---|
| 292 | /* |
---|
| 293 | * void _CPU_Context_restore( |
---|
| 294 | * Context_Control *new_context |
---|
| 295 | * ) |
---|
[9700578] | 296 | * |
---|
| 297 | * This routine is generally used only to perform restart self. |
---|
| 298 | * |
---|
| 299 | * NOTE: It is unnecessary to reload some registers. |
---|
[c62d36f] | 300 | */ |
---|
| 301 | |
---|
| 302 | .align 4 |
---|
| 303 | PUBLIC(_CPU_Context_restore) |
---|
| 304 | SYM(_CPU_Context_restore): |
---|
[9700578] | 305 | save %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp |
---|
| 306 | rd %psr, %o2 |
---|
| 307 | ba SYM(_CPU_Context_restore_heir) |
---|
| 308 | mov %i0, %o1 ! in the delay slot |
---|
[c62d36f] | 309 | |
---|
/*
 *  void _ISR_Handler()
 *
 *  This routine provides the RTEMS interrupt management.
 *
 *  We enter this handler from the 4 instructions in the trap table with
 *  the following registers assumed to be set as shown:
 *
 *    l0 = PSR
 *    l1 = PC
 *    l2 = nPC
 *    l3 = trap type
 *
 *  NOTE: By an executive defined convention, trap type is between 0 and 255 if
 *        it is an asynchonous trap and 256 and 511 if it is synchronous.
 */

        .align 4
        PUBLIC(_ISR_Handler)
SYM(_ISR_Handler):
        /*
         *  Fix the return address for synchronous traps: a synchronous
         *  trap must NOT re-execute the trapping instruction, so return
         *  to nPC / nPC + 4 instead of PC / nPC.
         */

        andcc   %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
                                      ! Is this a synchronous trap?
        be,a    win_ovflow            ! No, then skip the adjustment
        nop                           ! DELAY
        mov     %l1, %l6              ! save trapped pc for debug info
        mov     %l2, %l1              ! do not return to the instruction
        add     %l2, 4, %l2           ! indicated

win_ovflow:
        /*
         *  Save the globals this block uses.
         *
         *  These registers are not restored from the locals.  Their contents
         *  are saved directly from the locals into the ISF below.
         */

        mov     %g4, %l4              ! save the globals this block uses
        mov     %g5, %l5

        /*
         *  When at a "window overflow" trap, (wim == (1 << cwp)).
         *  If we get here like that, then process a window overflow.
         */

        rd      %wim, %g4
        srl     %g4, %l0, %g5         ! g5 = win >> cwp ; shift count and CWP
                                      !   are LS 5 bits ; how convenient :)
        cmp     %g5, 1                ! Is this an invalid window?
        bne     dont_do_the_window    ! No, then skip all this stuff
        ! we are using the delay slot (the srl below is harmless either way)

        /*
         *  The following is same as a 1 position right rotate of WIM
         */

        srl     %g4, 1, %g5           ! g5 = WIM >> 1
        sll     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4
                                      ! g4 = WIM << (Number Windows - 1)
        or      %g4, %g5, %g4         ! g4 = (WIM >> 1) |
                                      !      (WIM << (Number Windows - 1))

        /*
         *  At this point:
         *
         *    g4 = the new WIM
         *    g5 is free
         */

        /*
         *  Since we are tinkering with the register windows, we need to
         *  make sure that all the required information is in global registers.
         */

        save                          ! Save into the window
        wr      %g4, 0, %wim          ! WIM = new WIM
        nop                           ! delay slots
        nop
        nop

        /*
         *  Now save the window just as if we overflowed to it.
         */

        std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
        std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
        std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
        std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]

        std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
        std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
        std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
        std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]

        restore
        nop

dont_do_the_window:
        /*
         *  Global registers %g4 and %g5 are saved directly from %l4 and
         *  %l5 directly into the ISF below.
         */

save_isf:

        /*
         *  Save the state of the interrupted task -- especially the global
         *  registers -- in the Interrupt Stack Frame.  Note that the ISF
         *  includes a regular minimum stack frame which will be used if
         *  needed by register window overflow and underflow handlers.
         *
         *  REGISTERS SAME AS AT _ISR_Handler
         */

        sub     %fp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
                                      ! make space for ISF

        std     %l0, [%sp + ISF_PSR_OFFSET]    ! save psr, PC
        st      %l2, [%sp + ISF_NPC_OFFSET]    ! save nPC
        st      %g1, [%sp + ISF_G1_OFFSET]     ! save g1
        std     %g2, [%sp + ISF_G2_OFFSET]     ! save g2, g3
        std     %l4, [%sp + ISF_G4_OFFSET]     ! save g4, g5 -- see above
        std     %g6, [%sp + ISF_G6_OFFSET]     ! save g6, g7

        std     %i0, [%sp + ISF_I0_OFFSET]     ! save i0, i1
        std     %i2, [%sp + ISF_I2_OFFSET]     ! save i2, i3
        std     %i4, [%sp + ISF_I4_OFFSET]     ! save i4, i5
        std     %i6, [%sp + ISF_I6_FP_OFFSET]  ! save i6/fp, i7

        rd      %y, %g1
        st      %g1, [%sp + ISF_Y_OFFSET]      ! save y
        st      %l6, [%sp + ISF_TPC_OFFSET]    ! save real trapped pc

        mov     %sp, %o1                       ! 2nd arg to ISR Handler

        /*
         *  Increment ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l4 = _Thread_Dispatch_disable_level pointer
         *    l5 = _ISR_Nest_level pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         *
         *  NOTE: It is assumed that l4 - l7 will be preserved until the ISR
         *        nest and thread dispatch disable levels are unnested.
         */

        sethi    %hi(SYM(_Thread_Dispatch_disable_level)), %l4
        ld       [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6
        sethi    %hi(SYM(_ISR_Nest_level)), %l5
        ld       [%l5 + %lo(SYM(_ISR_Nest_level))], %l7

        add      %l6, 1, %l6
        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]

        add      %l7, 1, %l7
        st       %l7, [%l5 + %lo(SYM(_ISR_Nest_level))]

        /*
         *  If ISR nest level was zero (now 1), then switch stack.
         */

        mov      %sp, %fp
        subcc    %l7, 1, %l7          ! outermost interrupt handler?
        bnz      dont_switch_stacks   ! No, then do not switch stacks

        sethi    %hi(SYM(_CPU_Interrupt_stack_high)), %g4
        ld       [%g4 + %lo(SYM(_CPU_Interrupt_stack_high))], %sp

dont_switch_stacks:
        /*
         *  Make sure we have a place on the stack for the window overflow
         *  trap handler to write into.  At this point it is safe to
         *  enable traps again.
         */

        sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        /*
         *  Check if we have an external interrupt (trap 0x11 - 0x1f).  If so,
         *  set the PIL in the %psr to mask off interrupts with lower priority.
         *  The original %psr in %l0 is not modified since it will be restored
         *  when the interrupt handler returns.
         */

        mov      %l0, %g5
        and      %l3, 0x0ff, %g4

        /* This is a fix for ERC32 with FPU rev.B or rev.C */

#if defined(FPU_REVB)


        subcc    %g4, 0x08, %g0       ! FPU exception trap?
        be       fpu_revb             ! Yes, check for the rev.B/C erratum
        subcc    %g4, 0x11, %g0
        bl       dont_fix_pil         ! not an external interrupt
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil         ! not an external interrupt
        sll      %g4, 8, %g4          ! move trap # into PIL field
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        or       %g4, %g5, %g5        ! g5 = psr with new PIL
        srl      %l0, 12, %g4         ! check if EF is set in %psr
        andcc    %g4, 1, %g0
        be       dont_fix_pil         ! FPU disabled, no workaround needed
        nop
        ba,a     enable_irq


fpu_revb:
        srl      %l0, 12, %g4         ! check if EF is set in %psr
        andcc    %g4, 1, %g0
        be       dont_fix_pil         ! if FPU disabled then continue as normal
        and      %l3, 0xff, %g4
        subcc    %g4, 0x08, %g0
        bne      enable_irq           ! if not a FPU exception then do two fmovs
        set      __sparc_fq, %g4
        st       %fsr, [%g4]          ! if FQ is not empty and FQ[1] = fmovs
        ld       [%g4], %g4           ! then this is bug 3.14
        srl      %g4, 13, %g4
        andcc    %g4, 1, %g0
        be       dont_fix_pil
        set      __sparc_fq, %g4
        std      %fq, [%g4]
        ld       [%g4+4], %g4
        set      0x81a00020, %g5      ! opcode of "fmovs %f0, %f0"
        subcc    %g4, %g5, %g0
        bne,a    dont_fix_pil2
        wr       %l0, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
        ba,a     simple_return

enable_irq:
        or       %g5, SPARC_PSR_PIL_MASK, %g4
        wr       %g4, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
        nop; nop; nop
        fmovs    %f0, %f0             ! flush the FP queue (rev.B/C fix)
        ba       dont_fix_pil
        fmovs    %f0, %f0

        ! scratch area for reading %fsr / %fq above
        .data
        .global __sparc_fq
        .align 8
__sparc_fq:
        .word 0,0

        .text
/* end of ERC32 FPU rev.B/C fix */

#else

        subcc    %g4, 0x11, %g0
        bl       dont_fix_pil         ! not an external interrupt
        subcc    %g4, 0x1f, %g0
        bg       dont_fix_pil         ! not an external interrupt
        sll      %g4, 8, %g4          ! move trap # into PIL field
        and      %g4, SPARC_PSR_PIL_MASK, %g4
        andn     %l0, SPARC_PSR_PIL_MASK, %g5
        or       %g4, %g5, %g5        ! g5 = psr with new PIL
#endif

dont_fix_pil:
        wr       %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
dont_fix_pil2:

        /*
         *  Vector to user's handler.
         *
         *  NOTE: TBR may no longer have vector number in it since
         *        we just enabled traps.  It is definitely in l3.
         */

        sethi    %hi(SYM(_ISR_Vector_table)), %g4
        ld       [%g4+%lo(SYM(_ISR_Vector_table))], %g4
        and      %l3, 0xFF, %g5       ! remove synchronous trap indicator
        sll      %g5, 2, %g5          ! g5 = offset into table
        ld       [%g4 + %g5], %g4     ! g4 = _ISR_Vector_table[ vector ]


                                      ! o1 = 2nd arg = address of the ISF
                                      !   WAS LOADED WHEN ISF WAS SAVED!!!
        mov      %l3, %o0             ! o0 = 1st arg = vector number
        call     %g4, 0
        nop                           ! delay slot

        /*
         *  Redisable traps so we can finish up the interrupt processing.
         *  This is a VERY conservative place to do this.
         *
         *  NOTE: %l0 has the PSR which was in place when we took the trap.
         */

        mov      %l0, %psr            ! **** DISABLE TRAPS ****

        /*
         *  Decrement ISR nest level and Thread dispatch disable level.
         *
         *  Register usage for this section:
         *
         *    l4 = _Thread_Dispatch_disable_level pointer
         *    l5 = _ISR_Nest_level pointer
         *    l6 = _Thread_Dispatch_disable_level value
         *    l7 = _ISR_Nest_level value
         */

        sub      %l6, 1, %l6
        st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]

        st       %l7, [%l5 + %lo(SYM(_ISR_Nest_level))]

        /*
         *  If dispatching is disabled (includes nested interrupt case),
         *  then do a "simple" exit.
         */

        orcc     %l6, %g0, %g0        ! Is dispatching disabled?
        bnz      simple_return        ! Yes, then do a "simple" exit
        nop                           ! delay slot

        /*
         *  If a context switch is necessary, then do fudge stack to
         *  return to the interrupt dispatcher.
         */

        sethi    %hi(SYM(_Context_Switch_necessary)), %l4
        ld       [%l4 + %lo(SYM(_Context_Switch_necessary))], %l5

        orcc     %l5, %g0, %g0        ! Is thread switch necessary?
        bnz      SYM(_ISR_Dispatch)   ! yes, then invoke the dispatcher
        nop                           ! delay slot

        /*
         *  Finally, check to see if signals were sent to the currently
         *  executing task.  If so, we need to invoke the interrupt dispatcher.
         */

        sethi    %hi(SYM(_ISR_Signals_to_thread_executing)), %l6
        ld       [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))], %l7

        orcc     %l7, %g0, %g0        ! Were signals sent to the currently
                                      !   executing thread?
        bz       simple_return        ! No, then do a "simple" exit
                                      ! use the delay slot to clear the signals
                                      !   to the currently executing task flag
        st       %g0, [%l6 + %lo(SYM(_ISR_Signals_to_thread_executing))]


        /*
         *  Invoke interrupt dispatcher.
         */

        PUBLIC(_ISR_Dispatch)
SYM(_ISR_Dispatch):

        /*
         *  The following subtract should get us back on the interrupted
         *  tasks stack and add enough room to invoke the dispatcher.
         *  When we enable traps, we are mostly back in the context
         *  of the task and subsequent interrupts can operate normally.
         */

        sub      %fp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp

        or      %l0, SPARC_PSR_ET_MASK, %l7    ! l7 = PSR with ET=1
        mov     %l7, %psr                      !  **** ENABLE TRAPS ****
        nop
        nop
        nop

        call    SYM(_Thread_Dispatch), 0
        nop

        /*
         *  We invoked _Thread_Dispatch in a state similar to the interrupted
         *  task.  In order to safely be able to tinker with the register
         *  windows and get the task back to its pre-interrupt state,
         *  we need to disable interrupts disabled so we can safely tinker
         *  with the register windowing.  In particular, the CWP in the PSR
         *  is fragile during this period. (See PR578.)
         */
        mov     2,%g1                          ! syscall (disable interrupts)
        ta      0                              ! syscall (disable interrupts)

        /*
         *  The CWP in place at this point may be different from
         *  that which was in effect at the beginning of the ISR if we
         *  have been context switched between the beginning of this invocation
         *  of _ISR_Handler and this point.  Thus the CWP and WIM should
         *  not be changed back to their values at ISR entry time.  Any
         *  changes to the PSR must preserve the CWP.
         */

simple_return:
        ld      [%fp + ISF_Y_OFFSET], %l5      ! restore y
        wr      %l5, 0, %y

        ldd     [%fp + ISF_PSR_OFFSET], %l0    ! restore psr, PC
        ld      [%fp + ISF_NPC_OFFSET], %l2    ! restore nPC
        rd      %psr, %l3
        and     %l3, SPARC_PSR_CWP_MASK, %l3   ! want "current" CWP
        andn    %l0, SPARC_PSR_CWP_MASK, %l0   ! want rest from task
        or      %l3, %l0, %l0                  ! install it later...
        andn    %l0, SPARC_PSR_ET_MASK, %l0

        /*
         *  Restore tasks global and out registers
         */

        mov    %fp, %g1                        ! g1 = ISF base (survives restore)

                                               ! g1 is restored later
        ldd    [%fp + ISF_G2_OFFSET], %g2      ! restore g2, g3
        ldd    [%fp + ISF_G4_OFFSET], %g4      ! restore g4, g5
        ldd    [%fp + ISF_G6_OFFSET], %g6      ! restore g6, g7

        ldd    [%fp + ISF_I0_OFFSET], %i0      ! restore i0, i1
        ldd    [%fp + ISF_I2_OFFSET], %i2      ! restore i2, i3
        ldd    [%fp + ISF_I4_OFFSET], %i4      ! restore i4, i5
        ldd    [%fp + ISF_I6_FP_OFFSET], %i6   ! restore i6/fp, i7

        /*
         *  Registers:
         *
         *   ALL global registers EXCEPT G1 and the input registers have
         *   already been restored and thus off limits.
         *
         *   The following is the contents of the local registers:
         *
         *     l0 = original psr
         *     l1 = return address (i.e. PC)
         *     l2 = nPC
         *     l3 = CWP
         */

        /*
         *  if (CWP + 1) is an invalid window then we need to reload it.
         *
         *  WARNING: Traps should now be disabled
         */

        mov     %l0, %psr                      !  **** DISABLE TRAPS ****
        nop
        nop
        nop
        rd      %wim, %l4
        add     %l0, 1, %l6                    ! l6 = cwp + 1
        and     %l6, SPARC_PSR_CWP_MASK, %l6   ! do the modulo on it
        srl     %l4, %l6, %l5                  ! l5 = win >> cwp + 1 ; shift count
                                               !  and CWP are conveniently LS 5 bits
        cmp     %l5, 1                         ! Is tasks window invalid?
        bne     good_task_window
        ! delay slot: the sll below is harmless if the branch is taken

        /*
         *  The following code is the same as a 1 position left rotate of WIM.
         */

        sll     %l4, 1, %l5                    ! l5 = WIM << 1
        srl     %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4
                                               ! l4 = WIM >> (Number Windows - 1)
        or      %l4, %l5, %l4                  ! l4 = (WIM << 1) |
                                               !      (WIM >> (Number Windows - 1))

        /*
         *  Now restore the window just as if we underflowed to it.
         */

        wr      %l4, 0, %wim                   ! WIM = new WIM
        nop                                    ! must delay after writing WIM
        nop
        nop
        restore                                ! now into the tasks window

        ldd     [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0
        ldd     [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2
        ldd     [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4
        ldd     [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6
        ldd     [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0
        ldd     [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2
        ldd     [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4
        ldd     [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
                                               ! reload of sp clobbers ISF
        save                                   ! Back to ISR dispatch window

good_task_window:

        mov     %l0, %psr                      !  **** DISABLE TRAPS ****
                                               !  and restore condition codes.
        ld      [%g1 + ISF_G1_OFFSET], %g1     ! restore g1
        jmp     %l1                            ! transfer control and
        rett    %l2                            ! go back to tasks window
---|
/* end of file */
---|