Changeset a11e1ff5 in rtems
- Timestamp:
- 03/07/17 06:58:11 (7 years ago)
- Branches:
- 5, master
- Children:
- 088acbb0
- Parents:
- c6f76392
- Location:
- c/src/lib/libcpu/powerpc
- Files:
-
- 2 edited
Legend:
- Unmodified
- Added
- Removed
-
c/src/lib/libcpu/powerpc/mpc6xx/altivec/vec_sup_asm.S
rc6f76392 ra11e1ff5 74 74 .set r3, 3 75 75 .set r4, 4 76 .set r5, 576 /* Do not use r5, since this is used by _CPU_Context_switch() */ 77 77 .set r6, 6 78 78 .set r7, 7 79 .set r8, 8 79 80 .set r9, 9 80 81 .set r10, 10 … … 579 580 #endif 580 581 581 PREP_FOR_SAVE r0, r3, r4, r 5, r6, r10582 PREP_FOR_SAVE r0, r3, r4, r8, r6, r10 582 583 /* r0 now contains VRSAVE, r3 still the aligned memory area 583 * and r4, r 5, r6 are offset by 16, 32, and 48 bytes from r3,584 * and r4, r8, r6 are offset by 16, 32, and 48 bytes from r3, 584 585 * respectively. r10 holds zero 585 586 */ 586 S_V0TOV19 _B0=r3, _B1=r4, _B2=r 5, _B3=r6, _O1=r10, _O2=r11587 S_V0TOV19 _B0=r3, _B1=r4, _B2=r8, _B3=r6, _O1=r10, _O2=r11 587 588 mfvscr v0 588 589 /* Store vrsave (still in r0) and vscr (in v0) to memory area */ … … 614 615 dcbt 0, r3 615 616 L_VSCR_VRSAVE r3, r0, v0 616 CMP_BASES r3, r4, r 5, r6, r10617 CMP_BASES r3, r4, r8, r6, r10 617 618 /* Start preloading 3rd line (where vectors 3 and 4 are) */ 618 dcbt 0, r 5619 L_V0TOV19 r3, r4, r 5, r6, r10, r11619 dcbt 0, r8 620 L_V0TOV19 r3, r4, r8, r6, r10, r11 620 621 621 622 #ifndef IGNORE_VRSAVE … … 628 629 629 630 /* fetch offset of altivec area in context */ 630 CMPOFF r 5631 CMPOFF r8 631 632 /* down-align 'to' area to cache-line boundary */ 632 add r4, r4, r 5633 add r4, r4, r8 633 634 CACHE_DOWNALGN r4 634 635 … … 659 660 /* SAVE NON-VOLATILE REGISTERS */ 660 661 661 /* Compute aligned destination pointer (r 5still holds offset662 /* Compute aligned destination pointer (r8 still holds offset 662 663 * to 'altivec' area in context) 663 664 */ 664 add r3, r3, r 5665 add r3, r3, r8 665 666 CACHE_DOWNALGN r3 666 667 667 PREP_FOR_SAVE r0, r3, r 5, r6, r7, r10668 PREP_FOR_SAVE r0, r3, r8, r6, r7, r10 668 669 /* The manual says reading vscr can take some time - do 669 670 * read it here (into a volatile vector register) while … … 671 672 */ 672 673 mfvscr v0 673 S_V20TOV31 _LRU=l, _B0=r3, _B1=r 5, _B2=r6, _B3=r7, _O1=r10, _O2=r11674 S_V20TOV31 
_LRU=l, _B0=r3, _B1=r8, _B2=r6, _B3=r7, _O1=r10, _O2=r11 674 675 /* vrsave is now in r0 (PREP_FOR_SAVE), vscr in v0 */ 675 S_VSCR_VRSAVE r0, v0, r3, r 5676 S_VSCR_VRSAVE r0, v0, r3, r8 676 677 677 678 1: … … 682 683 addi r4, r4, PPC_CACHE_ALIGNMENT 683 684 L_VSCR_VRSAVE r4, r0, v0 684 CMP_BASES r4, r 5, r6, r7, r10685 L_V20TOV31 r4, r 5, r6, r7, r10, r11685 CMP_BASES r4, r8, r6, r7, r10 686 L_V20TOV31 r4, r8, r6, r7, r10, r11 686 687 687 688 #ifndef IGNORE_VRSAVE … … 692 693 .global _CPU_Context_initialize_altivec 693 694 _CPU_Context_initialize_altivec: 694 CMPOFF r 5695 add r3, r3, r 5695 CMPOFF r8 696 add r3, r3, r8 696 697 CACHE_DOWNALGN r3 697 lis r 5, _CPU_altivec_vrsave_initval@ha698 lwz r 5, _CPU_altivec_vrsave_initval@l(r5)699 stw r 5, VRSAVE_OFF(r3)698 lis r8, _CPU_altivec_vrsave_initval@ha 699 lwz r8, _CPU_altivec_vrsave_initval@l(r8) 700 stw r8, VRSAVE_OFF(r3) 700 701 lis r6, _CPU_altivec_vscr_initval@ha 701 702 lwz r6, _CPU_altivec_vscr_initval@l(r6) … … 716 717 .global _CPU_altivec_set_vrsave_initval 717 718 _CPU_altivec_set_vrsave_initval: 718 lis r 5, _CPU_altivec_vrsave_initval@ha719 stw r3, _CPU_altivec_vrsave_initval@l(r 5)719 lis r8, _CPU_altivec_vrsave_initval@ha 720 stw r3, _CPU_altivec_vrsave_initval@l(r8) 720 721 mtvrsave r3 721 722 blr … … 772 773 dcbt 0, r3 773 774 L_VSCR_VRSAVE r3, r0, v0 774 CMP_BASES r3, r4, r 5, r6, r10775 CMP_BASES r3, r4, r8, r6, r10 775 776 /* Start preloading 3rd line (where vectors 3 and 4 are) */ 776 dcbt 0, r 5777 L_V0TOV31 r3, r4, r 5, r6, r10, r11777 dcbt 0, r8 778 L_V0TOV31 r3, r4, r8, r6, r10, r11 778 779 779 780 #ifndef IGNORE_VRSAVE … … 795 796 #endif 796 797 797 PREP_FOR_SAVE r0, r3, r4, r 5, r6, r10798 PREP_FOR_SAVE r0, r3, r4, r8, r6, r10 798 799 /* r0 now contains VRSAVE, r3 still the aligned memory area 799 * and r4, r 5, r6 are offset by 16, 32, and 48 bytes from r3,800 * and r4, r8, r6 are offset by 16, 32, and 48 bytes from r3, 800 801 * respectively. 
r10 holds zero 801 802 */ 802 S_V0TOV31 _B0=r3, _B1=r4, _B2=r 5, _B3=r6, _O1=r10, _O2=r11803 S_V0TOV31 _B0=r3, _B1=r4, _B2=r8, _B3=r6, _O1=r10, _O2=r11 803 804 mfvscr v0 804 805 /* Store vrsave (still in r0) and vscr (in v0) to memory area */ -
c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S
rc6f76392 ra11e1ff5 436 436 437 437 #if defined(__ALTIVEC__) && !defined(PPC_MULTILIB_ALTIVEC) 438 mr r14, r5439 438 mr r4, r5 440 439 .extern _CPU_Context_switch_altivec 441 440 bl _CPU_Context_switch_altivec 442 mr r5, r14443 441 #endif 444 442
Note: See TracChangeset for help on using the changeset viewer.