Changeset 632e4306 in rtems


Timestamp:
Jul 17, 2009, 1:50:31 PM
Author:
Thomas Doerfler <Thomas.Doerfler@…>
Branches:
4.10, 4.11, master
Children:
e63acfbb
Parents:
20857e12
Message:

exception handler maintenance

Location:
cpukit/score/cpu/arm
Files:
6 edited

Legend:

' ' Unmodified
'+' Added
'-' Removed
  • cpukit/score/cpu/arm/ChangeLog

    r20857e12 r632e4306  
+2009-07-15	Sebastian Huber <sebastian.huber@embedded-brains.de>
+
+	* arm_exc_handler_high.c, arm_exc_handler_low.S, arm_exc_interrupt.S:
+	New files.
+	* Makefile.am: Update.
+	* rtems/score/cpu.h:  Removed all generic comments.  Changed inline
+	assembler of interrupt support functions.  Removed operating system
+	support for fast interrupts (FIQ).  Overall cleanup.
+	* cpu.c: Changed type of arm_cpu_mode to uint32_t to match the type in
+	_CPU_Context_Initialize().  Moved exception handler code into
+	'arm_exc_handler_high.c'.  _CPU_ISR_install_vector() writes now only
+	if necessary.
+	* cpu_asm.S: Moved exception handler code into 'arm_exc_handler_low.S'.
+	* rtems/score/types.h: Removed superfluous defines.
+	* ChangeLog, thumb_isr.c: Removed files.
+
 2009-05-05	Joel Sherrill <joel.sherrill@oarcorp.com>
 
  • cpukit/score/cpu/arm/Makefile.am

    r20857e12 r632e4306  
 noinst_LIBRARIES = libscorecpu.a
 libscorecpu_a_CPPFLAGS = $(AM_CPPFLAGS)
-libscorecpu_a_SOURCES = cpu.c cpu_asm.S
-libscorecpu_a_SOURCES += thumb/thumb_isr.c
+libscorecpu_a_SOURCES = cpu.c \
+	cpu_asm.S \
+	arm_exc_interrupt.S \
+	arm_exc_handler_low.S \
+	arm_exc_handler_high.c
 
 include $(srcdir)/preinstall.am
  • cpukit/score/cpu/arm/cpu.c

    r20857e12 r632e4306  
+/**
+ * @file
+ *
+ * ARM support code.
+ */
+
 /*
- *  ARM CPU Dependent Source
- *
- *
  *  COPYRIGHT (c) 2000 Canon Research Centre France SA.
  *  Emmanuel Raguet, mailto:raguet@crf.canon.fr
…
  *
  *  Copyright (c) 2007 Ray xu <rayx.cn@gmail.com>
+ *
+ *  Copyright (c) 2009 embedded brains GmbH
  *
  *  The license and distribution terms for this file may be
…
  * contexts.
  */
+uint32_t arm_cpu_mode = 0x13;
 
-unsigned int arm_cpu_mode = 0x13;
-
-/*  _CPU_Initialize
- *
- *  INPUT PARAMETERS: NONE
- *
- *  This routine performs processor dependent initialization.
- */
-
-void _CPU_Initialize(void)
+void _CPU_Context_Initialize(
+  Context_Control *the_context,
+  uint32_t *stack_base,
+  uint32_t size,
+  uint32_t new_level,
+  void *entry_point,
+  bool is_fp
+)
 {
+  the_context->register_sp = (uint32_t) stack_base + size ;
+  the_context->register_lr = (uint32_t) entry_point;
+  the_context->register_cpsr = new_level | arm_cpu_mode;
 }
 
-/*
- *
- *  _CPU_ISR_Get_level - returns the current interrupt level
- */
-#define str(x) #x
-#define xstr(x) str(x)
-#define L(x) #x "_" xstr(__LINE__)
+/* Preprocessor magic for stringification of x */
+#define _CPU_ISR_LEVEL_DO_STRINGOF( x) #x
+#define _CPU_ISR_LEVEL_STRINGOF( x) _CPU_ISR_LEVEL_DO_STRINGOF( x)
 
-#define TO_ARM_MODE(x)      \
-    asm volatile (          \
-    ".code  16           \n" \
-    L(x) "_thumb:        \n" \
-    ".align 2            \n" \
-    "push {lr}           \n" \
-    "adr %0, "L(x) "_arm \n" \
-    "bl " L(x)"         \n" \
-    "pop    {pc}        \n" \
-    ".balign 4          \n" \
-    L(x) ":             \n" \
-    "bx %0              \n" \
-    "nop                \n" \
-    ".pool              \n" \
-    ".code 32           \n" \
-    L(x) "_arm:         \n" \
-    :"=&r" (reg))
-
+void _CPU_ISR_Set_level( uint32_t level )
+{
+  uint32_t reg;
 
-/*
- * Switch to Thumb mode Veneer,ugly but safe
- */
+  asm volatile (
+    THUMB_TO_ARM
+    "mrs %0, cpsr\n"
+    "bic %0, %0, #" _CPU_ISR_LEVEL_STRINGOF( CPU_MODES_INTERRUPT_MASK ) "\n"
+    "orr %0, %0, %1\n"
+    "msr cpsr, %0\n"
+    ARM_TO_THUMB
+    : "=r" (reg)
+    : "r" (level)
+  );
+}
 
-#define TO_THUMB_MODE(x)    \
-    asm volatile (          \
-        ".code  32                  \n"\
-        "adr %0, "L(x) "_thumb +1   \n"\
-        "bx  %0                     \n"\
-        ".pool                      \n"\
-        ".thumb_func                \n"\
-        L(x) "_thumb:               \n"\
-        : "=&r" (reg))
+uint32_t _CPU_ISR_Get_level( void )
+{
+  uint32_t reg;
+  uint32_t level;
 
-#if (!defined(__THUMB_INTERWORK__) &&  !defined(__thumb__))
-uint32_t   _CPU_ISR_Get_level( void )
-{
-    uint32_t   reg = 0; /* to avoid warning */
-    asm volatile ("mrs  %0, cpsr \n"           \
-                  "and  %0,  %0, #0xc0 \n"     \
-                  : "=r" (reg)                 \
-                  : "0" (reg) );
-    return reg;
+  asm volatile (
+    THUMB_TO_ARM
+    "mrs %0, cpsr\n"
+    "and %1, %0, #" _CPU_ISR_LEVEL_STRINGOF( CPU_MODES_INTERRUPT_MASK ) "\n"
+    ARM_TO_THUMB
+    : "=r" (reg), "=r" (level)
+  );
+
+  return level;
 }
-#endif
 
-
-
-/*
- *  _CPU_ISR_install_vector
- *
- *  This kernel routine installs the RTEMS handler for the
- *  specified vector.
- *
- *  Input parameters:
- *    vector      - interrupt vector number
- *    new_handler - replacement ISR for this vector number
- *    old_handler - pointer to store former ISR for this vector number
- *
- *  FIXME: This vector scheme should be changed to allow FIQ to be
- *         handled better. I'd like to be able to put VectorTable
- *         elsewhere - JTM
- *
- *
- *  Output parameters:  NONE
- *
- */
 void _CPU_ISR_install_vector(
-  uint32_t    vector,
-  proc_ptr    new_handler,
-  proc_ptr   *old_handler
+  uint32_t vector,
+  proc_ptr new_handler,
+  proc_ptr *old_handler
 )
 {
-    /* pointer on the redirection table in RAM */
-    long *VectorTable = (long *)(MAX_EXCEPTIONS * 4);
-
-    if (old_handler != NULL) {
-        old_handler = *(proc_ptr *)(VectorTable + vector);
-    }
+  /* Redirection table starts at the end of the vector table */
+  volatile uint32_t *table = (volatile uint32_t *) (MAX_EXCEPTIONS * 4);
 
-    *(VectorTable + vector) = (long)new_handler ;
+  uint32_t current_handler = table [vector];
 
+  /* The current handler is now the old one */
+  if (old_handler != NULL) {
+    *old_handler = (proc_ptr) current_handler;
+  }
+
+  /* Write only if necessary to avoid writes to a maybe read-only memory */
+  if (current_handler != (uint32_t) new_handler) {
+    table [vector] = (uint32_t) new_handler;
+  }
 }
-
-void _CPU_Context_Initialize(
-  Context_Control  *the_context,
-  uint32_t         *stack_base,
-  uint32_t          size,
-  uint32_t          new_level,
-  void             *entry_point,
-  bool              is_fp
-)
-{
-    the_context->register_sp = (uint32_t)stack_base + size ;
-    the_context->register_lr = (uint32_t)entry_point;
-    the_context->register_cpsr = new_level | arm_cpu_mode;
-}
-
-
-/*
- *  _CPU_Install_interrupt_stack - this function is empty since the
- *  BSP must set up the interrupt stacks.
- */
 
 void _CPU_Install_interrupt_stack( void )
 {
+  /* This function is empty since the BSP must set up the interrupt stacks */
 }
 
-void _defaultExcHandler (CPU_Exception_frame *ctx)
+void _CPU_Initialize( void )
 {
-    printk("\n\r");
-    printk("----------------------------------------------------------\n\r");
-#if 1
-    printk("Exception 0x%x caught at PC 0x%x by thread %d\n",
-           ctx->register_ip, ctx->register_lr - 4,
-           _Thread_Executing->Object.id);
-#endif
-    printk("----------------------------------------------------------\n\r");
-    printk("Processor execution context at time of the fault was  :\n\r");
-    printk("----------------------------------------------------------\n\r");
-#if 0
-    printk(" r0  = %8x  r1  = %8x  r2  = %8x  r3  = %8x\n\r",
-           ctx->register_r0, ctx->register_r1,
-           ctx->register_r2, ctx->register_r3);
-    printk(" r4  = %8x  r5  = %8x  r6  = %8x  r7  = %8x\n\r",
-           ctx->register_r4, ctx->register_r5,
-           ctx->register_r6, ctx->register_r7);
-    printk(" r8  = %8x  r9  = %8x  r10 = %8x\n\r",
-           ctx->register_r8, ctx->register_r9, ctx->register_r10);
-    printk(" fp  = %8x  ip  = %8x  sp  = %8x  pc  = %8x\n\r",
-           ctx->register_fp, ctx->register_ip,
-           ctx->register_sp, ctx->register_lr - 4);
-    printk("----------------------------------------------------------\n\r");
-#endif
-    if (_ISR_Nest_level > 0) {
-        /*
-         * In this case we shall not delete the task interrupted as
-         * it has nothing to do with the fault. We cannot return either
-         * because the eip points to the faulty instruction so...
-         */
-        printk("Exception while executing ISR!!!. System locked\n\r");
-        while(1);
-    }
-    else {
-        printk("*********** FAULTY THREAD WILL BE DELETED **************\n\r");
-        rtems_task_delete(_Thread_Executing->Object.id);
-    }
+  /* Do nothing */
 }
-
-cpuExcHandlerType _currentExcHandler = _defaultExcHandler;
-
-extern void _Exception_Handler_Undef_Swi(void);
-extern void _Exception_Handler_Abort(void);
-extern void _exc_data_abort(void);
-
-
-
-/* FIXME: put comments here */
-void rtems_exception_init_mngt(void)
-{
-        ISR_Level level;
-
-      _CPU_ISR_Disable(level);
-      _CPU_ISR_install_vector(ARM_EXCEPTION_UNDEF,
-                              _Exception_Handler_Undef_Swi,
-                              NULL);
-
-      _CPU_ISR_install_vector(ARM_EXCEPTION_SWI,
-                              _Exception_Handler_Undef_Swi,
-                              NULL);
-
-      _CPU_ISR_install_vector(ARM_EXCEPTION_PREF_ABORT,
-                              _Exception_Handler_Abort,
-                              NULL);
-
-      _CPU_ISR_install_vector(ARM_EXCEPTION_DATA_ABORT,
-                              _exc_data_abort,
-                              NULL);
-
-      _CPU_ISR_install_vector(ARM_EXCEPTION_FIQ,
-                              _Exception_Handler_Abort,
-                              NULL);
-
-      _CPU_ISR_install_vector(ARM_EXCEPTION_IRQ,
-                              _Exception_Handler_Abort,
-                              NULL);
-
-      _CPU_ISR_Enable(level);
-}
-
-#define INSN_MASK         0xc5
-
-#define INSN_STM1         0x80
-#define INSN_STM2         0x84
-#define INSN_STR          0x40
-#define INSN_STRB         0x44
-
-#define INSN_LDM1         0x81
-#define INSN_LDM23        0x85
-#define INSN_LDR          0x41
-#define INSN_LDRB         0x45
-
-#define GET_RD(x)         ((x & 0x0000f000) >> 12)
-#define GET_RN(x)         ((x & 0x000f0000) >> 16)
-
-#define GET_U(x)              ((x & 0x00800000) >> 23)
-#define GET_I(x)              ((x & 0x02000000) >> 25)
-
-#define GET_REG(r, ctx)      (((uint32_t *)ctx)[r])
-#define SET_REG(r, ctx, v)   (((uint32_t *)ctx)[r] = v)
-#define GET_OFFSET(insn)     (insn & 0xfff)
-
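The reworked _CPU_ISR_install_vector() reads the current entry of the redirection table that follows the vector table, hands it back through old_handler, and writes the new handler only when it actually differs, so a table placed in read-only memory is not touched needlessly. A minimal usage sketch, assuming a hypothetical BSP handler named my_undef_handler:

#include <rtems/score/cpu.h>

/* Hypothetical handler, used only for illustration */
static void my_undef_handler( void )
{
  /* handle the undefined instruction exception */
}

void example_install( void )
{
  proc_ptr old_handler = NULL;

  /* Installs the new handler and returns the previous one in one call */
  _CPU_ISR_install_vector(
    ARM_EXCEPTION_UNDEF,
    (proc_ptr) my_undef_handler,
    &old_handler
  );

  /* old_handler now holds the previously installed handler, if any */
}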
  • cpukit/score/cpu/arm/cpu_asm.S

    r20857e12 r632e4306  
 	mov     r1, r0
 	b       _restore
-
-
-
-/* FIXME:	_Exception_Handler_Undef_Swi is untested */
-FUNC_START_ARM(_Exception_Handler_Undef_Swi)
-/* FIXME: This should use load and store multiple instructions */
-        sub     r13,r13,#SIZE_REGS
-        str     r4,  [r13, #REG_R4]
-        str     r5,  [r13, #REG_R5]
-        str     r6,  [r13, #REG_R6]
-        str     r7,  [r13, #REG_R7]
-        str     r8,  [r13, #REG_R8]
-        str     r9,  [r13, #REG_R9]
-        str     r10, [r13, #REG_R10]
-        str     r11, [r13, #REG_R11]
-        str     sp,  [r13, #REG_SP]
-        str     lr,  [r13, #REG_LR]
-        mrs     r0,  cpsr               /* read the status */
-        and     r0,  r0,#0x1f           /* we keep the mode as exception number */
-        str     r0,  [r13, #REG_PC]     /* we store it in a free place */
-        mov     r0,  r13                /* put frame address in r0 (C arg 1) */
-
-        ldr     r1, =SWI_Handler
-        ldr     lr, =_go_back_1
-        ldr     pc,[r1]                         /* call handler  */
-_go_back_1:
-        ldr     r4,  [r13, #REG_R4]
-        ldr     r5,  [r13, #REG_R5]
-        ldr     r6,  [r13, #REG_R6]
-        ldr     r7,  [r13, #REG_R7]
-        ldr     r8,  [r13, #REG_R8]
-        ldr     r9,  [r13, #REG_R9]
-        ldr     r10, [r13, #REG_R10]
-        ldr     r11, [r13, #REG_R11]
-        ldr     sp,  [r13, #REG_SP]
-        ldr     lr,  [r13, #REG_LR]
-        add     r13,r13,#SIZE_REGS
-        movs    pc,r14                  /* return  */
-
-/* FIXME:	_Exception_Handler_Abort is untested */
-FUNC_START_ARM(_Exception_Handler_Abort)
-/* FIXME: This should use load and store multiple instructions */
-        sub     r13,r13,#SIZE_REGS
-        str     r4,  [r13, #REG_R4]
-        str     r5,  [r13, #REG_R5]
-        str     r6,  [r13, #REG_R6]
-        str     r7,  [r13, #REG_R7]
-        str     r8,  [r13, #REG_R8]
-        str     r9,  [r13, #REG_R9]
-        str     sp,  [r13, #REG_R11]
-        str     lr,  [r13, #REG_SP]
-        str     lr,  [r13, #REG_LR]
-        mrs     r0,  cpsr               /* read the status */
-        and     r0,  r0,#0x1f           /* we keep the mode as exception number */
-        str     r0,  [r13, #REG_PC]     /* we store it in a free place */
-        mov     r0,  r13                /* put frame address in ro (C arg 1) */
-
-        ldr     r1, =_currentExcHandler
-        ldr     lr, =_go_back_2
-        ldr     pc,[r1]                         /* call handler  */
-_go_back_2:
-        ldr     r4,  [r13, #REG_R4]
-        ldr     r5,  [r13, #REG_R5]
-        ldr     r6,  [r13, #REG_R6]
-        ldr     r7,  [r13, #REG_R7]
-        ldr     r8,  [r13, #REG_R8]
-        ldr     r9,  [r13, #REG_R9]
-        ldr     r10, [r13, #REG_R10]
-        ldr     sp,  [r13, #REG_R11]
-        ldr     lr,  [r13, #REG_SP]
-        ldr     lr,  [r13, #REG_LR]
-        add     r13,r13,#SIZE_REGS
-#ifdef  __thumb__
-        subs    r11, r14,#4
-        bx      r11
-        nop
-#else
-        subs    pc,r14,#4                       /* return */
-#endif
-
-#define ABORT_REGS_OFFS 32-REG_R4
-#define ABORT_SIZE_REGS SIZE_REGS+ABORT_REGS_OFFS
-
-FUNC_START_ARM(_exc_data_abort)
-        sub     sp, sp, #ABORT_SIZE_REGS        /* reserve register frame */
-        stmia   sp, {r0-r11}
-        add     sp, sp, #ABORT_REGS_OFFS        /* the Context_Control structure starts by CPSR, R4, ... */
-
-        str     ip, [sp, #REG_PC]               /* store R12 (ip) somewhere, oh hackery, hackery, hack */
-        str     lr, [sp, #REG_LR]
-
-        mov     r1, lr
-        ldr     r0, [r1, #-8]                   /* r0 = bad instruction */
-        mrs     r1, spsr                        /* r1 = spsr */
-        mov     r2, r13                         /* r2 = exception frame of Context_Control type */
-#if defined(__thumb__)
-        .code 32
-        /*arm to thumb*/
-        adr     r5, to_thumb + 1
-        bx      r5
-        .code 16
-to_thumb:
-#endif
-        bl      do_data_abort
-#if defined(__thumb__)
-/*back to arm*/
-        .code 16
-thumb_to_arm:
-        .align 2
-        adr r5, arm_code
-        bx      r5
-        nop
-        .code 32
-arm_code:
-#endif
-
-        ldr     lr, [sp, #REG_LR]
-        ldr     ip, [sp, #REG_PC]               /* restore R12 (ip) */
-
-        sub     sp, sp, #ABORT_REGS_OFFS
-        ldmia   sp, {r0-r11}
-        add     sp, sp, #ABORT_SIZE_REGS
-#ifdef  __thumb__
-        subs    r11, r14, #4                    /* return to the instruction */
-        bx      r11
-        nop
-#else
-        subs    pc, r14, #4
-#endif
-                                                /* _AFTER_ the aborted one */
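The removed _exc_data_abort veneer loaded the faulting instruction into r0, the saved SPSR into r1, and the exception frame pointer into r2 before branching to do_data_abort. A rough sketch of the C entry point implied by that calling convention; the signature and the opcode decode are assumptions for illustration, since the actual do_data_abort body is not part of this changeset:

#include <stdint.h>

/* Decode macros as removed from cpu.c in this changeset */
#define INSN_MASK 0xc5
#define INSN_LDR  0x41
#define GET_RD(x) (((x) & 0x0000f000) >> 12)
#define GET_RN(x) (((x) & 0x000f0000) >> 16)

/* Assumed mapping: r0 = faulting instruction, r1 = SPSR, r2 = frame */
void do_data_abort( uint32_t insn, uint32_t spsr, void *frame )
{
  /* Bits 27..20 select the load/store instruction class (assumption) */
  uint32_t opcode = ( insn >> 20 ) & INSN_MASK;

  if ( opcode == INSN_LDR ) {
    uint32_t rd = GET_RD( insn ); /* destination register index */
    uint32_t rn = GET_RN( insn ); /* base register index */

    /* ... emulate or report the faulting load here ... */
    (void) rd;
    (void) rn;
  }

  (void) spsr;
  (void) frame;
}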
  • cpukit/score/cpu/arm/rtems/score/cpu.h

    r20857e12 r632e4306  
  */
 
-/* FIXME: finish commenting/cleaning up this file */
 #ifndef _RTEMS_SCORE_CPU_H
 #define _RTEMS_SCORE_CPU_H
 
+#include <rtems/score/arm.h>
+
+#ifndef ASM
+  #include <rtems/score/types.h>
+#endif
+
+#ifndef TRUE
+  #warning "TRUE not defined"
+  #define TRUE 1
+#endif
+
+#ifndef FALSE
+  #warning "FALSE not defined"
+  #define FALSE 0
+#endif
+
+#ifdef __thumb__
+  #define ARM_TO_THUMB "add %0, pc, #1\nbx %0\n.thumb\n"
+  #define THUMB_TO_ARM ".align 2\nbx pc\n.arm\n"
+#else
+  #define ARM_TO_THUMB
+  #define THUMB_TO_ARM
+#endif
+
+/* If someone uses THUMB we assume she wants minimal code size */
+#ifdef __thumb__
+  #define CPU_INLINE_ENABLE_DISPATCH FALSE
+#else
+  #define CPU_INLINE_ENABLE_DISPATCH TRUE
+#endif
+
+#if defined(__ARMEL__)
+  #define CPU_BIG_ENDIAN FALSE
+  #define CPU_LITTLE_ENDIAN TRUE
+#elif defined(__ARMEB__)
+  #define CPU_BIG_ENDIAN TRUE
+  #define CPU_LITTLE_ENDIAN FALSE
+#else
+  #error "unknown endianness"
+#endif
+
+#define CPU_UNROLL_ENQUEUE_PRIORITY TRUE
+
+#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
+
+#define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE
+
+#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
+
+#define CPU_ISR_PASSES_FRAME_POINTER 0
+
+#if ( ARM_HAS_FPU == 1 )
+  #define CPU_HARDWARE_FP TRUE
+#else
+  #define CPU_HARDWARE_FP FALSE
+#endif
+
+#define CPU_SOFTWARE_FP FALSE
+
+#define CPU_ALL_TASKS_ARE_FP FALSE
+
+#define CPU_IDLE_TASK_IS_FP FALSE
+
+#define CPU_USE_DEFERRED_FP_SWITCH FALSE
+
+#define CPU_PROVIDES_IDLE_THREAD_BODY FALSE
+
+#define CPU_STACK_GROWS_UP FALSE
+
+/* XXX Why 32? */
+#define CPU_STRUCTURE_ALIGNMENT __attribute__ ((aligned (32)))
+
+/*
+ * The interrupt mask disables only normal interrupts (IRQ).
+ *
+ * In order to support fast interrupts (FIQ) such that they can do something
+ * useful, we have to disable the operating system support for FIQs.  Having
+ * operating system support for them would require that FIQs are disabled
+ * during critical sections of the operating system and application.  At this
+ * level IRQs and FIQs would be equal.  It is true that FIQs could interrupt
+ * the non critical sections of IRQs, so here they would have a small
+ * advantage.  Without operating system support, the FIQs can execute at any
+ * time (of course not during the service of another FIQ). If someone needs
+ * operating system support for a FIQ, she can trigger a software interrupt and
+ * service the request in a two-step process.
+ */
+#define CPU_MODES_INTERRUPT_MASK 0x80
+
+#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
+
+#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
+
+#define CPU_INTERRUPT_NUMBER_OF_VECTORS 8
+
+#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)
+
+#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
+
+#define CPU_STACK_MINIMUM_SIZE (1024 * 4)
+
+#define CPU_ALIGNMENT 4
+
+#define CPU_HEAP_ALIGNMENT CPU_ALIGNMENT
+
+#define CPU_PARTITION_ALIGNMENT CPU_ALIGNMENT
+
+#define CPU_STACK_ALIGNMENT 4
+
+/*
+ * Bitfield handler macros.
+ *
+ * If we had a particularly fast function for finding the first
+ * bit set in a word, it would go here. Since we don't (*), we'll
+ * just use the universal macros.
+ *
+ * (*) On ARM V5 and later, there's a CLZ function which could be
+ *     used to implement much quicker than the default macro.
+ */
+
+#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
+
+#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
+
+#define CPU_ENABLE_C_ISR_DISPATCH_IMPLEMENTATION TRUE
+
+#ifndef ASM
+
 #ifdef __cplusplus
 extern "C" {
 #endif
 
-#include <rtems/score/arm.h>            /* pick up machine definitions */
-#ifndef ASM
-#include <rtems/score/types.h>
-#endif
-
-/* conditional compilation parameters */
-
-/*
- *  Should the calls to _Thread_Enable_dispatch be inlined?
- *
- *  If TRUE, then they are inlined.
- *  If FALSE, then a subroutine call is made.
- *
- *  Basically this is an example of the classic trade-off of size
- *  versus speed.  Inlining the call (TRUE) typically increases the
- *  size of RTEMS while speeding up the enabling of dispatching.
- *  [NOTE: In general, the _Thread_Dispatch_disable_level will
- *  only be 0 or 1 unless you are in an interrupt handler and that
- *  interrupt handler invokes the executive.]  When not inlined
- *  something calls _Thread_Enable_dispatch which in turns calls
- *  _Thread_Dispatch.  If the enable dispatch is inlined, then
- *  one subroutine call is avoided entirely.]
- */
-
-#if defined(__thumb__)
-#define CPU_INLINE_ENABLE_DISPATCH       FALSE
-#else
-#define CPU_INLINE_ENABLE_DISPATCH       TRUE
-#endif
-
-/*
- *  Should the body of the search loops in _Thread_queue_Enqueue_priority
- *  be unrolled one time?  In unrolled each iteration of the loop examines
- *  two "nodes" on the chain being searched.  Otherwise, only one node
- *  is examined per iteration.
- *
- *  If TRUE, then the loops are unrolled.
- *  If FALSE, then the loops are not unrolled.
- *
- *  The primary factor in making this decision is the cost of disabling
- *  and enabling interrupts (_ISR_Flash) versus the cost of rest of the
- *  body of the loop.  On some CPUs, the flash is more expensive than
- *  one iteration of the loop body.  In this case, it might be desirable
- *  to unroll the loop.  It is important to note that on some CPUs, this
- *  code is the longest interrupt disable period in RTEMS.  So it is
- *  necessary to strike a balance when setting this parameter.
- */
-
-#define CPU_UNROLL_ENQUEUE_PRIORITY      TRUE
-
-/*
- *  Does RTEMS manage a dedicated interrupt stack in software?
- *
- *  If TRUE, then a stack is allocated in _Interrupt_Manager_initialization.
- *  If FALSE, nothing is done.
- *
- *  If the CPU supports a dedicated interrupt stack in hardware,
- *  then it is generally the responsibility of the BSP to allocate it
- *  and set it up.
- *
- *  If the CPU does not support a dedicated interrupt stack, then
- *  the porter has two options: (1) execute interrupts on the
- *  stack of the interrupted task, and (2) have RTEMS manage a dedicated
- *  interrupt stack.
- *
- *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
- *
- *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
- *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
- *  possible that both are FALSE for a particular CPU.  Although it
- *  is unclear what that would imply about the interrupt processing
- *  procedure on that CPU.
- */
-
-#define CPU_HAS_SOFTWARE_INTERRUPT_STACK FALSE
-
-/*
- *  Does this CPU have hardware support for a dedicated interrupt stack?
- *
- *  If TRUE, then it must be installed during initialization.
- *  If FALSE, then no installation is performed.
- *
- *  If this is TRUE, CPU_ALLOCATE_INTERRUPT_STACK should also be TRUE.
- *
- *  Only one of CPU_HAS_SOFTWARE_INTERRUPT_STACK and
- *  CPU_HAS_HARDWARE_INTERRUPT_STACK should be set to TRUE.  It is
- *  possible that both are FALSE for a particular CPU.  Although it
- *  is unclear what that would imply about the interrupt processing
- *  procedure on that CPU.
- */
-
-#define CPU_HAS_HARDWARE_INTERRUPT_STACK TRUE
-
-/*
- *  Does RTEMS allocate a dedicated interrupt stack in the Interrupt Manager?
- *
- *  If TRUE, then the memory is allocated during initialization.
- *  If FALSE, then the memory is allocated during initialization.
- *
- *  This should be TRUE is CPU_HAS_SOFTWARE_INTERRUPT_STACK is TRUE.
- */
-
-#define CPU_ALLOCATE_INTERRUPT_STACK FALSE
-
-/*
- *  Does the RTEMS invoke the user's ISR with the vector number and
- *  a pointer to the saved interrupt frame (1) or just the vector
- *  number (0)?
- */
-
-#define CPU_ISR_PASSES_FRAME_POINTER 0
-
-/*
- *  Does the CPU have hardware floating point?
- *
- *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is supported.
- *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is ignored.
- *
- *  If there is a FP coprocessor such as the i387 or mc68881, then
- *  the answer is TRUE.
- *
- *  The macro name "ARM_HAS_FPU" should be made CPU specific.
- *  It indicates whether or not this CPU model has FP support.  For
- *  example, it would be possible to have an i386_nofp CPU model
- *  which set this to false to indicate that you have an i386 without
- *  an i387 and wish to leave floating point support out of RTEMS.
- */
-
-#if ( ARM_HAS_FPU == 1 )
-#define CPU_HARDWARE_FP     TRUE
-#else
-#define CPU_HARDWARE_FP     FALSE
-#endif
-
-#define CPU_SOFTWARE_FP     FALSE
-
-/*
- *  Are all tasks RTEMS_FLOATING_POINT tasks implicitly?
- *
- *  If TRUE, then the RTEMS_FLOATING_POINT task attribute is assumed.
- *  If FALSE, then the RTEMS_FLOATING_POINT task attribute is followed.
- *
- *  If CPU_HARDWARE_FP is FALSE, then this should be FALSE as well.
- */
-
-#define CPU_ALL_TASKS_ARE_FP     FALSE
-
-/*
- *  Should the IDLE task have a floating point context?
- *
- *  If TRUE, then the IDLE task is created as a RTEMS_FLOATING_POINT task
- *  and it has a floating point context which is switched in and out.
- *  If FALSE, then the IDLE task does not have a floating point context.
- *
- *  Setting this to TRUE negatively impacts the time required to preempt
- *  the IDLE task from an interrupt because the floating point context
- *  must be saved as part of the preemption.
- */
-
-#define CPU_IDLE_TASK_IS_FP      FALSE
-
-/*
- *  Should the saving of the floating point registers be deferred
- *  until a context switch is made to another different floating point
- *  task?
- *
- *  If TRUE, then the floating point context will not be stored until
- *  necessary.  It will remain in the floating point registers and not
- *  disturned until another floating point task is switched to.
- *
- *  If FALSE, then the floating point context is saved when a floating
- *  point task is switched out and restored when the next floating point
- *  task is restored.  The state of the floating point registers between
- *  those two operations is not specified.
- *
- *  If the floating point context does NOT have to be saved as part of
- *  interrupt dispatching, then it should be safe to set this to TRUE.
- *
- *  Setting this flag to TRUE results in using a different algorithm
- *  for deciding when to save and restore the floating point context.
- *  The deferred FP switch algorithm minimizes the number of times
- *  the FP context is saved and restored.  The FP context is not saved
- *  until a context switch is made to another, different FP task.
- *  Thus in a system with only one FP task, the FP context will never
- *  be saved or restored.
- */
-
-#define CPU_USE_DEFERRED_FP_SWITCH   FALSE
-
-/*
- *  Does this port provide a CPU dependent IDLE task implementation?
- *
- *  If TRUE, then the routine _CPU_Thread_Idle_body
- *  must be provided and is the default IDLE thread body instead of
- *  _CPU_Thread_Idle_body.
- *
- *  If FALSE, then use the generic IDLE thread body if the BSP does
- *  not provide one.
- *
- *  This is intended to allow for supporting processors which have
- *  a low power or idle mode.  When the IDLE thread is executed, then
- *  the CPU can be powered down.
- *
- *  The order of precedence for selecting the IDLE thread body is:
- *
- *    1.  BSP provided
- *    2.  CPU dependent (if provided)
- *    3.  generic (if no BSP and no CPU dependent)
- */
-
-#define CPU_PROVIDES_IDLE_THREAD_BODY    FALSE
-
-/*
- *  Does the stack grow up (toward higher addresses) or down
- *  (toward lower addresses)?
- *
- *  If TRUE, then the grows upward.
- *  If FALSE, then the grows toward smaller addresses.
- */
-
-#define CPU_STACK_GROWS_UP               FALSE
-
-/*
- *  The following is the variable attribute used to force alignment
- *  of critical RTEMS structures.  On some processors it may make
- *  sense to have these aligned on tighter boundaries than
- *  the minimum requirements of the compiler in order to have as
- *  much of the critical data area as possible in a cache line.
- *
- *  The placement of this macro in the declaration of the variables
- *  is based on the syntactically requirements of the GNU C
- *  "__attribute__" extension.  For example with GNU C, use
- *  the following to force a structures to a 32 byte boundary.
- *
- *      __attribute__ ((aligned (32)))
- *
- *  NOTE:  Currently only the Priority Bit Map table uses this feature.
- *         To benefit from using this, the data must be heavily
- *         used so it will stay in the cache and used frequently enough
- *         in the executive to justify turning this on.
- */
-
-#define CPU_STRUCTURE_ALIGNMENT  __attribute__ ((aligned (32)))
-
-/*
- *  Define what is required to specify how the network to host conversion
- *  routines are handled.
- */
-
-#if defined(__ARMEL__)
-#define CPU_BIG_ENDIAN          FALSE
-#define CPU_LITTLE_ENDIAN       TRUE
-#elif defined(__ARMEB__)
-#define CPU_BIG_ENDIAN          TRUE
-#define CPU_LITTLE_ENDIAN       FALSE
-#else
-#error "Unknown endianness"
-#endif
-
-/*
- *  The following defines the number of bits actually used in the
- *  interrupt field of the task mode.  How those bits map to the
- *  CPU interrupt levels is defined by the routine _CPU_ISR_Set_level().
- */
-
-#define CPU_MODES_INTERRUPT_MASK   0x000000c0
-
-/*
- *  Processor defined structures required by cpukit/score.
- */
-
-/* may need to put some structures here.  */
-
-/*
- * Contexts
- *
- *  Generally there are 2 types of context to save.
- *     1. Interrupt registers to save
- *     2. Task level registers to save
- *
- *  This means we have the following 3 context items:
- *     1. task level context stuff::  Context_Control
- *     2. floating point task stuff:: Context_Control_fp
- *     3. special interrupt level context :: Context_Control_interrupt
- *
- *  On some processors, it is cost-effective to save only the callee
- *  preserved registers during a task context switch.  This means
- *  that the ISR code needs to save those registers which do not
- *  persist across function calls.  It is not mandatory to make this
- *  distinctions between the caller/callee saves registers for the
- *  purpose of minimizing context saved during task switch and on interrupts.
- *  If the cost of saving extra registers is minimal, simplicity is the
- *  choice.  Save the same context on interrupt entry as for tasks in
- *  this case.
- *
- *  Additionally, if gdb is to be made aware of RTEMS tasks for this CPU, then
- *  care should be used in designing the context area.
- *
- *  On some CPUs with hardware floating point support, the Context_Control_fp
- *  structure will not be used or it simply consist of an array of a
- *  fixed number of bytes.   This is done when the floating point context
- *  is dumped by a "FP save context" type instruction and the format
- *  is not really defined by the CPU.  In this case, there is no need
- *  to figure out the exact format -- only the size.  Of course, although
- *  this is enough information for RTEMS, it is probably not enough for
- *  a debugger such as gdb.  But that is another problem.
- */
+typedef enum {
+  ARM_EXCEPTION_RESET = 0,
+  ARM_EXCEPTION_UNDEF = 1,
+  ARM_EXCEPTION_SWI = 2,
+  ARM_EXCEPTION_PREF_ABORT = 3,
+  ARM_EXCEPTION_DATA_ABORT = 4,
+  ARM_EXCEPTION_RESERVED = 5,
+  ARM_EXCEPTION_IRQ = 6,
+  ARM_EXCEPTION_FIQ = 7,
+  MAX_EXCEPTIONS = 8
+} Arm_symbolic_exception_name;
+
 typedef struct {
-    uint32_t  register_cpsr;
-    uint32_t  register_r4;
-    uint32_t  register_r5;
-    uint32_t  register_r6;
-    uint32_t  register_r7;
-    uint32_t  register_r8;
-    uint32_t  register_r9;
-    uint32_t  register_r10;
-    uint32_t  register_fp;
-    uint32_t  register_sp;
-    uint32_t  register_lr;
-    uint32_t  register_pc;
+  uint32_t register_cpsr;
+  uint32_t register_r4;
+  uint32_t register_r5;
+  uint32_t register_r6;
+  uint32_t register_r7;
+  uint32_t register_r8;
+  uint32_t register_r9;
+  uint32_t register_r10;
+  uint32_t register_fp;
+  uint32_t register_sp;
+  uint32_t register_lr;
+  uint32_t register_pc;
 } Context_Control;
+
+/* XXX This is out of date */
+typedef struct {
+  uint32_t register_r0;
+  uint32_t register_r1;
+  uint32_t register_r2;
+  uint32_t register_r3;
+  uint32_t register_ip;
+  uint32_t register_lr;
+} CPU_Exception_frame;
+
+typedef CPU_Exception_frame CPU_Interrupt_frame;
+
+typedef struct {
+  /* Not supported */
+} Context_Control_fp;
+
+SCORE_EXTERN Context_Control_fp _CPU_Null_fp_context;
+
+static inline uint32_t arm_interrupt_disable( void )
+{
+  uint32_t reg;
+  uint32_t level;
+
+  asm volatile (
+    THUMB_TO_ARM
+    "mrs %1, cpsr\n"
+    "orr %0, %1, #0x80\n"
+    "msr cpsr, %0\n"
+    ARM_TO_THUMB
+    : "=r" (reg), "=r" (level)
+  );
+
+  return level;
+}
+
+static inline void arm_interrupt_enable( uint32_t level )
+{
+  #ifdef __thumb__
+    uint32_t reg;
+
+    asm volatile (
+      THUMB_TO_ARM
+      "msr cpsr, %1\n"
+      ARM_TO_THUMB
+      : "=r" (reg)
+      : "r" (level)
+    );
+  #else
+    asm volatile (
+      "msr cpsr, %0"
+      :
+      : "r" (level)
+    );
+  #endif
+}
+
+static inline void arm_interrupt_flash( uint32_t level )
+{
+  uint32_t reg;
+
+  asm volatile (
+    THUMB_TO_ARM
+    "mrs %0, cpsr\n"
+    "msr cpsr, %1\n"
+    "msr cpsr, %0\n"
+    ARM_TO_THUMB
+    : "=r" (reg)
+    : "r" (level)
+  );
+}
+
+static inline uint32_t arm_status_irq_enable( void )
+{
+  uint32_t reg;
+  uint32_t psr;
+
+  RTEMS_COMPILER_MEMORY_BARRIER();
+
+  asm volatile (
+    THUMB_TO_ARM
+    "mrs %1, cpsr\n"
+    "bic %0, %1, #0x80\n"
+    "msr cpsr, %0\n"
+    ARM_TO_THUMB
+    : "=r" (reg), "=r" (psr)
+  );
+
+  return psr;
+}
+
+static inline void arm_status_restore( uint32_t psr )
+{
+  #ifdef __thumb__
+    uint32_t reg;
+
+    asm volatile (
+      THUMB_TO_ARM
+      "msr cpsr, %1\n"
+      ARM_TO_THUMB
+      : "=r" (reg)
+      : "r" (psr)
+    );
+  #else
+    asm volatile (
+      "msr cpsr, %0"
+      :
+      : "r" (psr)
+    );
+  #endif
+
+  RTEMS_COMPILER_MEMORY_BARRIER();
+}
+
+#define _CPU_ISR_Disable( _isr_cookie ) \
+  do { \
+    _isr_cookie = arm_interrupt_disable(); \
+  } while (0)
+
+#define _CPU_ISR_Enable( _isr_cookie )  \
+  arm_interrupt_enable( _isr_cookie )
+
+#define _CPU_ISR_Flash( _isr_cookie ) \
+  arm_interrupt_flash( _isr_cookie )
+
+void _CPU_ISR_Set_level( uint32_t level );
+
+uint32_t _CPU_ISR_Get_level( void );
+
+void _CPU_Context_Initialize(
+  Context_Control *the_context,
+  uint32_t *stack_base,
+  uint32_t size,
+  uint32_t new_level,
+  void *entry_point,
+  bool is_fp
+);
 
 #define _CPU_Context_Get_SP( _context ) \
   (_context)->register_sp
 
-typedef struct {
-    double      some_float_register;
-} Context_Control_fp;
-
-typedef struct {
-    uint32_t   register_r0;
-    uint32_t   register_r1;
-    uint32_t   register_r2;
-    uint32_t   register_r3;
-    uint32_t   register_ip;
-    uint32_t   register_lr;
-} CPU_Exception_frame;
-
-typedef void (*cpuExcHandlerType) (CPU_Exception_frame*);
-extern cpuExcHandlerType _currentExcHandler;
-extern void rtems_exception_init_mngt(void);
-
-/*
- *  The following structure defines the set of information saved
- *  on the current stack by RTEMS upon receipt of each interrupt
- *  that will lead to re-enter the kernel to signal the thread.
- */
-
-typedef CPU_Exception_frame CPU_Interrupt_frame;
-
-/*
- *  This variable is optional.  It is used on CPUs on which it is difficult
- *  to generate an "uninitialized" FP context.  It is filled in by
- *  _CPU_Initialize and copied into the task's FP context area during
- *  _CPU_Context_Initialize.
- */
-
-SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;
-
-/*
- *  The size of the floating point context area.  On some CPUs this
- *  will not be a "sizeof" because the format of the floating point
- *  area is not defined -- only the size is.  This is usually on
- *  CPUs with a "floating point save context" instruction.
- */
-
-#define CPU_CONTEXT_FP_SIZE sizeof( Context_Control_fp )
-
-/*
- *  Amount of extra stack (above minimum stack size) required by
- *  MPCI receive server thread.  Remember that in a multiprocessor
- *  system this thread must exist and be able to process all directives.
- */
-
-#define CPU_MPCI_RECEIVE_SERVER_EXTRA_STACK 0
-
-/*
- *  This defines the number of entries in the ISR_Vector_table managed
- *  by RTEMS.
- */
-
-#define CPU_INTERRUPT_NUMBER_OF_VECTORS      8
-#define CPU_INTERRUPT_MAXIMUM_VECTOR_NUMBER  (CPU_INTERRUPT_NUMBER_OF_VECTORS - 1)
-
-/*
- *  This is defined if the port has a special way to report the ISR nesting
- *  level.  Most ports maintain the variable _ISR_Nest_level.
- */
-
-#define CPU_PROVIDES_ISR_IS_IN_PROGRESS FALSE
-
-/*
- *  Should be large enough to run all RTEMS tests.  This ensures
- *  that a "reasonable" small application should not have any problems.
- */
-
-#define CPU_STACK_MINIMUM_SIZE          (1024*4)
-
-/*
- *  CPU's worst alignment requirement for data types on a byte boundary.  This
- *  alignment does not take into account the requirements for the stack.
- */
-
-#define CPU_ALIGNMENT              4
-
-/*
- *  This number corresponds to the byte alignment requirement for the
- *  heap handler.  This alignment requirement may be stricter than that
- *  for the data types alignment specified by CPU_ALIGNMENT.  It is
- *  common for the heap to follow the same alignment requirement as
- *  CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict enough for the heap,
- *  then this should be set to CPU_ALIGNMENT.
- *
- *  NOTE:  This does not have to be a power of 2.  It does have to
- *         be greater or equal to than CPU_ALIGNMENT.
- */
-
-#define CPU_HEAP_ALIGNMENT         CPU_ALIGNMENT
-
-/*
- *  This number corresponds to the byte alignment requirement for memory
- *  buffers allocated by the partition manager.  This alignment requirement
- *  may be stricter than that for the data types alignment specified by
- *  CPU_ALIGNMENT.  It is common for the partition to follow the same
- *  alignment requirement as CPU_ALIGNMENT.  If the CPU_ALIGNMENT is strict
- *  enough for the partition, then this should be set to CPU_ALIGNMENT.
- *
- *  NOTE:  This does not have to be a power of 2.  It does have to
- *         be greater or equal to than CPU_ALIGNMENT.
- */
-
-#define CPU_PARTITION_ALIGNMENT    CPU_ALIGNMENT
-
-/*
- *  This number corresponds to the byte alignment requirement for the
- *  stack.  This alignment requirement may be stricter than that for the
- *  data types alignment specified by CPU_ALIGNMENT.  If the CPU_ALIGNMENT
- *  is strict enough for the stack, then this should be set to 0.
- *
- *  NOTE:  This must be a power of 2 either 0 or greater than CPU_ALIGNMENT.
- */
-
-#define CPU_STACK_ALIGNMENT        4
-
-/* ISR handler macros */
-
-/*
- *  Support routine to initialize the RTEMS vector table after it is allocated.
- */
-
-#define _CPU_Initialize_vectors()
-
-/*
- *  Disable all interrupts for an RTEMS critical section.  The previous
- *  level is returned in _level.
- */
-#if (defined(__THUMB_INTERWORK__) || defined(__thumb__))
-
-extern uint32_t _CPU_ISR_Disable_Thumb(void) __attribute__ ((naked));
-extern void _CPU_ISR_Enable_Thumb( int ) __attribute__ ((naked));
-extern void _CPU_ISR_Flash_Thumb(int) __attribute__ ((naked));
-extern void _CPU_ISR_Set_level_Thumb(int ) __attribute__ ((naked));
-extern uint32_t _CPU_ISR_Get_level_Thumb(void ) __attribute__ ((naked));
-
-#define _CPU_ISR_Disable(_level)        \
- (_level) = _CPU_ISR_Disable_Thumb()
-
-#define _CPU_ISR_Enable(a)      _CPU_ISR_Enable_Thumb(a)
-
-#define _CPU_ISR_Flash(a)       _CPU_ISR_Flash_Thumb(a)
-
-#define _CPU_ISR_Set_level(a)   _CPU_ISR_Set_level_Thumb(a)
-
-#define _CPU_ISR_Get_level(a)   _CPU_ISR_Get_level_Thumb(a)
-
-#else /*For ARM mode*/
-#define _CPU_ISR_Disable( _level )                \
-  {                                               \
-    int reg;                                       \
-    asm volatile ("MRS  %0, cpsr \n"               \
-                  "ORR  %1, %0, #0xc0 \n"          \
-                  "MSR  cpsr, %1 \n"               \
-                   : "=&r" (_level), "=&r" (reg)); \
-  }
-
-/*
- *  Enable interrupts to the previous level (returned by _CPU_ISR_Disable).
- *  This indicates the end of an RTEMS critical section.  The parameter
- *  _level is not modified.
- */
-
-#define _CPU_ISR_Enable( _level )               \
-  {                                             \
-    asm volatile ("MSR  cpsr, %0 \n"            \
-                  : : "r" (_level));            \
-  }
-
-/*
- *  This temporarily restores the interrupt to _level before immediately
- *  disabling them again.  This is used to divide long RTEMS critical
- *  sections into two or more parts.  The parameter _level is not
- * modified.
- */
-
-#define _CPU_ISR_Flash( _level ) \
-  { \
-    int reg;                                    \
-    asm volatile ("MRS  %0, cpsr \n"            \
-                  "MSR  cpsr, %1 \n"            \
-                  "MSR  cpsr, %0 \n"            \
-                  : "=&r" (reg)                 \
-                  : "r" (_level));              \
-  }
-
-/*
- *  Map interrupt level in task mode onto the hardware that the CPU
- *  actually provides.  Currently, interrupt levels which do not
- *  map onto the CPU in a generic fashion are undefined.  Someday,
- *  it would be nice if these were "mapped" by the application
- *  via a callout.  For example, m68k has 8 levels 0 - 7, levels
- *  8 - 255 would be available for bsp/application specific meaning.
- *  This could be used to manage a programmable interrupt controller
- *  via the rtems_task_mode directive.
- *
- *  The get routine usually must be implemented as a subroutine.
- */
-
-#define _CPU_ISR_Set_level( new_level )         \
-  {                                             \
-    int reg = 0; /* to avoid warning */         \
-    asm volatile ("MRS  %0, cpsr \n"            \
-                  "BIC  %0, %0, #0xc0 \n"       \
-                  "ORR  %0, %0, %2 \n"          \
-                  "MSR  cpsr_c, %0 \n"          \
-                  : "=r" (reg)                  \
-                  : "0" (reg), "r" (new_level)); \
-  }
-
-#endif /*(defined(__THUMB_INTERWORK__) || defined(__thumb__))*/
-
-uint32_t   _CPU_ISR_Get_level( void );
-
-/* end of ISR handler macros */
-
-/* Context handler macros */
-
-/*
- *  Initialize the context to a state suitable for starting a
- *  task after a context restore operation.  Generally, this
- *  involves:
- *
- *     - setting a starting address
- *     - preparing the stack
- *     - preparing the stack and frame pointers
- *     - setting the proper interrupt level in the context
- *     - initializing the floating point context
- *
- *  This routine generally does not set any unnecessary register
- *  in the context.  The state of the "general data" registers is
- *  undefined at task start time.
- *
- *  NOTE: This is_fp parameter is TRUE if the thread is to be a floating
- *        point thread.  This is typically only used on CPUs where the
- *        FPU may be easily disabled by software such as on the SPARC
- *        where the PSR contains an enable FPU bit.
- */
-
-void _CPU_Context_Initialize(
-  Context_Control  *the_context,
-  uint32_t         *stack_base,
-  uint32_t          size,
-  uint32_t          new_level,
-  void             *entry_point,
-  bool              is_fp
-);
-
-/*
- *  This routine is responsible for somehow restarting the currently
- *  executing task.  If you are lucky, then all that is necessary
- *  is restoring the context.  Otherwise, there will need to be
- *  a special assembly routine which does something special in this
- *  case.  Context_Restore should work most of the time.  It will
- *  not work if restarting self conflicts with the stack frame
- *  assumptions of restoring a context.
- */
-
 #define _CPU_Context_Restart_self( _the_context ) \
    _CPU_Context_restore( (_the_context) );
 
-/*
- *  The purpose of this macro is to allow the initial pointer into
- *  a floating point context area (used to save the floating point
- *  context) to be at an arbitrary place in the floating point
- *  context area.
- *
- *  This is necessary because some FP units are designed to have
- *  their context saved as a stack which grows into lower addresses.
- *  Other FP units can be saved by simply moving registers into offsets
- *  from the base of the context area.  Finally some FP units provide
- *  a "dump context" instruction which could fill in from high to low
- *  or low to high based on the whim of the CPU designers.
- */
-
 #define _CPU_Context_Fp_start( _base, _offset ) \
    ( (void *) _Addresses_Add_offset( (_base), (_offset) ) )
 
-/*
- *  This routine initializes the FP context area passed to it to.
- *  There are a few standard ways in which to initialize the
- *  floating point context.  The code included for this macro assumes
- *  that this is a CPU in which a "initial" FP context was saved into
- *  _CPU_Null_fp_context and it simply copies it to the destination
- *  context passed to it.
- *
- *  Other models include (1) not doing anything, and (2) putting
- *  a "null FP status word" in the correct place in the FP context.
- */
-
 #define _CPU_Context_Initialize_fp( _destination ) \
-  { \
-   *(*(_destination)) = _CPU_Null_fp_context; \
-  }
-
-/* end of Context handler macros */
-
-/* Fatal Error manager macros */
-
-/*
- *  This routine copies _error into a known place -- typically a stack
- *  location or a register, optionally disables interrupts, and
- *  halts/stops the CPU.
- */
+  do { \
+    *(*(_destination)) = _CPU_Null_fp_context; \
+  } while (0)
 
 #define _CPU_Fatal_halt( _error )           \
…
                    : "0" (_error)           \
                    : "r0" );                \
-     while(1) ;                             \
-   } while(0);
-
-
-/* end of Fatal Error manager macros */
-
-/* Bitfield handler macros */
-/*
- * If we had a particularly fast function for finding the first
- * bit set in a word, it would go here. Since we don't (*), we'll
- * just use the universal macros.
- *
- * (*) On ARM V5 and later, there's a CLZ function which could be
- *     used to implement much quicker than the default macro.
- */
-#define CPU_USE_GENERIC_BITFIELD_CODE TRUE
-#define CPU_USE_GENERIC_BITFIELD_DATA TRUE
-
-/* functions */
-
-/*
- *  _CPU_Initialize
- *
- *  This routine performs CPU dependent initialization.
- */
-
-void _CPU_Initialize(void);
-
-typedef enum {
-  ARM_EXCEPTION_RESET      = 0,
-  ARM_EXCEPTION_UNDEF      = 1,
-  ARM_EXCEPTION_SWI        = 2,
-  ARM_EXCEPTION_PREF_ABORT = 3,
-  ARM_EXCEPTION_DATA_ABORT = 4,
-  ARM_EXCEPTION_RESERVED   = 5,
-  ARM_EXCEPTION_IRQ        = 6,
-  ARM_EXCEPTION_FIQ        = 7,
-  MAX_EXCEPTIONS           = 8
-} Arm_symbolic_exception_name;
-
-/*
- *  _CPU_ISR_install_vector
- *
- *  This routine installs an interrupt vector.
- */
+     while (1);                             \
+   } while (0);
+
+void _CPU_Initialize( void );
+
+#define _CPU_Initialize_vectors()
 
 void _CPU_ISR_install_vector(
-  uint32_t    vector,
-  proc_ptr    new_handler,
-  proc_ptr   *old_handler
+  uint32_t vector,
+  proc_ptr new_handler,
+  proc_ptr *old_handler
 );
 
-/*
- *  _CPU_Install_interrupt_stack
- *
- *  This routine installs the hardware interrupt stack pointer.
- *
- *  NOTE:  It need only be provided if CPU_HAS_HARDWARE_INTERRUPT_STACK
- *         is TRUE.
- */
-
 void _CPU_Install_interrupt_stack( void );
 
-/*
- *  _CPU_Context_switch
- *
- *  This routine switches from the run context to the heir context.
- */
-
-void _CPU_Context_switch(
-  Context_Control  *run,
-  Context_Control  *heir
-);
-
-/*
- *  _CPU_Context_restore
- *
- *  This routine is generally used only to restart self in an
- *  efficient manner.  It may simply be a label in _CPU_Context_switch.
- *
- *  NOTE: May be unnecessary to reload some registers.
- */
-
-void _CPU_Context_restore(
-  Context_Control *new_context
-);
-
-#if (ARM_HAS_FPU == 1)
-/*
- *  _CPU_Context_save_fp
- *
- *  This routine saves the floating point context passed to it.
- */
-
-void _CPU_Context_save_fp(
-  Context_Control_fp **fp_context_ptr
-);
-
-/*
- *  _CPU_Context_restore_fp
- *
- *  This routine restores the floating point context passed to it.
- */
-
-void _CPU_Context_restore_fp(
-  Context_Control_fp **fp_context_ptr
-);
-#endif /* (ARM_HAS_FPU == 1) */
-
-/*  The following routine swaps the endian format of an unsigned int.
- *  It must be static because it is referenced indirectly.
- *
- *  This version will work on any processor, but if there is a better
- *  way for your CPU PLEASE use it.  The most common way to do this is to:
- *
- *     swap least significant two bytes with 16-bit rotate
- *     swap upper and lower 16-bits
- *     swap most significant two bytes with 16-bit rotate
- *
- *  Some CPUs have special instructions which swap a 32-bit quantity in
- *  a single instruction (e.g. i486).  It is probably best to avoid
- *  an "endian swapping control bit" in the CPU.  One good reason is
- *  that interrupts would probably have to be disabled to ensure that
- *  an interrupt does not try to access the same "chunk" with the wrong
- *  endian.  Another good reason is that on some CPUs, the endian bit
- *  endianness for ALL fetches -- both code and data -- so the code
- *  will be fetched incorrectly.
- */
-
-static inline uint32_t CPU_swap_u32(
-  uint32_t value
-)
+void _CPU_Context_switch( Context_Control *run, Context_Control *heir );
+
+void _CPU_Context_restore( Context_Control *new_context );
+
+void _CPU_Context_save_fp( Context_Control_fp **fp_context_ptr );
+
+void _CPU_Context_restore_fp( Context_Control_fp **fp_context_ptr );
+
+static inline uint32_t CPU_swap_u32( uint32_t value )
 {
 #if defined(__thumb__)
   uint32_t byte1, byte2, byte3, byte4, swapped;
-
+
   byte4 = (value >> 24) & 0xff;
   byte3 = (value >> 16) & 0xff;
   byte2 = (value >> 8)  & 0xff;
-  byte1 =  value        & 0xff;
-
+  byte1 =  value & 0xff;
+
   swapped = (byte1 << 24) | (byte2 << 16) | (byte3 << 8) | byte4;
   return swapped;
 #else
-  uint32_t   tmp = value; /* make compiler warnings go away */
-  asm volatile ("EOR   %1, %0, %0, ROR #16\n"
-                "BIC   %1, %1, #0xff0000\n"
-                "MOV   %0, %0, ROR #8\n"
-                "EOR   %0, %0, %1, LSR #8\n"
-                : "=r" (value), "=r" (tmp)
+  uint32_t tmp = value; /* make compiler warnings go away */
+  asm volatile ("EOR %1, %0, %0, ROR #16\n"
+                "BIC %1, %1, #0xff0000\n"
+                "MOV %0, %0, ROR #8\n"
+                "EOR %0, %0, %1, LSR #8\n"
+                : "=r" (value), "=r" (tmp)
                 : "0" (value), "1" (tmp));
   return value;
 #endif
 }
 
-static inline uint16_t CPU_swap_u16(uint16_t value)
-{
-    uint16_t   lower;
-    uint16_t   upper;
-
-    value = value & (uint16_t) 0xffff;
-    lower = (value >> 8);
-    upper = (value << 8) & 0xff;
-
-    return (lower | upper);
-}
+static inline uint16_t CPU_swap_u16( uint16_t value )
+{
+  return (uint16_t) (((value & 0xffU) << 8) | ((value >> 8) & 0xffU));
+}
+
+/* XXX */
+
+extern uint32_t arm_cpu_mode;
+
+void arm_exc_abort_data( void );
+
+void arm_exc_abort_prefetch( void );
+
+void arm_exc_interrupt( void );
+
+void arm_exc_undefined( void );
+
+void bsp_interrupt_dispatch( void );
 
 #ifdef __cplusplus
 }
 #endif
 
-#endif
+#endif /* ASM */
+
+#endif /* _RTEMS_SCORE_CPU_H */
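With CPU_MODES_INTERRUPT_MASK now 0x80, _CPU_ISR_Disable() masks only IRQ and leaves FIQ enabled, as the comment in the new header explains. A minimal critical-section sketch built on the new macros; shared_counter is a hypothetical piece of shared data:

#include <rtems/score/cpu.h>

static volatile uint32_t shared_counter; /* hypothetical shared data */

void increment_safely( void )
{
  uint32_t level;

  _CPU_ISR_Disable( level ); /* mask IRQ and save the previous CPSR */
  shared_counter++;          /* FIQ may still preempt, by design */
  _CPU_ISR_Enable( level );  /* restore the saved CPSR */
}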
  • cpukit/score/cpu/arm/rtems/score/types.h

    r20857e12 r632e4306  
 typedef uint16_t         Priority_Bit_map_control;
 
-typedef void               arm_cpu_isr;
-typedef void            (*arm_cpu_isr_entry)( void );
-
 #ifdef RTEMS_DEPRECATED_TYPES
 typedef bool            boolean;            /* Boolean value   */