Changeset 06dcaf0 in rtems


Ignore:
Timestamp:
Mar 16, 2011, 8:05:06 PM (8 years ago)
Author:
Joel Sherrill <joel.sherrill@…>
Branches:
4.11, master
Children:
66729db
Parents:
ef99f23a
Message:

2011-03-16 Jennifer Averett <jennifer.averett@…>

PR 1729/cpukit

  • configure.ac, sapi/include/confdefs.h, sapi/src/exinit.c, score/Makefile.am, score/preinstall.am, score/cpu/i386/rtems/score/cpu.h, score/cpu/sparc/cpu_asm.S, score/cpu/sparc/rtems/score/cpu.h, score/include/rtems/score/basedefs.h, score/include/rtems/score/context.h, score/include/rtems/score/percpu.h, score/src/percpu.c, score/src/thread.c, score/src/threadcreateidle.c: Add next step in SMP support. This adds an allocated array of the Per_CPU structures to support multiple cpus vs a single instance of the structure which is still used if SMP support is disabled. Configuration support is also added to explicitly enable or disable SMP. But SMP can only be enabled for the CPUs which will support it initially -- SPARC and i386. With the stub BSP support, a BSP can be run as a single core SMP system from an RTEMS data structure standpoint.
  • aclocal/check-smp.m4, aclocal/enable-smp.m4, score/include/rtems/bspsmp.h, score/include/rtems/score/smplock.h, score/src/smp.c, score/src/smplock.c: New files.
Location:
cpukit
Files:
6 added
15 edited

Legend:

Unmodified
Added
Removed
  • cpukit/ChangeLog

    ref99f23a r06dcaf0  
     12011-03-16      Jennifer Averett <jennifer.averett@OARcorp.com>
     2
     3        PR 1729/cpukit
     4        * configure.ac, sapi/include/confdefs.h, sapi/src/exinit.c,
     5        score/Makefile.am, score/preinstall.am,
     6        score/cpu/i386/rtems/score/cpu.h, score/cpu/sparc/cpu_asm.S,
     7        score/cpu/sparc/rtems/score/cpu.h,
     8        score/include/rtems/score/basedefs.h,
     9        score/include/rtems/score/context.h,
     10        score/include/rtems/score/percpu.h, score/src/percpu.c,
     11        score/src/thread.c, score/src/threadcreateidle.c: Add next step in
     12        SMP support. This adds an allocated array of the Per_CPU structures
     13        to support multiple cpus vs a single instance of the structure which
     14        is still used if SMP support is disabled. Configuration support is
     15        also added to explicitly enable or disable SMP. But SMP can only be
     16        enabled for the CPUs which will support it initially -- SPARC and
     17        i386. With the stub BSP support, a BSP can be run as a single core
     18        SMP system from an RTEMS data structure standpoint.
     19        * aclocal/check-smp.m4, aclocal/enable-smp.m4,
     20        score/include/rtems/bspsmp.h, score/include/rtems/score/smplock.h,
     21        score/src/smp.c, score/src/smplock.c: New files.
     22
    1232011-03-16      Jennifer Averett <jennifer.averett@OARcorp.com>
    224
  • cpukit/configure.ac

    ref99f23a r06dcaf0  
    139139RTEMS_CHECK_POSIX_API
    140140RTEMS_CHECK_NETWORKING
     141RTEMS_CHECK_SMP
    141142
    142143rtems_major=`echo _RTEMS_VERSION | sed "s/\..*//"`
     
    169170  [1],
    170171  [if posix api is supported])
     172
     173RTEMS_CPUOPT([RTEMS_SMP],
     174  [test x"$RTEMS_HAS_SMP" = xyes],
     175  [1],
     176  [if SMP is enabled])
    171177
    172178RTEMS_CPUOPT([RTEMS_NETWORKING],
     
    291297
    292298AM_CONDITIONAL(HAS_MP,test x"$enable_multiprocessing" = x"yes" )
     299AM_CONDITIONAL(HAS_SMP,[test "$RTEMS_HAS_SMP" = "yes"])
    293300
    294301AM_CONDITIONAL(HAS_PTHREADS,test x"$rtems_cv_HAS_POSIX_API" = x"yes")
  • cpukit/sapi/include/confdefs.h

    ref99f23a r06dcaf0  
    173173#else
    174174  extern int rtems_telnetd_maximum_ptys;
     175#endif
     176
     177#if defined(RTEMS_SMP)
     178  /*
     179   *  If configured for SMP, then we need to know the maximum CPU cores.
     180   */
     181  #if !defined(CONFIGURE_SMP_APPLICATION)
     182    #if !defined(CONFIGURE_SMP_MAXIMUM_PROCESSORS)
     183      #define CONFIGURE_SMP_MAXIMUM_PROCESSORS 1
     184    #endif
     185  #else
     186    #if !defined(CONFIGURE_SMP_MAXIMUM_PROCESSORS)
     187      #error "CONFIGURE_SMP_MAXIMUM_PROCESSORS not specified for SMP Application"
     188    #endif
     189  #endif
    175190#endif
    176191
     
    18061821
    18071822/**
    1808  *  This defines the amount of memory reserved for the IDLE task
    1809  *  control structures and stack.
    1810  */
    1811 #define CONFIGURE_MEMORY_FOR_IDLE_TASK \
    1812     (CONFIGURE_MEMORY_FOR_TASKS(1, 0) + \
    1813      (CONFIGURE_IDLE_TASK_STACK_SIZE - CONFIGURE_MINIMUM_TASK_STACK_SIZE))
     1823 *  This defines the formula used to compute the amount of memory
     1824 *  reserved for IDLE task control structures and stacks.
     1825 */
     1826#define CONFIGURE_IDLE_TASKS(_count) \
     1827    (CONFIGURE_MEMORY_FOR_TASKS(_count, 0) + \
     1828      _count * _Configure_From_workspace( \
     1829       (CONFIGURE_IDLE_TASK_STACK_SIZE - CONFIGURE_MINIMUM_TASK_STACK_SIZE)))
     1830
     1831/**
     1832 *  This calculates the amount of memory reserved for the IDLE tasks.
     1833 *  In an SMP system, each CPU core has its own idle task.
     1834 */
     1835#if defined(RTEMS_SMP)
     1836  #define CONFIGURE_MEMORY_FOR_IDLE_TASK \
     1837          CONFIGURE_IDLE_TASKS(CONFIGURE_SMP_MAXIMUM_PROCESSORS)
     1838#else
     1839  #define CONFIGURE_MEMORY_FOR_IDLE_TASK \
     1840          CONFIGURE_IDLE_TASKS(1)
     1841#endif
    18141842
    18151843/**
     
    18981926   CONFIGURE_MEMORY_FOR_USER_EXTENSIONS(CONFIGURE_MAXIMUM_USER_EXTENSIONS) \
    18991927  )
     1928
     1929#if defined(RTEMS_SMP)
     1930  #define CONFIGURE_MEMORY_FOR_SMP \
     1931     (CONFIGURE_SMP_MAXIMUM_PROCESSORS * \
     1932      _Configure_From_workspace( CONFIGURE_INTERRUPT_STACK_SIZE ) \
     1933     )
     1934#else
     1935  #define CONFIGURE_MEMORY_FOR_SMP 0
     1936#endif
    19001937
    19011938#if defined(CONFIGURE_CONFDEFS_DEBUG) && defined(CONFIGURE_INIT)
     
    20132050   CONFIGURE_MEMORY_FOR_POSIX + \
    20142051   (CONFIGURE_MAXIMUM_POSIX_THREADS * CONFIGURE_MINIMUM_TASK_STACK_SIZE ) + \
    2015    (CONFIGURE_MAXIMUM_GOROUTINES * CONFIGURE_MINIMUM_TASK_STACK_SIZE ) + \
     2052   (CONFIGURE_MAXIMUM_GOROUTINES * CONFIGURE_MINIMUM_TASK_STACK_SIZE) + \
    20162053   CONFIGURE_INITIALIZATION_THREADS_STACKS + \
    20172054   CONFIGURE_MEMORY_FOR_STATIC_EXTENSIONS + \
    20182055   CONFIGURE_MEMORY_FOR_MP + \
     2056   CONFIGURE_MEMORY_FOR_SMP + \
    20192057   CONFIGURE_MESSAGE_BUFFER_MEMORY + \
    20202058   (CONFIGURE_MEMORY_OVERHEAD * 1024) + \
     
    21182156#endif /* CONFIGURE_HAS_OWN_CONFIGURATION_TABLE */
    21192157
     2158#if defined(RTEMS_SMP)
     2159  /**
     2160   *  Instantiate the variable which specifies the number of CPUs
     2161   *  in an SMP configuration.
     2162   */
     2163  #if defined(CONFIGURE_INIT)
     2164    uint32_t rtems_smp_maximum_processors = CONFIGURE_SMP_MAXIMUM_PROCESSORS;
     2165  #else
     2166    extern uint32_t rtems_smp_maximum_processors;
     2167  #endif
     2168 /*
     2169  * Instantiate the Per CPU information based upon the user configuration.
     2170  */
     2171 #if defined(CONFIGURE_INIT)
     2172   Per_CPU_Control _Per_CPU_Information[CONFIGURE_SMP_MAXIMUM_PROCESSORS];
     2173   Per_CPU_Control *_Per_CPU_Information_p[CONFIGURE_SMP_MAXIMUM_PROCESSORS];
     2174 #endif
     2175
     2176#endif
     2177
    21202178/*
    21212179 *  If the user has configured a set of Classic API Initialization Tasks,
  • cpukit/sapi/src/exinit.c

    ref99f23a r06dcaf0  
    22 *  Initialization Manager
    33 *
    4  *  COPYRIGHT (c) 1989-2008.
     4 *  COPYRIGHT (c) 1989-2011.
    55 *  On-Line Applications Research Corporation (OAR).
    66 *
     
    5656#ifdef RTEMS_POSIX_API
    5757  #include <rtems/posix/posixapi.h>
     58#endif
     59
     60#if defined(RTEMS_SMP)
     61  #include <rtems/bspsmp.h>
     62  #include <rtems/score/percpu.h>
    5863#endif
    5964
     
    115120  _Workspace_Handler_initialization();
    116121
     122  #if defined(RTEMS_SMP)
     123    _SMP_Handler_initialize();
     124  #endif
     125
    117126  _User_extensions_Handler_initialization();
    118127  _ISR_Handler_initialization();
     
    149158  #ifdef RTEMS_POSIX_API
    150159    _POSIX_API_Initialize();
     160  #endif
     161
     162  /*
     163   * Discover and initialize the secondary cores in an SMP system.
     164   */
     165  #if defined(RTEMS_SMP)
     166    _SMP_Processor_count = bsp_smp_initialize( rtems_smp_maximum_processors );
    151167  #endif
    152168
  • cpukit/score/Makefile.am

    ref99f23a r06dcaf0  
    1313
    1414include_rtems_HEADERS = include/rtems/debug.h include/rtems/system.h \
    15     include/rtems/seterr.h
     15    include/rtems/seterr.h include/rtems/bspsmp.h
    1616
    1717include_rtems_scoredir = $(includedir)/rtems/score
     
    3636    include/rtems/score/tqdata.h include/rtems/score/userext.h \
    3737    include/rtems/score/watchdog.h include/rtems/score/wkspace.h \
    38     include/rtems/score/cpuopts.h include/rtems/score/basedefs.h
     38    include/rtems/score/cpuopts.h include/rtems/score/basedefs.h \
     39    include/rtems/score/smplock.h
    3940
    4041if HAS_PTHREADS
     
    8990endif
    9091
     92libscore_a_SOURCES += src/smp.c src/smplock.c
     93
    9194## CORE_APIMUTEX_C_FILES
    9295libscore_a_SOURCES += src/apimutex.c src/apimutexallocate.c \
  • cpukit/score/cpu/i386/rtems/score/cpu.h

    ref99f23a r06dcaf0  
    77 *  i386 processor.
    88 *
    9  *  COPYRIGHT (c) 1989-2008.
     9 *  COPYRIGHT (c) 1989-2011.
    1010 *  On-Line Applications Research Corporation (OAR).
    1111 *
     
    271271
    272272SCORE_EXTERN Context_Control_fp  _CPU_Null_fp_context;
    273 SCORE_EXTERN void               *_CPU_Interrupt_stack_low;
    274 SCORE_EXTERN void               *_CPU_Interrupt_stack_high;
    275273
    276274#endif /* ASM */
     
    437435#define _CPU_Context_Restart_self( _the_context ) \
    438436   _CPU_Context_restore( (_the_context) );
     437
     438#if defined(RTEMS_SMP)
     439  #define _CPU_Context_switch_to_first_task_smp( _the_context ) \
     440     _CPU_Context_restore( (_the_context) );
     441
     442  /* address space 1 is uncacheable */
     443  #define SMP_CPU_SWAP( _address, _value, _previous ) \
     444    do { \
     445      asm volatile("lock; xchgl %0, %1" : \
     446        "+m" (*_address), "=a" (_previous) : \
     447        "1" (_value) : \
     448        "cc"); \
     449    } while (0)
     450#endif
    439451
    440452#define _CPU_Context_Fp_start( _base, _offset ) \
  • cpukit/score/cpu/sparc/cpu_asm.S

    ref99f23a r06dcaf0  
    55 *  in assembly language.
    66 *
    7  *  COPYRIGHT (c) 1989-2010.
     7 *  COPYRIGHT (c) 1989-2011.
    88 *  On-Line Applications Research Corporation (OAR).
    99 *
     
    320320 *  NOTE: It is unnecessary to reload some registers.
    321321 */
    322 
    323322        .align 4
    324323        PUBLIC(_CPU_Context_restore)
     
    328327        ba      SYM(_CPU_Context_restore_heir)
    329328        mov     %i0, %o1                      ! in the delay slot
    330 
    331 /*
    332  *  void _ISR_Handler()
    333  *
    334  *  This routine provides the RTEMS interrupt management.
    335  *
    336  *  We enter this handler from the 4 instructions in the trap table with
    337  *  the following registers assumed to be set as shown:
    338  *
    339  *    l0 = PSR
    340  *    l1 = PC
    341  *    l2 = nPC
    342  *    l3 = trap type
    343  *
    344  *  NOTE: By an executive defined convention, trap type is between 0 and 255 if
    345  *        it is an asynchronous trap and 256 and 511 if it is synchronous.
    346  */
    347 
    348         .align 4
    349         PUBLIC(_ISR_Handler)
    350 SYM(_ISR_Handler):
    351         /*
    352          *  Fix the return address for synchronous traps.
    353          */
    354 
    355         andcc   %l3, SPARC_SYNCHRONOUS_TRAP_BIT_MASK, %g0
    356                                       ! Is this a synchronous trap?
    357         be,a    win_ovflow            ! No, then skip the adjustment
    358         nop                           ! DELAY
    359         mov     %l1, %l6              ! save trapped pc for debug info
    360         mov     %l2, %l1              ! do not return to the instruction
    361         add     %l2, 4, %l2           ! indicated
    362 
    363 win_ovflow:
    364         /*
    365          *  Save the globals this block uses.
    366          *
    367          *  These registers are not restored from the locals.  Their contents
    368          *  are saved directly from the locals into the ISF below.
    369          */
    370 
    371         mov     %g4, %l4                 ! save the globals this block uses
    372         mov     %g5, %l5
    373 
    374         /*
    375          *  When at a "window overflow" trap, (wim == (1 << cwp)).
    376          *  If we get here like that, then process a window overflow.
    377          */
    378 
    379         rd      %wim, %g4
    380         srl     %g4, %l0, %g5            ! g5 = win >> cwp ; shift count and CWP
    381                                          !   are LS 5 bits ; how convenient :)
    382         cmp     %g5, 1                   ! Is this an invalid window?
    383         bne     dont_do_the_window       ! No, then skip all this stuff
    384         ! we are using the delay slot
    385 
    386         /*
    387          *  The following is same as a 1 position right rotate of WIM
    388          */
    389 
    390         srl     %g4, 1, %g5              ! g5 = WIM >> 1
    391         sll     %g4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %g4
    392                                          ! g4 = WIM << (Number Windows - 1)
    393         or      %g4, %g5, %g4            ! g4 = (WIM >> 1) |
    394                                          !      (WIM << (Number Windows - 1))
    395 
    396         /*
    397          *  At this point:
    398          *
    399          *    g4 = the new WIM
    400          *    g5 is free
    401          */
    402 
    403         /*
    404          *  Since we are tinkering with the register windows, we need to
    405          *  make sure that all the required information is in global registers.
    406          */
    407 
    408         save                          ! Save into the window
    409         wr      %g4, 0, %wim          ! WIM = new WIM
    410         nop                           ! delay slots
    411         nop
    412         nop
    413 
    414         /*
    415          *  Now save the window just as if we overflowed to it.
    416          */
    417 
    418         std     %l0, [%sp + CPU_STACK_FRAME_L0_OFFSET]
    419         std     %l2, [%sp + CPU_STACK_FRAME_L2_OFFSET]
    420         std     %l4, [%sp + CPU_STACK_FRAME_L4_OFFSET]
    421         std     %l6, [%sp + CPU_STACK_FRAME_L6_OFFSET]
    422 
    423         std     %i0, [%sp + CPU_STACK_FRAME_I0_OFFSET]
    424         std     %i2, [%sp + CPU_STACK_FRAME_I2_OFFSET]
    425         std     %i4, [%sp + CPU_STACK_FRAME_I4_OFFSET]
    426         std     %i6, [%sp + CPU_STACK_FRAME_I6_FP_OFFSET]
    427 
    428         restore
    429         nop
    430 
    431 dont_do_the_window:
    432         /*
    433          *  Global registers %g4 and %g5 are saved directly from %l4 and
    434          *  %l5 directly into the ISF below.
    435          */
    436 
    437 save_isf:
    438 
    439         /*
    440          *  Save the state of the interrupted task -- especially the global
    441          *  registers -- in the Interrupt Stack Frame.  Note that the ISF
    442          *  includes a regular minimum stack frame which will be used if
    443          *  needed by register window overflow and underflow handlers.
    444          *
    445          *  REGISTERS SAME AS AT _ISR_Handler
    446          */
    447 
    448         sub     %fp, CONTEXT_CONTROL_INTERRUPT_FRAME_SIZE, %sp
    449                                                ! make space for ISF
    450 
    451         std     %l0, [%sp + ISF_PSR_OFFSET]    ! save psr, PC
    452         st      %l2, [%sp + ISF_NPC_OFFSET]    ! save nPC
    453         st      %g1, [%sp + ISF_G1_OFFSET]     ! save g1
    454         std     %g2, [%sp + ISF_G2_OFFSET]     ! save g2, g3
    455         std     %l4, [%sp + ISF_G4_OFFSET]     ! save g4, g5 -- see above
    456         std     %g6, [%sp + ISF_G6_OFFSET]     ! save g6, g7
    457 
    458         std     %i0, [%sp + ISF_I0_OFFSET]     ! save i0, i1
    459         std     %i2, [%sp + ISF_I2_OFFSET]     ! save i2, i3
    460         std     %i4, [%sp + ISF_I4_OFFSET]     ! save i4, i5
    461         std     %i6, [%sp + ISF_I6_FP_OFFSET]  ! save i6/fp, i7
    462 
    463         rd      %y, %g1
    464         st      %g1, [%sp + ISF_Y_OFFSET]      ! save y
    465         st      %l6, [%sp + ISF_TPC_OFFSET]    ! save real trapped pc
    466 
    467         mov     %sp, %o1                       ! 2nd arg to ISR Handler
    468 
    469         /*
    470          *  Increment ISR nest level and Thread dispatch disable level.
    471          *
    472          *  Register usage for this section:
    473          *
    474          *    l4 = _Thread_Dispatch_disable_level pointer
    475          *    l5 = per cpu info pointer
    476          *    l6 = _Thread_Dispatch_disable_level value
    477          *    l7 = _ISR_Nest_level value
    478          *
    479          *  NOTE: It is assumed that l4 - l7 will be preserved until the ISR
    480          *        nest and thread dispatch disable levels are unnested.
    481          */
    482 
    483         sethi    %hi(SYM(_Thread_Dispatch_disable_level)), %l4
    484         ld       [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))], %l6
    485 
    486         sethi    %hi(_Per_CPU_Information), %l5
    487         add      %l5, %lo(_Per_CPU_Information), %l5
    488 
    489         ld       [%l5 + PER_CPU_ISR_NEST_LEVEL], %l7
    490 
    491         add      %l6, 1, %l6
    492         st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]
    493 
    494         add      %l7, 1, %l7
    495         st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]
    496 
    497         /*
    498          *  If ISR nest level was zero (now 1), then switch stack.
    499          */
    500 
    501         mov      %sp, %fp
    502         subcc    %l7, 1, %l7             ! outermost interrupt handler?
    503         bnz      dont_switch_stacks      ! No, then do not switch stacks
    504 
    505         nop
    506         ld       [%l5 + PER_CPU_INTERRUPT_STACK_HIGH], %sp
    507 
    508 dont_switch_stacks:
    509         /*
    510          *  Make sure we have a place on the stack for the window overflow
    511          *  trap handler to write into.  At this point it is safe to
    512          *  enable traps again.
    513          */
    514 
    515         sub      %sp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp
    516 
    517         /*
    518          *  Check if we have an external interrupt (trap 0x11 - 0x1f). If so,
    519          *  set the PIL in the %psr to mask off interrupts with lower priority.
    520          *  The original %psr in %l0 is not modified since it will be restored
    521          *  when the interrupt handler returns.
    522          */
    523 
    524         mov      %l0, %g5
    525         and      %l3, 0x0ff, %g4
    526 
    527 /* This is a fix for ERC32 with FPU rev.B or rev.C */
    528 
    529 #if defined(FPU_REVB)
    530 
    531 
    532         subcc    %g4, 0x08, %g0
    533         be       fpu_revb
    534         subcc    %g4, 0x11, %g0
    535         bl       dont_fix_pil
    536         subcc    %g4, 0x1f, %g0
    537         bg       dont_fix_pil
    538         sll      %g4, 8, %g4
    539         and      %g4, SPARC_PSR_PIL_MASK, %g4
    540         andn     %l0, SPARC_PSR_PIL_MASK, %g5
    541         or       %g4, %g5, %g5
    542         srl      %l0, 12, %g4
    543         andcc    %g4, 1, %g0
    544         be       dont_fix_pil
    545         nop
    546         ba,a     enable_irq
    547 
    548 
    549 fpu_revb:
    550         srl      %l0, 12, %g4   ! check if EF is set in %psr
    551         andcc    %g4, 1, %g0
    552         be       dont_fix_pil   ! if FPU disabled then continue as normal
    553         and      %l3, 0xff, %g4
    554         subcc    %g4, 0x08, %g0
    555         bne      enable_irq     ! if not a FPU exception then do two fmovs
    556         set      __sparc_fq, %g4
    557         st       %fsr, [%g4]    ! if FQ is not empty and FQ[1] = fmovs
    558         ld       [%g4], %g4     ! then this is bug 3.14
    559         srl      %g4, 13, %g4
    560         andcc    %g4, 1, %g0
    561         be       dont_fix_pil
    562         set      __sparc_fq, %g4
    563         std      %fq, [%g4]
    564         ld       [%g4+4], %g4
    565         set      0x81a00020, %g5
    566         subcc    %g4, %g5, %g0
    567         bne,a    dont_fix_pil2
    568         wr       %l0, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
    569         ba,a     simple_return
    570        
    571 enable_irq:
    572         or       %g5, SPARC_PSR_PIL_MASK, %g4
    573         wr       %g4, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
    574         nop; nop; nop
    575         fmovs    %f0, %f0
    576         ba       dont_fix_pil
    577         fmovs    %f0, %f0
    578 
    579         .data
    580         .global __sparc_fq
    581         .align 8
    582 __sparc_fq:
    583         .word 0,0
    584 
    585         .text
    586 /* end of ERC32 FPU rev.B/C fix */
    587 
    588 #else
    589 
    590         subcc    %g4, 0x11, %g0
    591         bl       dont_fix_pil
    592         subcc    %g4, 0x1f, %g0
    593         bg       dont_fix_pil
    594         sll      %g4, 8, %g4
    595         and      %g4, SPARC_PSR_PIL_MASK, %g4
    596         andn     %l0, SPARC_PSR_PIL_MASK, %g5
    597         ba       pil_fixed
    598         or       %g4, %g5, %g5
     329        .align 4
     330
     331#if defined(RTEMS_SMP)
     332/*
     333 *  void _CPU_Context_switch_to_first_task_smp(
     334 *    Context_Control *new_context
     335 *  )
     336 *
     337 *  This routine is only used to switch to the first task on a
     338 *  secondary core in an SMP configuration.  We do not need to
     339 *  flush all the windows and, in fact, this can be dangerous
     340 *  as they may or may not be initialized properly.  So we just
     341 *  reinitialize the PSR and WIM.
     342 */
     343        PUBLIC(_CPU_Context_switch_to_first_task_smp)
     344SYM(_CPU_Context_switch_to_first_task_smp):
     345        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
     346
     347        mov     %psr, %g1               ! Initialize WIM
     348        add     %g1, 1, %g2
     349        and     %g2, 0x7, %g2
     350        set     1, %g3
     351        sll     %g3, %g2, %g3
     352        mov     %g3, %wim
     353        ba      done_flushing
     354        mov     %i0, %o1                      ! in the delay slot
    599355#endif
    600356
    601 dont_fix_pil:
    602         or       %g5, SPARC_PSR_PIL_MASK, %g5
    603 pil_fixed:
    604         wr       %g5, SPARC_PSR_ET_MASK, %psr ! **** ENABLE TRAPS ****
    605 dont_fix_pil2:
    606 
    607         /*
    608          *  Vector to user's handler.
    609          *
    610          *  NOTE: TBR may no longer have vector number in it since
    611          *        we just enabled traps.  It is definitely in l3.
    612          */
    613 
    614         sethi    %hi(SYM(_ISR_Vector_table)), %g4
    615         ld       [%g4+%lo(SYM(_ISR_Vector_table))], %g4
    616         and      %l3, 0xFF, %g5         ! remove synchronous trap indicator
    617         sll      %g5, 2, %g5            ! g5 = offset into table
    618         ld       [%g4 + %g5], %g4       ! g4 = _ISR_Vector_table[ vector ]
    619 
    620 
    621                                         ! o1 = 2nd arg = address of the ISF
    622                                         !   WAS LOADED WHEN ISF WAS SAVED!!!
    623         mov      %l3, %o0               ! o0 = 1st arg = vector number
    624         call     %g4, 0
    625         nop                             ! delay slot
    626 
    627         /*
    628          *  Redisable traps so we can finish up the interrupt processing.
    629          *  This is a VERY conservative place to do this.
    630          *
    631          *  NOTE: %l0 has the PSR which was in place when we took the trap.
    632          */
    633 
    634         mov      %l0, %psr             ! **** DISABLE TRAPS ****
    635         nop; nop; nop
    636 
    637         /*
    638          *  Decrement ISR nest level and Thread dispatch disable level.
    639          *
    640          *  Register usage for this section:
    641          *
    642          *    l4 = _Thread_Dispatch_disable_level pointer
    643          *    l5 = _ISR_Nest_level pointer
    644          *    l6 = _Thread_Dispatch_disable_level value
    645          *    l7 = _ISR_Nest_level value
    646          */
    647 
    648         sub      %l6, 1, %l6
    649         st       %l6, [%l4 + %lo(SYM(_Thread_Dispatch_disable_level))]
    650 
    651         st       %l7, [%l5 + PER_CPU_ISR_NEST_LEVEL]
    652 
    653         /*
    654          *  If dispatching is disabled (includes nested interrupt case),
    655          *  then do a "simple" exit.
    656          */
    657 
    658         orcc     %l6, %g0, %g0   ! Is dispatching disabled?
    659         bnz      simple_return   ! Yes, then do a "simple" exit
    660         ! NOTE: Use the delay slot
    661         sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l6
    662 
    663         ! Are we dispatching from a previous ISR in the interrupted thread?
    664         ld       [%l6 + %lo(SYM(_CPU_ISR_Dispatch_disable))], %l7
    665         orcc     %l7, %g0, %g0   ! Is this thread already doing an ISR?
    666         bnz      simple_return   ! Yes, then do a "simple" exit
    667         nop
    668 
    669 
    670         /*
    671          *  If a context switch is necessary, then do fudge stack to
    672          *  return to the interrupt dispatcher.
    673          */
    674 
    675         ldub     [%l5 + PER_CPU_DISPATCH_NEEDED], %l5
    676 
    677         orcc     %l5, %g0, %g0   ! Is thread switch necessary?
    678         bz       simple_return   ! no, then do a simple return
    679         nop
    680 
    681         /*
    682          *  Invoke interrupt dispatcher.
    683          */
    684 
    685         PUBLIC(_ISR_Dispatch)
    686 SYM(_ISR_Dispatch):
    687         ! Set ISR dispatch nesting prevention flag
    688         mov      1,%l6
    689         sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l5
    690         st       %l6,[%l5 + %lo(SYM(_CPU_ISR_Dispatch_disable))]
    691 
    692         /*
    693          *  The following subtract should get us back on the interrupted
    694          *  tasks stack and add enough room to invoke the dispatcher.
    695          *  When we enable traps, we are mostly back in the context
    696          *  of the task and subsequent interrupts can operate normally.
    697          */
    698 
    699         sub      %fp, CPU_MINIMUM_STACK_FRAME_SIZE, %sp
    700 
    701         or      %l0, SPARC_PSR_ET_MASK, %l7    ! l7 = PSR with ET=1
    702         mov     %l7, %psr                      !  **** ENABLE TRAPS ****
    703         nop
    704         nop
    705         nop
    706 isr_dispatch:
    707         call    SYM(_Thread_Dispatch), 0
    708         nop
    709 
    710         /*
    711          *  We invoked _Thread_Dispatch in a state similar to the interrupted
    712          *  task.  In order to safely be able to tinker with the register
    713          *  windows and get the task back to its pre-interrupt state,
    714          *  we need interrupts disabled so we can safely tinker
    715          *  with the register windowing.  In particular, the CWP in the PSR
    716          *  is fragile during this period. (See PR578.)
    717          */
    718         mov     2,%g1                           ! syscall (disable interrupts)
    719         ta      0                               ! syscall (disable interrupts)
    720 
    721         /*
    722          *  While we had ISR dispatching disabled in this thread,
    723          *  did we miss anything.  If so, then we need to do another
    724          *  _Thread_Dispatch before leaving this ISR Dispatch context.
    725          */
    726 
    727         sethi    %hi(_Per_CPU_Information), %l5
    728         add      %l5, %lo(_Per_CPU_Information), %l5
    729 
    730         ldub     [%l5 + PER_CPU_DISPATCH_NEEDED], %l7
    731 
    732         orcc     %l7, %g0, %g0    ! Is thread switch necessary?
    733         bz       allow_nest_again ! No, then clear out and return
    734         nop
    735 
    736         ! Yes, then invoke the dispatcher
    737 dispatchAgain:
    738         mov     3,%g1                           ! syscall (enable interrupts)
    739         ta      0                               ! syscall (enable interrupts)
    740         ba      isr_dispatch
    741         nop
    742 
    743 allow_nest_again:
    744 
    745         ! Zero out ISR stack nesting prevention flag
    746         sethi    %hi(SYM(_CPU_ISR_Dispatch_disable)), %l5
    747         st       %g0,[%l5 + %lo(SYM(_CPU_ISR_Dispatch_disable))]
    748 
    749         /*
    750          *  The CWP in place at this point may be different from
    751          *  that which was in effect at the beginning of the ISR if we
    752          *  have been context switched between the beginning of this invocation
    753          *  of _ISR_Handler and this point.  Thus the CWP and WIM should
    754          *  not be changed back to their values at ISR entry time.  Any
    755          *  changes to the PSR must preserve the CWP.
    756          */
    757 
    758 simple_return:
    759         ld      [%fp + ISF_Y_OFFSET], %l5      ! restore y
    760         wr      %l5, 0, %y
    761 
    762         ldd     [%fp + ISF_PSR_OFFSET], %l0    ! restore psr, PC
    763         ld      [%fp + ISF_NPC_OFFSET], %l2    ! restore nPC
    764         rd      %psr, %l3
    765         and     %l3, SPARC_PSR_CWP_MASK, %l3   ! want "current" CWP
    766         andn    %l0, SPARC_PSR_CWP_MASK, %l0   ! want rest from task
    767         or      %l3, %l0, %l0                  ! install it later...
    768         andn    %l0, SPARC_PSR_ET_MASK, %l0
    769 
    770         /*
    771          *  Restore tasks global and out registers
    772          */
    773 
    774         mov    %fp, %g1
    775 
    776                                               ! g1 is restored later
    777         ldd     [%fp + ISF_G2_OFFSET], %g2    ! restore g2, g3
    778         ldd     [%fp + ISF_G4_OFFSET], %g4    ! restore g4, g5
    779         ldd     [%fp + ISF_G6_OFFSET], %g6    ! restore g6, g7
    780 
    781         ldd     [%fp + ISF_I0_OFFSET], %i0    ! restore i0, i1
    782         ldd     [%fp + ISF_I2_OFFSET], %i2    ! restore i2, i3
    783         ldd     [%fp + ISF_I4_OFFSET], %i4    ! restore i4, i5
    784         ldd     [%fp + ISF_I6_FP_OFFSET], %i6 ! restore i6/fp, i7
    785 
    786         /*
    787          *  Registers:
    788          *
    789          *   ALL global registers EXCEPT G1 and the input registers have
     790          *   already been restored and thus are off limits.
    791          *
    792          *   The following is the contents of the local registers:
    793          *
    794          *     l0 = original psr
    795          *     l1 = return address (i.e. PC)
    796          *     l2 = nPC
    797          *     l3 = CWP
    798          */
    799 
    800         /*
    801          *  if (CWP + 1) is an invalid window then we need to reload it.
    802          *
    803          *  WARNING: Traps should now be disabled
    804          */
    805 
    806         mov     %l0, %psr                  !  **** DISABLE TRAPS ****
    807         nop
    808         nop
    809         nop
    810         rd      %wim, %l4
    811         add     %l0, 1, %l6                ! l6 = cwp + 1
    812         and     %l6, SPARC_PSR_CWP_MASK, %l6 ! do the modulo on it
    813         srl     %l4, %l6, %l5              ! l5 = win >> cwp + 1 ; shift count
    814                                            !  and CWP are conveniently LS 5 bits
    815         cmp     %l5, 1                     ! Is tasks window invalid?
    816         bne     good_task_window
    817 
    818         /*
    819          *  The following code is the same as a 1 position left rotate of WIM.
    820          */
    821 
    822         sll     %l4, 1, %l5                ! l5 = WIM << 1
    823         srl     %l4, SPARC_NUMBER_OF_REGISTER_WINDOWS-1 , %l4
    824                                            ! l4 = WIM >> (Number Windows - 1)
    825         or      %l4, %l5, %l4              ! l4 = (WIM << 1) |
    826                                            !      (WIM >> (Number Windows - 1))
    827 
    828         /*
    829          *  Now restore the window just as if we underflowed to it.
    830          */
    831 
    832         wr      %l4, 0, %wim               ! WIM = new WIM
    833         nop                                ! must delay after writing WIM
    834         nop
    835         nop
    836         restore                            ! now into the tasks window
    837 
    838         ldd     [%g1 + CPU_STACK_FRAME_L0_OFFSET], %l0
    839         ldd     [%g1 + CPU_STACK_FRAME_L2_OFFSET], %l2
    840         ldd     [%g1 + CPU_STACK_FRAME_L4_OFFSET], %l4
    841         ldd     [%g1 + CPU_STACK_FRAME_L6_OFFSET], %l6
    842         ldd     [%g1 + CPU_STACK_FRAME_I0_OFFSET], %i0
    843         ldd     [%g1 + CPU_STACK_FRAME_I2_OFFSET], %i2
    844         ldd     [%g1 + CPU_STACK_FRAME_I4_OFFSET], %i4
    845         ldd     [%g1 + CPU_STACK_FRAME_I6_FP_OFFSET], %i6
    846                                            ! reload of sp clobbers ISF
    847         save                               ! Back to ISR dispatch window
    848 
    849 good_task_window:
    850 
    851         mov     %l0, %psr                  !  **** DISABLE TRAPS ****
    852         nop; nop; nop
    853                                            !  and restore condition codes.
    854         ld      [%g1 + ISF_G1_OFFSET], %g1 ! restore g1
    855         jmp     %l1                        ! transfer control and
    856         rett    %l2                        ! go back to tasks window
    857 
     858     858 /* end of file */
  • cpukit/score/cpu/sparc/rtems/score/cpu.h

    ref99f23a r06dcaf0  
    77 *  the executive to the SPARC processor.
    88 *
    9  *  COPYRIGHT (c) 1989-2006.
     9 *  COPYRIGHT (c) 1989-2011.
    1010 *  On-Line Applications Research Corporation (OAR).
    1111 *
     
    928928) RTEMS_COMPILER_NO_RETURN_ATTRIBUTE;
    929929
     930#if defined(RTEMS_SMP)
     931  /*
     932   *  _CPU_Context_switch_to_first_task_smp
     933   *
     934   *  This routine is only used to switch to the first task on a
     935   *  secondary core in an SMP configuration.  We do not need to
     936   *  flush all the windows and, in fact, this can be dangerous
     937   *  as they may or may not be initialized properly.
     938   */
     939  void _CPU_Context_switch_to_first_task_smp(
     940    Context_Control *new_context
     941  );
     942
     943  /* address space 1 is uncacheable */
     944  #define SMP_CPU_SWAP( _address, _value, _previous ) \
     945    do { \
     946      register unsigned int _val = _value; \
     947      asm volatile( \
     948        "swapa [%2] %3, %0" : \
     949        "=r" (_val) : \
     950        "0" (_val), \
     951        "r" (_address), \
     952        "i" (1) \
     953      ); \
     954      _previous = _val; \
     955    } while (0)
     956
     957#endif
     958
    930959/*
    931960 *  _CPU_Context_save_fp
  • cpukit/score/include/rtems/score/basedefs.h

    ref99f23a r06dcaf0  
    137137
    138138/**
     139 *  The following defines a compiler specific attribute which informs
     140 *  the compiler that the method has no effect except the return value
     141 *  and that the return value depends only on parameters and/or global
     142 *  variables.
     143 */
     144#ifdef __GNUC__
     145  #define RTEMS_COMPILER_PURE_ATTRIBUTE \
     146     __attribute__ ((pure))
     147#else
     148  #define RTEMS_COMPILER_PURE_ATTRIBUTE
     149#endif
     150
     151/**
    139152 *  Instructs the compiler to issue a warning whenever a variable or function
    140153 *  with this attribute will be used.
  • cpukit/score/include/rtems/score/context.h

    ref99f23a r06dcaf0  
    102102   _CPU_Context_Restart_self( _the_context )
    103103
     104#if defined(RTEMS_SMP)
     105/*
     106 *  @brief Switch to First Task on Secondary Core
     107 *
     108 *  This routine is only used to switch to the first task on a
     109 *  secondary core in an SMP configuration.  Since the switch
     110 *  to the first task is done from an interrupt handler, this
     111 *  may be different from simply restarting the currently running
     112 *  task.
     113 *
     114 *  @param[in] _the_context is the context of the first thread to
     115 *             run on this core
     116 */
     117#define _Context_Switch_to_first_task_smp( _the_context ) \
     118   _CPU_Context_switch_to_first_task_smp( _the_context )
     119#endif
     120
    104121/**
    105122 *  @brief Return Starting Address of Floating Point Context
  • cpukit/score/include/rtems/score/percpu.h

    ref99f23a r06dcaf0  
    77
    88/*
    9  *  COPYRIGHT (c) 1989-2010.
     9 *  COPYRIGHT (c) 1989-2011.
    1010 *  On-Line Applications Research Corporation (OAR).
    1111 *
     
    1414 *  http://www.rtems.com/license/LICENSE.
    1515 *
    16  *  $Id$
     16 *  $Id$ 
    1717 */
    1818
     
    2424#ifdef ASM
    2525  #include <rtems/asm.h>
     26#else
     27  #if defined(RTEMS_SMP)
     28    #include <rtems/score/smplock.h>
     29  #endif
     30  #include <rtems/bspsmp.h>
    2631#endif
    2732
     
    4247
    4348#ifndef ASM
    44 
    45 /**
    46  * This forward defines the Thread Control Block structure.
    47  */
     49#ifndef __THREAD_CONTROL_DEFINED__
     50#define __THREAD_CONTROL_DEFINED__
    4851typedef struct Thread_Control_struct Thread_Control;
     52#endif
     53
     54#if (CPU_ALLOCATE_INTERRUPT_STACK == FALSE) && defined(RTEMS_SMP)
     55  #error "RTEMS must allocate per CPU interrupt stack for SMP"
     56#endif
     57
     58typedef enum {
     59 
     60  /**
     61   *  This defines the constant used to indicate that the cpu code is in
     62   *  its initial powered up start.
     63   */
     64   RTEMS_BSP_SMP_CPU_INITIAL_STATE = 1,
     65
     66  /**
     67   *  This defines the constant used to indicate that the cpu code has
     68   *  completed basic initialization and awaits further commands.
     69   */
     70   RTEMS_BSP_SMP_CPU_INITIALIZED = 2,
     71
     72  /**
     73   *  This defines the constant used to indicate that the cpu code has
     74   *  shut itself down.
     75   */
     76  RTEMS_BSP_SMP_CPU_SHUTDOWN = 3
     77} bsp_smp_cpu_state;
    4978
    5079/**
     
    5483 */
    5584typedef struct {
     85  #if defined(RTEMS_SMP)
     86    /** This element is used to lock this structure */
     87    SMP_lock_Control  lock;
     88
     89    /** This indicates that the CPU is online. */
     90    uint32_t          state;
     91
     92    /**
     93     *  This is the request for the interrupt.
     94     * 
     95     *  @note This may become a chain protected by atomic instructions.
     96     */
     97    uint32_t          message;
     98
     99  #endif
     100
    56101#if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
    57102    (CPU_HAS_SOFTWARE_INTERRUPT_STACK == TRUE)
     
    70115
    71116  /**
    72    *
    73117   *  This contains the current interrupt nesting level on this
    74118   *  CPU.
     
    92136
    93137#ifdef ASM
     138#if defined(RTEMS_SMP)
     139  #define PER_CPU_LOCK     0
     140  #define PER_CPU_STATE    (1 * __RTEMS_SIZEOF_VOID_P__)
     141  #define PER_CPU_MESSAGE  (2 * __RTEMS_SIZEOF_VOID_P__)
     142  #define PER_CPU_END_SMP  (3 * __RTEMS_SIZEOF_VOID_P__)
     143#else
     144  #define PER_CPU_END_SMP  0
     145#endif
    94146
    95147#if (CPU_ALLOCATE_INTERRUPT_STACK == TRUE) || \
     
    99151   *  we need to have places in the per cpu table to hold them.
    100152   */
    101   #define PER_CPU_INTERRUPT_STACK_LOW   0
    102   #define PER_CPU_INTERRUPT_STACK_HIGH  (1 * __RTEMS_SIZEOF_VOID_P__)
    103   #define PER_CPU_END_STACK             (2 * __RTEMS_SIZEOF_VOID_P__)
     153  #define PER_CPU_INTERRUPT_STACK_LOW   PER_CPU_END_SMP
     154  #define PER_CPU_INTERRUPT_STACK_HIGH  \
     155          PER_CPU_INTERRUPT_STACK_LOW + (1 * __RTEMS_SIZEOF_VOID_P__)
     156  #define PER_CPU_END_STACK             \
     157          PER_CPU_INTERRUPT_STACK_HIGH + (1 * __RTEMS_SIZEOF_VOID_P__)
    104158#else
    105   /*
    106    *  Otherwise, there are no interrupt stack addresses in the per CPU table.
    107    */
    108   #define PER_CPU_END_STACK             0
     159  #define PER_CPU_END_STACK             PER_CPU_END_SMP
    109160#endif
    110161
     
    148199 *  This is an array of per CPU core information.
    149200 */
    150 extern Per_CPU_Control _Per_CPU_Information;
    151 
    152 /*
    153  * On an SMP system, these macros dereference the CPU core number.
    154  * But on a non-SMP system, these macros are simple references.
     201extern Per_CPU_Control _Per_CPU_Information[];
     202
     203#if defined(RTEMS_SMP)
     204/**
     205 *  @brief Set of Pointers to Per CPU Core Information
     206 *
     207 *  This is an array of pointers to each CPU's per CPU data structure.
     208 *  It should be simpler to retrieve this pointer in assembly language
      210 *  than to calculate the array offset.
     210 */
     211extern Per_CPU_Control *_Per_CPU_Information_p[];
     212
     213/**
     214 *  @brief Initialize SMP Handler
     215 *
      216 *  This method initializes the SMP Handler.
     217 */
     218void _SMP_Handler_initialize(void);
     219
     220/**
     221 *  @brief Allocate and Initialize Per CPU Structures
     222 *
      223 *  This method allocates and initializes the per CPU structures.
     224 */
     225void _Per_CPU_Initialize(void);
     226
     227#endif
     228
     229/*
     230 * On a non SMP system, the bsp_smp_processor_id is defined to 0.
    155231 * Thus when built for non-SMP, there should be no performance penalty.
    156232 */
    157 #define _Thread_Heir              _Per_CPU_Information.heir
    158 #define _Thread_Executing         _Per_CPU_Information.executing
    159 #define _Thread_Idle              _Per_CPU_Information.idle
    160 #define _ISR_Nest_level           _Per_CPU_Information.isr_nest_level
    161 #define _CPU_Interrupt_stack_low  _Per_CPU_Information.interrupt_stack_low
    162 #define _CPU_Interrupt_stack_high _Per_CPU_Information.interrupt_stack_high
    163 #define _Thread_Dispatch_necessary _Per_CPU_Information.dispatch_necessary
     233#define _Thread_Heir \
     234  _Per_CPU_Information[bsp_smp_processor_id()].heir
     235#define _Thread_Executing \
     236  _Per_CPU_Information[bsp_smp_processor_id()].executing
     237#define _Thread_Idle \
     238  _Per_CPU_Information[bsp_smp_processor_id()].idle
     239#define _ISR_Nest_level \
     240  _Per_CPU_Information[bsp_smp_processor_id()].isr_nest_level
     241#define _CPU_Interrupt_stack_low \
     242  _Per_CPU_Information[bsp_smp_processor_id()].interrupt_stack_low
     243#define _CPU_Interrupt_stack_high \
     244  _Per_CPU_Information[bsp_smp_processor_id()].interrupt_stack_high
     245#define _Thread_Dispatch_necessary \
     246  _Per_CPU_Information[bsp_smp_processor_id()].dispatch_necessary
    164247
    165248#endif  /* ASM */
  • cpukit/score/preinstall.am

    ref99f23a r06dcaf0  
    3131PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/seterr.h
    3232
     33$(PROJECT_INCLUDE)/rtems/bspsmp.h: include/rtems/bspsmp.h $(PROJECT_INCLUDE)/rtems/$(dirstamp)
     34        $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/bspsmp.h
     35PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/bspsmp.h
     36
    3337$(PROJECT_INCLUDE)/rtems/score/$(dirstamp):
    3438        @$(MKDIR_P) $(PROJECT_INCLUDE)/rtems/score
     
    188192PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/basedefs.h
    189193
     194$(PROJECT_INCLUDE)/rtems/score/smplock.h: include/rtems/score/smplock.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
     195        $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/smplock.h
     196PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/smplock.h
     197
    190198if HAS_PTHREADS
    191199$(PROJECT_INCLUDE)/rtems/score/corespinlock.h: include/rtems/score/corespinlock.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
  • cpukit/score/src/percpu.c

    ref99f23a r06dcaf0  
    11/*
    2  *  COPYRIGHT (c) 1989-2010.
     2 *  COPYRIGHT (c) 1989-2011.
    33 *  On-Line Applications Research Corporation (OAR).
    44 *
     
    2020#include <rtems/score/wkspace.h>
    2121#include <rtems/config.h>
     22#include <rtems/bspsmp.h>
    2223#include <string.h>
    2324
    24 /*
    25  * On single core systems, we can efficiently directly access a single
    26  * statically allocated per cpu structure.  And the fields are initialized
    27  * as individual elements just like it has always been done.
    28  */
    29 Per_CPU_Control _Per_CPU_Information;
     25#if defined(RTEMS_SMP)
     26  void _SMP_Handler_initialize(void)
     27  {
     28    int         cpu;
     29    size_t      size;
     30    uintptr_t   ptr;
     31
     32    /*
     33     *  Initialize per CPU structures.
     34     */
     35    size = (_SMP_Processor_count) * sizeof(Per_CPU_Control);
     36    memset( _Per_CPU_Information, '\0', size );
     37
     38    /*
     39     *  Initialize per cpu pointer table
     40     */
     41    size = Configuration.interrupt_stack_size;
     42    _Per_CPU_Information_p[0] = &_Per_CPU_Information[0];
     43    for (cpu=1 ; cpu < _SMP_Processor_count ; cpu++ ) {
     44      Per_CPU_Control *p = &_Per_CPU_Information[cpu];
     45
     46      _Per_CPU_Information_p[cpu] = p;
     47
     48      p->interrupt_stack_low = _Workspace_Allocate_or_fatal_error( size );
     49
     50      ptr = (uintptr_t) _Addresses_Add_offset( p->interrupt_stack_low, size );
     51      ptr &= ~CPU_STACK_ALIGNMENT;
     52      p->interrupt_stack_high = (void *)ptr;
     53      p->state = RTEMS_BSP_SMP_CPU_INITIAL_STATE;
     54      RTEMS_COMPILER_MEMORY_BARRIER();
     55    }
     56  }
     57#else
     58  /*
     59   * On single core systems, we can efficiently directly access a single
     60   * statically allocated per cpu structure.  And the fields are initialized
     61   * as individual elements just like it has always been done.
     62   */
     63  Per_CPU_Control _Per_CPU_Information[1];
     64#endif
  • cpukit/score/src/thread.c

    ref99f23a r06dcaf0  
    33 *
    44 *
    5  *  COPYRIGHT (c) 1989-2008.
     5 *  COPYRIGHT (c) 1989-2011.
    66 *  On-Line Applications Research Corporation (OAR).
    77 *
     
    3434#include <rtems/config.h>
    3535
    36 /*PAGE
    37  *
     36#if defined(RTEMS_SMP)
     37  #include <rtems/bspsmp.h>
     38#endif
     39
     40/*
    3841 *  _Thread_Handler_initialization
    3942 *
     
    4952  uint32_t     ticks_per_timeslice;
    5053  uint32_t     maximum_extensions;
     54  uint32_t     maximum_internal_threads;
    5155  #if defined(RTEMS_MULTIPROCESSING)
    5256    uint32_t   maximum_proxies;
     
    8185  _Thread_Ticks_per_timeslice  = ticks_per_timeslice;
    8286
    83 #if defined(RTEMS_MULTIPROCESSING)
    84   _Thread_MP_Handler_initialization( maximum_proxies );
    85 #endif
     87  #if defined(RTEMS_MULTIPROCESSING)
     88    _Thread_MP_Handler_initialization( maximum_proxies );
     89  #endif
    8690
    8791  /*
    88    *  Initialize this class of objects.
     92   *  Initialize the internal class of threads.  We need an IDLE thread
     93   *  per CPU in an SMP system.  In addition, if this is a loosely
     94   *  coupled multiprocessing system, account for the MPCI Server Thread.
    8995   */
     96  #if defined(RTEMS_SMP)
     97    maximum_internal_threads = rtems_smp_maximum_processors;
     98  #else
     99    maximum_internal_threads = 1;
     100  #endif
     101
     102  #if defined(RTEMS_MULTIPROCESSING)
     103    if ( _System_state_Is_multiprocessing )
     104      maximum_internal_threads += 1;
     105  #endif
    90106
    91107  _Objects_Initialize_information(
     
    93109    OBJECTS_INTERNAL_API,
    94110    OBJECTS_INTERNAL_THREADS,
    95 #if defined(RTEMS_MULTIPROCESSING)
    96     ( _System_state_Is_multiprocessing ) ?  2 : 1,
    97 #else
    98     1,
    99 #endif
     111    maximum_internal_threads,
    100112    sizeof( Thread_Control ),
    101113                                /* size of this object's control block */
    102114    false,                      /* true if names for this object are strings */
    103115    8                           /* maximum length of each object's name */
    104 #if defined(RTEMS_MULTIPROCESSING)
    105     ,
    106     false,                      /* true if this is a global object class */
    107     NULL                        /* Proxy extraction support callout */
    108 #endif
     116    #if defined(RTEMS_MULTIPROCESSING)
     117      ,
     118      false,                      /* true if this is a global object class */
     119      NULL                        /* Proxy extraction support callout */
     120    #endif
    109121  );
    110122
  • cpukit/score/src/threadcreateidle.c

    ref99f23a r06dcaf0  
    33 *
    44 *
    5  *  COPYRIGHT (c) 1989-2008.
     5 *  COPYRIGHT (c) 1989-2011.
    66 *  On-Line Applications Research Corporation (OAR).
    77 *
     
    3131#include <rtems/score/wkspace.h>
    3232#include <rtems/config.h>
     33#include <rtems/bspsmp.h>
    3334
    34 /*PAGE
    35  *
    36  *  _Thread_Create_idle
    37  */
     35static inline void _Thread_Create_idle_helper(
     36  uint32_t name_u32,
     37  int      cpu
     38)
     39{
     40  Objects_Name    name;
     41  Thread_Control *idle;
    3842
    39 void _Thread_Create_idle( void )
    40 {
    41   Objects_Name name;
    42 
    43   name.name_u32 = _Objects_Build_name( 'I', 'D', 'L', 'E' );
     43  name.name_u32 = name_u32;
    4444
    4545  /*
     
    4848   *  _Workspace_Initialization.
    4949   */
    50   _Thread_Idle = _Thread_Internal_allocate();
     50  idle = _Thread_Internal_allocate();
    5151
    5252  /*
     
    5959  _Thread_Initialize(
    6060    &_Thread_Internal_information,
    61     _Thread_Idle,
     61    idle,
    6262    NULL,        /* allocate the stack */
    6363    _Stack_Ensure_minimum( Configuration.idle_task_stack_size ),
     
    7777   *             MUST be done before _Thread_Start is invoked.
    7878   */
    79   _Thread_Heir      =
    80   _Thread_Executing = _Thread_Idle;
     79  _Per_CPU_Information[ cpu ].idle      =
     80  _Per_CPU_Information[ cpu ].heir      =
     81  _Per_CPU_Information[ cpu ].executing = idle;
    8182
    8283  _Thread_Start(
    83     _Thread_Idle,
     84    idle,
    8485    THREAD_START_NUMERIC,
    8586    Configuration.idle_task,
     
    8788    0
    8889  );
     90}
    8991
     92void _Thread_Create_idle( void )
     93{
     94  #if defined(RTEMS_SMP)
     95    int cpu;
     96
     97    for ( cpu=0 ; cpu < _SMP_Processor_count ; cpu++ ) {
     98      _Thread_Create_idle_helper(
     99        _Objects_Build_name( 'I', 'D', 'L', 'E' ),
     100        cpu
     101      );
     102    }
     103  #else
     104    _Thread_Create_idle_helper(_Objects_Build_name( 'I', 'D', 'L', 'E' ), 0);
     105  #endif
    90106}
Note: See TracChangeset for help on using the changeset viewer.