Changeset 38b59a6 in rtems


Timestamp:
May 2, 2014, 8:31:09 AM
Author:
Sebastian Huber <sebastian.huber@…>
Branches:
4.11, master
Children:
6c36946f
Parents:
58444f7
git-author:
Sebastian Huber <sebastian.huber@…> (05/02/14 08:31:09)
git-committer:
Sebastian Huber <sebastian.huber@…> (05/07/14 12:26:28)
Message:

score: Implement forced thread migration

The current implementation of task migration in RTEMS has some
implications with respect to interrupt latency. It is crucial to
preserve the system invariant that a task can execute on at most one
processor in the system at a time. This is accomplished with a boolean
indicator in the task context. The processor architecture specific
low-level task context switch code marks that a task context is no
longer executing and waits until the heir context has stopped execution
before it restores the heir context and resumes execution of the heir
task. So there is one point in time in which a processor is without a
task. This is essential to avoid cyclic dependencies in case multiple
tasks migrate at once. Otherwise some supervising entity would be
necessary to prevent livelocks. Such a global supervisor would lead to
scalability problems, so this approach is not used. Currently the
thread dispatch is performed with interrupts disabled, so in case the
heir task is currently executing on another processor, this prolongs
the time of disabled interrupts, since one processor has to wait for
another processor to make progress.

It is difficult to avoid this issue with the interrupt latency since
interrupts normally store the context of the interrupted task on its
stack. In case a task is marked as not executing, we must not use its
task stack to store such an interrupt context. We cannot use the heir
stack before the heir has stopped execution on another processor. So if
we enable interrupts during this transition, we have to provide an
alternative, task-independent stack for this time frame. This issue
needs further investigation.
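
As an illustration, the handshake described above can be sketched in C (a
minimal sketch with hypothetical names; the actual implementations are the
architecture-specific context switch routines changed below, and RTEMS uses a
volatile bool plus memory barriers rather than C11 atomics):

    #include <stdatomic.h>
    #include <stdbool.h>

    typedef struct {
      /* Set while some processor executes this context */
      atomic_bool is_executing;
      /* ... saved registers ... */
    } context_control;

    void context_switch( context_control *executing, context_control *heir )
    {
      /* ... save the registers of the executing context ... */

      /* Indicate that this context is no longer executing */
      atomic_store( &executing->is_executing, false );

      /*
       * Wait until the heir context stopped execution on its previous
       * processor.  Between the store above and the store below this
       * processor executes no task.
       */
      while ( atomic_load( &heir->is_executing ) ) {
        /* Busy wait */
      }

      /* Indicate that the heir context is executing on this processor */
      atomic_store( &heir->is_executing, true );

      /* ... restore the registers of the heir context ... */
    }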

Files:
4 added
39 edited

  • c/src/lib/libbsp/sparc/shared/irq_asm.S

    r58444f7 r38b59a6  
    164164        nop
    165165
     166#if defined(RTEMS_SMP)
     167        ! Indicate that this context is no longer executing
     168        stb     %g0, [%o0 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
     169
     170        ! Wait for context to stop execution if necessary
     1711:
     172        ldub    [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET], %g1
     173        cmp     %g1, 0
     174        bne     1b
     175         mov    1, %g1
     176
     177        ! Indicate that this context is executing
     178        stb     %g1, [%o1 + SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
     179#endif
     180
    166181        ld      [%o1 + G5_OFFSET], %g5        ! restore the global registers
    167182        ld      [%o1 + G7_OFFSET], %g7
     
    203218        save    %sp, -CPU_MINIMUM_STACK_FRAME_SIZE, %sp
    204219        rd      %psr, %o2
     220#if defined(RTEMS_SMP)
     221        ! On SPARC the restore path needs also a valid executing context on SMP
     222        ! to update the is executing indicator.
     223        mov     %i0, %o0
     224#endif
    205225        ba      SYM(_CPU_Context_restore_heir)
    206226        mov     %i0, %o1                      ! in the delay slot
  • c/src/lib/libcpu/powerpc/new-exceptions/cpu.c

    r58444f7 r38b59a6  
    131131  the_ppc_context->lr = (uint32_t) entry_point;
    132132
     133#ifdef RTEMS_SMP
     134  the_ppc_context->is_executing = false;
     135#endif
     136
    133137#ifdef __ALTIVEC__
    134138  _CPU_Context_initialize_altivec( the_ppc_context );
  • c/src/lib/libcpu/powerpc/new-exceptions/cpu_asm.S

    r58444f7 r38b59a6  
    327327        stw     r2, PPC_CONTEXT_OFFSET_GPR2(r3)
    328328
     329#ifdef RTEMS_SMP
     330        /* Indicate that this context is no longer executing */
     331        msync
     332        li      r5, 0
     333        stb     r5, PPC_CONTEXT_OFFSET_IS_EXECUTING(r3)
     334#endif
     335
    329336        /* Restore context from r4 */
    330337restore_context:
     338
     339#ifdef RTEMS_SMP
     340        /* Wait for context to stop execution if necessary */
     3411:
     342        lbz     r5, PPC_CONTEXT_OFFSET_IS_EXECUTING(r4)
     343        cmpwi   r5, 0
     344        bne     1b
     345
     346        /* Indicate that this context is executing */
     347        li      r5, 1
     348        stb     r5, PPC_CONTEXT_OFFSET_IS_EXECUTING(r4)
     349        isync
     350#endif
    331351
    332352#ifdef __ALTIVEC__
  • cpukit/libmisc/cpuuse/cpuusagereport.c

    r58444f7 r38b59a6  
    4444    #else
    4545      /* FIXME: Locking */
    46       if ( the_thread->is_executing ) {
     46      if ( _Thread_Is_executing_on_a_processor( the_thread ) ) {
    4747        *time_of_context_switch =
    4848          _Thread_Get_CPU( the_thread )->time_of_last_context_switch;
  • cpukit/rtems/include/rtems/rtems/tasks.h

    r58444f7 r38b59a6  
    570570 * @retval RTEMS_SUCCESSFUL Successful operation.
    571571 * @retval RTEMS_INVALID_ID Invalid task or scheduler identifier.
    572  * @retval RTEMS_INCORRECT_STATE The task is in the wrong state to perform a
    573  * scheduler change.
    574572 *
    575573 * @see rtems_scheduler_ident().
  • cpukit/rtems/src/tasksetscheduler.c

    r58444f7 r38b59a6  
    3131    Thread_Control    *the_thread;
    3232    Objects_Locations  location;
    33     bool               ok;
    3433
    3534    the_thread = _Thread_Get( id, &location );
     
    3736    switch ( location ) {
    3837      case OBJECTS_LOCAL:
    39         ok = _Scheduler_Set( scheduler, the_thread );
     38        _Scheduler_Set( scheduler, the_thread );
    4039        _Objects_Put( &the_thread->Object );
    41         sc = ok ? RTEMS_SUCCESSFUL : RTEMS_INCORRECT_STATE;
     40        sc = RTEMS_SUCCESSFUL;
    4241        break;
    4342#if defined(RTEMS_MULTIPROCESSING)
  • cpukit/score/cpu/arm/cpu.c

    r58444f7 r38b59a6  
    5151#endif
    5252
     53#ifdef RTEMS_SMP
     54  RTEMS_STATIC_ASSERT(
     55    offsetof( Context_Control, is_executing )
     56      == ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET,
     57    ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET
     58  );
     59#endif
     60
    5361RTEMS_STATIC_ASSERT(
    5462  sizeof( CPU_Exception_frame ) == ARM_EXCEPTION_FRAME_SIZE,
     
    92100#ifdef ARM_MULTILIB_HAS_THREAD_ID_REGISTER
    93101  the_context->thread_id = (uint32_t) tls_area;
     102#endif
     103
     104#ifdef RTEMS_SMP
     105  the_context->is_executing = false;
    94106#endif
    95107
  • cpukit/score/cpu/arm/cpu_asm.S

    r58444f7 r38b59a6  
    6868#endif
    6969
     70#ifdef RTEMS_SMP
     71        /* Indicate that this context is no longer executing */
     72        dmb
     73        mov     r3, #0
     74        strb    r3, [r0, #ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
     75#endif
     76
    7077/* Start restoring context */
    7178_restore:
    7279#ifdef ARM_MULTILIB_HAS_LOAD_STORE_EXCLUSIVE
    7380        clrex
     81#endif
     82
     83#ifdef RTEMS_SMP
     84        /* Wait for context to stop execution if necessary */
     851:
     86        ldrb    r3, [r1, #ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
     87        cmp     r3, #0
     88        bne     1b
     89
     90        /* Indicate that this context is executing */
     91        dmb
     92        mov     r3, #1
     93        strb    r3, [r1, #ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET]
    7494#endif
    7595
  • cpukit/score/cpu/arm/rtems/score/cpu.h

    r58444f7 r38b59a6  
    215215#ifdef ARM_MULTILIB_VFP_D32
    216216  #define ARM_CONTEXT_CONTROL_D8_OFFSET 48
     217#endif
     218
     219#ifdef RTEMS_SMP
     220  #ifdef ARM_MULTILIB_VFP_D32
     221    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 112
     222  #else
     223    #define ARM_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 48
     224  #endif
    217225#endif
    218226
     
    281289  uint64_t register_d15;
    282290#endif
     291#ifdef RTEMS_SMP
     292  volatile bool is_executing;
     293#endif
    283294} Context_Control;
    284295
     
    410421#define _CPU_Context_Get_SP( _context ) \
    411422  (_context)->register_sp
     423
     424#ifdef RTEMS_SMP
     425  #define _CPU_Context_Get_is_executing( _context ) \
     426    (_context)->is_executing
     427#endif
    412428
    413429#define _CPU_Context_Restart_self( _the_context ) \
  • cpukit/score/cpu/i386/cpu.c

    r58444f7 r38b59a6  
    2626#include <rtems/bspIo.h>
    2727#include <rtems/score/thread.h>
     28
     29#define I386_ASSERT_OFFSET(field, off) \
     30  RTEMS_STATIC_ASSERT( \
     31    offsetof(Context_Control, field) \
     32      == I386_CONTEXT_CONTROL_ ## off ## _OFFSET, \
     33    Context_Control_ ## field \
     34  )
     35
     36I386_ASSERT_OFFSET(eflags, EFLAGS);
     37I386_ASSERT_OFFSET(esp, ESP);
     38I386_ASSERT_OFFSET(ebp, EBP);
     39I386_ASSERT_OFFSET(ebx, EBX);
     40I386_ASSERT_OFFSET(esi, ESI);
     41I386_ASSERT_OFFSET(edi, EDI);
     42
     43#ifdef RTEMS_SMP
     44  I386_ASSERT_OFFSET(is_executing, IS_EXECUTING);
     45#endif
    2846
    2947void _CPU_Initialize(void)
  • cpukit/score/cpu/i386/cpu_asm.S

    r58444f7 r38b59a6  
    2727 */
    2828
    29 .set REG_EFLAGS,  0
    30 .set REG_ESP,     REG_EFLAGS + 4
    31 .set REG_EBP,     REG_ESP + 4
    32 .set REG_EBX,     REG_EBP + 4
    33 .set REG_ESI,     REG_EBX + 4
    34 .set REG_EDI,     REG_ESI + 4
    35 .set SIZE_REGS,   REG_EDI + 4
     29.set REG_EFLAGS,  I386_CONTEXT_CONTROL_EFLAGS_OFFSET
     30.set REG_ESP,     I386_CONTEXT_CONTROL_ESP_OFFSET
     31.set REG_EBP,     I386_CONTEXT_CONTROL_EBP_OFFSET
     32.set REG_EBX,     I386_CONTEXT_CONTROL_EBX_OFFSET
     33.set REG_ESI,     I386_CONTEXT_CONTROL_ESI_OFFSET
     34.set REG_EDI,     I386_CONTEXT_CONTROL_EDI_OFFSET
    3635
    3736        BEGIN_CODE
     
    5958        movl      edi,REG_EDI(eax)         /* save destination register */
    6059
     60#ifdef RTEMS_SMP
     61        /* Indicate that this context is no longer executing */
     62        movb      $0, I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET(eax)
     63#endif
     64
    6165        movl      HEIRCONTEXT_ARG(esp),eax /* eax = heir threads context */
    6266
    6367restore:
     68#ifdef RTEMS_SMP
     69        /* Wait for context to stop execution if necessary */
     701:
     71        movb      I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET(eax), bl
     72        testb     bl, bl
     73        jne       1b
     74
     75        /* Indicate that this context is executing */
     76        movb      $1, I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET(eax)
     77#endif
     78
    6479        pushl     REG_EFLAGS(eax)          /* push eflags */
    6580        popf                               /* restore eflags */
  • cpukit/score/cpu/i386/rtems/score/cpu.h

    r58444f7 r38b59a6  
    129129#define CPU_PER_CPU_CONTROL_SIZE 0
    130130
     131#define I386_CONTEXT_CONTROL_EFLAGS_OFFSET 0
     132#define I386_CONTEXT_CONTROL_ESP_OFFSET 4
     133#define I386_CONTEXT_CONTROL_EBP_OFFSET 8
     134#define I386_CONTEXT_CONTROL_EBX_OFFSET 12
     135#define I386_CONTEXT_CONTROL_ESI_OFFSET 16
     136#define I386_CONTEXT_CONTROL_EDI_OFFSET 20
     137
     138#ifdef RTEMS_SMP
     139  #define I386_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 24
     140#endif
     141
    131142/* structures */
    132143
     
    148159  uint32_t    esi;      /* extended source index register            */
    149160  uint32_t    edi;      /* extended destination index flags register */
     161#ifdef RTEMS_SMP
     162  volatile bool is_executing;
     163#endif
    150164}   Context_Control;
    151165
    152166#define _CPU_Context_Get_SP( _context ) \
    153167  (_context)->esp
     168
     169#ifdef RTEMS_SMP
     170  #define _CPU_Context_Get_is_executing( _context ) \
     171    (_context)->is_executing
     172#endif
    154173
    155174/*
     
    436455
    437456
     457#ifdef RTEMS_SMP
     458  #define _I386_Context_Initialize_is_executing( _the_context ) \
     459    (_the_context)->is_executing = false
     460#else
     461  #define _I386_Context_Initialize_is_executing( _the_context )
     462#endif
     463
    438464#define _CPU_Context_Initialize( _the_context, _stack_base, _size, \
    439465                                   _isr, _entry_point, _is_fp, _tls_area ) \
     
    450476    (_the_context)->ebp     = (void *) 0; \
    451477    (_the_context)->esp     = (void *) _stack; \
     478    _I386_Context_Initialize_is_executing( _the_context ); \
    452479  } while (0)
    453480
  • cpukit/score/cpu/no_cpu/rtems/score/cpu.h

    r58444f7 r38b59a6  
    575575     */
    576576    uint32_t   stack_pointer;
     577
     578#ifdef RTEMS_SMP
     579    /**
     580     * @brief On SMP configurations the thread context must contain a boolean
     581     * indicator if this context is executing on a processor.
     582     *
     583     * This field must be updated during a context switch.  The context switch
     584     * to the heir must wait until the heir context indicates that it is no
     585     * longer executing on a processor.
     586     */
     587    volatile bool is_executing;
     588#endif
    577589} Context_Control;
    578590
     
    15831595    __asm__ volatile ( "" : : : "memory" );
    15841596  }
     1597
     1598  /**
     1599   * @brief Macro to return the is executing field of the thread context.
     1600   */
     1601  #define _CPU_Context_Get_is_executing( _context ) \
     1602    ( ( _context )->is_executing )
    15851603#endif
    15861604
  • cpukit/score/cpu/powerpc/cpu.c

    r58444f7 r38b59a6  
    5454PPC_ASSERT_OFFSET(gpr2, GPR2);
    5555
     56#ifdef RTEMS_SMP
     57  PPC_ASSERT_OFFSET(is_executing, IS_EXECUTING);
     58#endif
     59
    5660RTEMS_STATIC_ASSERT(
    5761  sizeof(Context_Control) % PPC_DEFAULT_CACHE_LINE_SIZE == 0,
  • cpukit/score/cpu/powerpc/rtems/score/cpu.h

    r58444f7 r38b59a6  
    303303  PPC_GPR_TYPE gpr31;
    304304  uint32_t gpr2;
     305  #ifdef RTEMS_SMP
     306    volatile bool is_executing;
     307  #endif
    305308  #ifdef __ALTIVEC__
    306309    /*
     
    328331} Context_Control;
    329332
    330 static inline ppc_context *ppc_get_context( Context_Control *context )
     333static inline ppc_context *ppc_get_context( const Context_Control *context )
    331334{
    332335  uintptr_t clsz = PPC_DEFAULT_CACHE_LINE_SIZE;
     
    339342#define _CPU_Context_Get_SP( _context ) \
    340343  ppc_get_context(_context)->gpr1
     344
     345#ifdef RTEMS_SMP
     346  #define _CPU_Context_Get_is_executing( _context ) \
     347    ppc_get_context(_context)->is_executing
     348#endif
    341349#endif /* ASM */
    342350
     
    368376#define PPC_CONTEXT_OFFSET_GPR31 PPC_CONTEXT_GPR_OFFSET( 31 )
    369377#define PPC_CONTEXT_OFFSET_GPR2 PPC_CONTEXT_GPR_OFFSET( 32 )
     378
     379#ifdef RTEMS_SMP
     380  #define PPC_CONTEXT_OFFSET_IS_EXECUTING (PPC_CONTEXT_GPR_OFFSET( 32 ) + 4)
     381#endif
    370382
    371383#ifndef ASM
  • cpukit/score/cpu/sparc/cpu.c

    r58444f7 r38b59a6  
    6868SPARC_ASSERT_OFFSET(isr_dispatch_disable, ISR_DISPATCH_DISABLE_STACK);
    6969
     70#if defined(RTEMS_SMP)
     71SPARC_ASSERT_OFFSET(is_executing, SPARC_CONTEXT_CONTROL_IS_EXECUTING);
     72#endif
     73
    7074/*
    7175 *  This initializes the set of opcodes placed in each trap
     
    327331    the_context->isr_dispatch_disable = 0;
    328332
     333#if defined(RTEMS_SMP)
     334  the_context->is_executing = false;
     335#endif
     336
    329337  if ( tls_area != NULL ) {
    330338    void *tcb = _TLS_TCB_after_TLS_block_initialize( tls_area );
  • cpukit/score/cpu/sparc/rtems/score/cpu.h

    r58444f7 r38b59a6  
    474474   */
    475475  uint32_t   isr_dispatch_disable;
     476
     477#if defined(RTEMS_SMP)
     478  volatile bool is_executing;
     479#endif
    476480} Context_Control;
    477481
     
    484488  (_context)->o6_sp
    485489
     490#ifdef RTEMS_SMP
     491  #define _CPU_Context_Get_is_executing( _context ) \
     492    (_context)->is_executing
     493#endif
     494
    486495#endif /* ASM */
    487496
     
    538547/** This macro defines an offset into the context for use in assembly. */
    539548#define ISR_DISPATCH_DISABLE_STACK_OFFSET 0x54
     549
     550#if defined(RTEMS_SMP)
     551  #define SPARC_CONTEXT_CONTROL_IS_EXECUTING_OFFSET 0x58
     552#endif
    540553
    541554/** This defines the size of the context area for use in assembly. */
  • cpukit/score/include/rtems/score/percpu.h

    r58444f7 r38b59a6  
    5656typedef struct Thread_Control_struct Thread_Control;
    5757#endif
     58
     59struct Scheduler_Context;
    5860
    5961/**
     
    269271  volatile uint32_t thread_dispatch_disable_level;
    270272
    271   /** This is set to true when this CPU needs to run the dispatcher. */
     273  /**
     274   * @brief This is set to true when this processor needs to run the
     275   * dispatcher.
     276   *
     277   * It is volatile since interrupts may alter this flag.
     278   *
     279   * This field is not protected by a lock.  There are two writers after
     280   * multitasking start.  The scheduler owning this processor sets this
     281   * indicator to true, after it updated the heir field.  This processor sets
     282   * this indicator to false, before it reads the heir.  This field is used in
     283   * combination with the heir field.
     284   *
     285   * @see _Thread_Get_heir_and_make_it_executing().
     286   */
    272287  volatile bool dispatch_necessary;
    273288
    274   /** This is the thread executing on this CPU. */
     289  /**
     290   * @brief This is the thread executing on this processor.
     291   *
     292   * This field is not protected by a lock.  The only writer is this processor.
     293   *
     294   * On SMP configurations a thread may be registered as executing on more than
     295   * one processor in case a thread migration is in progress.  On SMP
     296   * configurations use _Thread_Is_executing_on_a_processor() to figure out if
     297   * a thread context is executing on a processor.
     298   */
    275299  Thread_Control *executing;
    276300
    277   /** This is the heir thread for this this CPU. */
     301  /**
     302   * @brief This is the heir thread for this processor.
     303   *
     304   * This field is not protected by a lock.  The only writer after multitasking
     305   * start is the scheduler owning this processor.  This processor will set the
     306   * dispatch necessary indicator to false, before it reads the heir.  This
     307   * field is used in combination with the dispatch necessary indicator.
     308   *
     309   * A thread can be a heir on at most one processor in the system.
     310   *
     311   * @see _Thread_Get_heir_and_make_it_executing().
     312   */
    278313  Thread_Control *heir;
    279314
     
    283318  #if defined( RTEMS_SMP )
    284319    /**
    285      * @brief This lock protects the dispatch_necessary, executing, heir and
    286      * message fields.
     320     * @brief This lock protects some parts of the low-level thread dispatching.
    287321     *
    288322     * We must use a ticket lock here since we cannot transport a local context
    289323     * through the context switch.
     324     *
     325     * @see _Thread_Dispatch().
    290326     */
    291327    SMP_ticket_lock_Control Lock;
     
    309345     */
    310346    Atomic_Ulong message;
     347
     348    /**
     349     * @brief The scheduler context of the scheduler owning this processor.
     350     */
     351    const struct Scheduler_Context *scheduler_context;
    311352
    312353    /**
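
The ordering rules in these comments pair a writer that stores the heir before
setting the dispatch necessary indicator with a reader that clears the
indicator before loading the heir, with a full fence in between on both sides.
A minimal C sketch of this pairing (hypothetical names; the real code is
_Scheduler_SMP_Update_heir() and _Thread_Get_heir_and_make_it_executing() in
the diffs below):

    #include <stdatomic.h>
    #include <stdbool.h>

    struct thread;

    struct per_cpu {
      struct thread *heir;
      volatile bool  dispatch_necessary;
    };

    /* Writer: the scheduler owning the processor */
    void update_heir( struct per_cpu *cpu_for_heir, struct thread *heir )
    {
      cpu_for_heir->heir = heir;
      /* The heir must be visible before the indicator is set */
      atomic_thread_fence( memory_order_seq_cst );
      cpu_for_heir->dispatch_necessary = true;
    }

    /* Reader: the processor itself during thread dispatch */
    struct thread *take_heir( struct per_cpu *cpu_self )
    {
      cpu_self->dispatch_necessary = false;
      /* The indicator must be cleared before the heir is read */
      atomic_thread_fence( memory_order_seq_cst );
      return cpu_self->heir;
    }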
  • cpukit/score/include/rtems/score/scheduler.h

    r58444f7 r38b59a6  
    149149 * this structure at the begin of its context structure.
    150150 */
    151 typedef struct {
     151typedef struct Scheduler_Context {
    152152#if defined(RTEMS_SMP)
    153153  /**
  • cpukit/score/include/rtems/score/schedulerimpl.h

    r58444f7 r38b59a6  
    391391}
    392392
    393 RTEMS_INLINE_ROUTINE bool _Scheduler_Set(
    394   const Scheduler_Control *scheduler,
    395   Thread_Control          *the_thread
    396 )
    397 {
    398   bool ok;
    399 
    400   if ( _States_Is_dormant( the_thread->current_state ) ) {
    401 #if defined(RTEMS_SMP)
     393RTEMS_INLINE_ROUTINE void _Scheduler_Set(
     394  const Scheduler_Control *scheduler,
     395  Thread_Control          *the_thread
     396)
     397{
     398#if defined(RTEMS_SMP)
     399  const Scheduler_Control *current_scheduler = _Scheduler_Get( the_thread );
     400
     401  if ( current_scheduler != scheduler ) {
     402    _Thread_Set_state( the_thread, STATES_MIGRATING );
    402403    _Scheduler_Free( _Scheduler_Get( the_thread ), the_thread );
    403404    the_thread->scheduler = scheduler;
    404405    _Scheduler_Allocate( scheduler, the_thread );
    405406    _Scheduler_Update( scheduler, the_thread );
     407    _Thread_Clear_state( the_thread, STATES_MIGRATING );
     408  }
    406409#else
    407     (void) scheduler;
    408 #endif
    409 
    410     ok = true;
    411   } else {
    412     ok = false;
    413   }
    414 
    415   return ok;
     410  (void) scheduler;
     411#endif
    416412}
    417413
     
    449445  }
    450446
    451   if ( ok ) {
    452     ok = _Scheduler_Set( scheduler, the_thread );
    453   }
     447  _Scheduler_Set( scheduler, the_thread );
    454448
    455449  return ok;
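
With the RTEMS_INCORRECT_STATE case gone, rtems_task_set_scheduler() now also
works for started tasks and forces a migration instead of failing.  A minimal
usage sketch (the scheduler name and task id are hypothetical; compare the
test updates in smpscheduler02/init.c and spscheduler01/init.c below):

    #include <rtems.h>

    void move_task_to_scheduler_b( rtems_id task_id )
    {
      rtems_status_code sc;
      rtems_id          scheduler_b_id;

      /* Look up the second scheduler instance by its configured name */
      sc = rtems_scheduler_ident(
        rtems_build_name( 'B', ' ', ' ', ' ' ),
        &scheduler_b_id
      );
      /* sc == RTEMS_SUCCESSFUL expected */

      /* Succeeds even for a started task and migrates it if necessary */
      sc = rtems_task_set_scheduler( task_id, scheduler_b_id );
      /* sc == RTEMS_SUCCESSFUL expected */
    }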
  • cpukit/score/include/rtems/score/schedulersmp.h

    r58444f7 r38b59a6  
    2525
    2626#include <rtems/score/chain.h>
    27 #include <rtems/score/percpu.h>
    28 #include <rtems/score/prioritybitmap.h>
    29 #include <rtems/score/thread.h>
     27#include <rtems/score/scheduler.h>
    3028
    3129#ifdef __cplusplus
  • cpukit/score/include/rtems/score/schedulersmpimpl.h

    r58444f7 r38b59a6  
    2525
    2626#include <rtems/score/schedulersmp.h>
     27#include <rtems/score/assert.h>
     28#include <rtems/score/chainimpl.h>
    2729#include <rtems/score/schedulersimpleimpl.h>
    28 #include <rtems/score/chainimpl.h>
    29 #include <rtems/score/scheduler.h>
    3030
    3131#ifdef __cplusplus
     
    6565}
    6666
     67static inline bool _Scheduler_SMP_Is_processor_owned_by_us(
     68  const Scheduler_SMP_Context *self,
     69  const Per_CPU_Control *cpu
     70)
     71{
     72  return cpu->scheduler_context == &self->Base;
     73}
     74
     75static inline void _Scheduler_SMP_Update_heir(
     76  Per_CPU_Control *cpu_self,
     77  Per_CPU_Control *cpu_for_heir,
     78  Thread_Control *heir
     79)
     80{
     81  cpu_for_heir->heir = heir;
     82
     83  /*
     84   * It is critical that we first update the heir and then the dispatch
     85   * necessary so that _Thread_Get_heir_and_make_it_executing() cannot miss an
     86   * update.
     87   */
     88  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
     89
     90  /*
     91   * Only update the dispatch necessary indicator if not already set to
     92   * avoid superfluous inter-processor interrupts.
     93   */
     94  if ( !cpu_for_heir->dispatch_necessary ) {
     95    cpu_for_heir->dispatch_necessary = true;
     96
     97    if ( cpu_for_heir != cpu_self ) {
     98      _Per_CPU_Send_interrupt( cpu_for_heir );
     99    }
     100  }
     101}
     102
    67103static inline void _Scheduler_SMP_Allocate_processor(
     104  Scheduler_SMP_Context *self,
    68105  Thread_Control *scheduled,
    69106  Thread_Control *victim
     
    72109  Per_CPU_Control *cpu_of_scheduled = _Thread_Get_CPU( scheduled );
    73110  Per_CPU_Control *cpu_of_victim = _Thread_Get_CPU( victim );
     111  Per_CPU_Control *cpu_self = _Per_CPU_Get();
    74112  Thread_Control *heir;
    75113
     
    77115  victim->is_scheduled = false;
    78116
    79   _Per_CPU_Acquire( cpu_of_scheduled );
    80 
    81   if ( scheduled->is_executing ) {
    82     heir = cpu_of_scheduled->heir;
    83     cpu_of_scheduled->heir = scheduled;
     117  _Assert( _ISR_Get_level() != 0 );
     118
     119  if ( _Thread_Is_executing_on_a_processor( scheduled ) ) {
     120    if ( _Scheduler_SMP_Is_processor_owned_by_us( self, cpu_of_scheduled ) ) {
     121      heir = cpu_of_scheduled->heir;
     122      _Scheduler_SMP_Update_heir( cpu_self, cpu_of_scheduled, scheduled );
     123    } else {
     124      /* We have to force a migration to our processor set */
     125      _Assert( scheduled->debug_real_cpu->heir != scheduled );
     126      heir = scheduled;
     127    }
    84128  } else {
    85129    heir = scheduled;
    86130  }
    87131
    88   _Per_CPU_Release( cpu_of_scheduled );
    89 
    90132  if ( heir != victim ) {
    91     const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
    92 
    93133    _Thread_Set_CPU( heir, cpu_of_victim );
    94 
    95     cpu_of_victim->heir = heir;
    96 
    97     /*
    98      * It is critical that we first update the heir and then the dispatch
    99      * necessary so that _Thread_Dispatch() cannot miss an update.
    100      */
    101     _Atomic_Fence( ATOMIC_ORDER_RELEASE );
    102 
    103     cpu_of_victim->dispatch_necessary = true;
    104 
    105     if ( cpu_of_victim != cpu_of_executing ) {
    106       _Per_CPU_Send_interrupt( cpu_of_victim );
    107     }
     134    _Scheduler_SMP_Update_heir( cpu_self, cpu_of_victim, heir );
    108135  }
    109136}
     
    149176        && !( *order )( &thread->Object.Node, &highest_ready->Object.Node )
    150177    ) {
    151       _Scheduler_SMP_Allocate_processor( highest_ready, thread );
     178      _Scheduler_SMP_Allocate_processor( self, highest_ready, thread );
    152179
    153180      ( *insert_ready )( self, thread );
     
    169196        && ( *order )( &thread->Object.Node, &lowest_scheduled->Object.Node )
    170197    ) {
    171       _Scheduler_SMP_Allocate_processor( thread, lowest_scheduled );
     198      _Scheduler_SMP_Allocate_processor( self, thread, lowest_scheduled );
    172199
    173200      ( *insert_scheduled )( self, thread );
     
    188215  Thread_Control *highest_ready = ( *get_highest_ready )( self );
    189216
    190   _Scheduler_SMP_Allocate_processor( highest_ready, victim );
     217  _Scheduler_SMP_Allocate_processor( self, highest_ready, victim );
    191218
    192219  ( *move_from_ready_to_scheduled )( self, highest_ready );
  • cpukit/score/include/rtems/score/statesimpl.h

    r58444f7 r38b59a6  
    8383/** This macro corresponds to a task being a zombie. */
    8484#define STATES_ZOMBIE                          0x200000
     85/** This macro corresponds to a task migration to another scheduler. */
     86#define STATES_MIGRATING                       0x400000
    8587
    8688/** This macro corresponds to a task which is in an interruptible
  • cpukit/score/include/rtems/score/thread.h

    r58444f7 r38b59a6  
    505505
    506506  /**
    507    * @brief This field is true if the thread is executing.
    508    *
    509    * A thread is executing if it executes on a processor.  An executing thread
    510    * executes on exactly one processor.  There are exactly processor count
    511    * executing threads in the system.  An executing thread may have a heir
    512    * thread and thread dispatching is necessary.  On SMP a thread dispatch on a
    513    * remote processor needs help from an inter-processor interrupt, thus it
    514    * will take some time to complete the state change.  A lot of things can
    515    * happen in the meantime.  This field is volatile since it is polled in
    516    * _Thread_Kill_zombies().
    517    */
    518   volatile bool                         is_executing;
    519 
    520   /**
    521507   * @brief The scheduler of this thread.
    522508   */
     
    549535
    550536#ifdef RTEMS_SMP
     537  /**
     538   * @brief The processor assigned by the scheduler.
     539   */
    551540  Per_CPU_Control                      *cpu;
     541
     542#ifdef RTEMS_DEBUG
     543  /**
     544   * @brief The processor on which this thread executed the last time or is
     545   * executing.
     546   */
     547  Per_CPU_Control                      *debug_real_cpu;
     548#endif
    552549#endif
    553550
  • cpukit/score/include/rtems/score/threadimpl.h

    r58444f7 r38b59a6  
    455455}
    456456
     457#if defined(RTEMS_SMP)
     458/**
     459 * @brief Returns @a true in case the thread currently executes on some
     460 * processor in the system, otherwise @a false.
     461 *
     462 * Do not confuse this with _Thread_Is_executing() which checks only the
     463 * current processor.
     464 */
     465RTEMS_INLINE_ROUTINE bool _Thread_Is_executing_on_a_processor(
     466  const Thread_Control *the_thread
     467)
     468{
     469  return _CPU_Context_Get_is_executing( &the_thread->Registers );
     470}
     471#endif
     472
    457473/**
    458474 * This function returns true if the_thread is the heir
     
    492508  _Giant_Release();
    493509
    494   _Per_CPU_ISR_disable_and_acquire( _Per_CPU_Get(), level );
     510  _ISR_Disable_without_giant( level );
    495511  ( void ) level;
    496512#endif
     
    591607{
    592608#if defined(RTEMS_SMP)
    593   if ( thread->is_executing ) {
     609  if ( _Thread_Is_executing_on_a_processor( thread ) ) {
    594610    const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
    595611    Per_CPU_Control *cpu_of_thread = _Thread_Get_CPU( thread );
     
    612628  } else {
    613629#if defined(RTEMS_SMP)
    614     if ( thread->is_executing ) {
     630    if ( _Thread_Is_executing_on_a_processor( thread ) ) {
    615631      const Per_CPU_Control *cpu_of_executing = _Per_CPU_Get();
    616632      Per_CPU_Control *cpu_of_thread = _Thread_Get_CPU( thread );
     
    623639#endif
    624640  }
     641}
     642
     643/**
     644 * @brief Gets the heir of the processor and makes it executing.
     645 *
     646 * The thread dispatch necessary indicator is cleared as a side-effect.
     647 *
     648 * @return The heir thread.
     649 *
     650 * @see _Thread_Dispatch(), _Thread_Start_multitasking() and
     651 * _Scheduler_SMP_Update_heir().
     652 */
     653RTEMS_INLINE_ROUTINE Thread_Control *_Thread_Get_heir_and_make_it_executing(
     654  Per_CPU_Control *cpu_self
     655)
     656{
     657  Thread_Control *heir;
     658
     659  cpu_self->dispatch_necessary = false;
     660
     661#if defined( RTEMS_SMP )
     662  /*
     663   * It is critical that we first update the dispatch necessary and then
     664   * read the heir so that we don't miss an update by
     665   * _Scheduler_SMP_Update_heir().
     666   */
     667  _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
     668#endif
     669
     670  heir = cpu_self->heir;
     671  cpu_self->executing = heir;
     672
     673  return heir;
    625674}
    626675
     
    737786}
    738787
     788RTEMS_INLINE_ROUTINE void _Thread_Debug_set_real_processor(
     789  Thread_Control  *the_thread,
     790  Per_CPU_Control *cpu
     791)
     792{
     793#if defined(RTEMS_SMP) && defined(RTEMS_DEBUG)
     794  the_thread->debug_real_cpu = cpu;
     795#else
     796  (void) the_thread;
     797  (void) cpu;
     798#endif
     799}
     800
    739801#if !defined(__DYNAMIC_REENT__)
    740802/**
  • cpukit/score/include/rtems/score/userextimpl.h

    r58444f7 r38b59a6  
    217217  const Chain_Node    *node = _Chain_Immutable_first( chain );
    218218
    219   while ( node != tail ) {
    220     const User_extensions_Switch_control *extension =
    221       (const User_extensions_Switch_control *) node;
    222 
    223     (*extension->thread_switch)( executing, heir );
    224 
    225     node = _Chain_Immutable_next( node );
     219  if ( node != tail ) {
     220    Per_CPU_Control *cpu_self = _Per_CPU_Get();
     221
     222    _Per_CPU_Acquire( cpu_self );
     223
     224    while ( node != tail ) {
     225      const User_extensions_Switch_control *extension =
     226        (const User_extensions_Switch_control *) node;
     227
     228      (*extension->thread_switch)( executing, heir );
     229
     230      node = _Chain_Immutable_next( node );
     231    }
     232
     233    _Per_CPU_Release( cpu_self );
    226234  }
    227235}
  • cpukit/score/src/smp.c

    r58444f7 r38b59a6  
    5959
    6060    if ( started ) {
    61       ++assignment->scheduler->context->processor_count;
     61      Scheduler_Context *scheduler_context = assignment->scheduler->context;
     62
     63      ++scheduler_context->processor_count;
     64      cpu->scheduler_context = scheduler_context;
    6265    }
    6366  }
  • cpukit/score/src/threaddispatch.c

    r58444f7 r38b59a6  
    6565  Per_CPU_Control  *cpu_self;
    6666  Thread_Control   *executing;
    67   Thread_Control   *heir;
    6867  ISR_Level         level;
    6968
    7069#if defined( RTEMS_SMP )
     70  /*
     71   * On SMP the complete context switch must be atomic with respect to one
     72   * processor.  See also _Thread_Handler() since _Context_switch() may branch
     73   * to this function.
     74   */
    7175  _ISR_Disable_without_giant( level );
    7276#endif
     
    7781  cpu_self->thread_dispatch_disable_level = 1;
    7882
    79 #if defined( RTEMS_SMP )
    80   _ISR_Enable_without_giant( level );
    81 #endif
    82 
    8383  /*
    8484   *  Now determine if we need to perform a dispatch on the current CPU.
    8585   */
    8686  executing = cpu_self->executing;
    87   _Per_CPU_ISR_disable_and_acquire( cpu_self, level );
     87
     88#if !defined( RTEMS_SMP )
     89  _ISR_Disable( level );
     90#endif
     91
    8892#if defined( RTEMS_SMP )
    89   /*
    90    * On SMP the complete context switch must be atomic with respect to one
    91    * processor.  The scheduler must obtain the per-CPU lock to check if a
    92    * thread is executing and to update the heir.  This ensures that a thread
    93    * cannot execute on more than one processor at a time.  See also
    94    * _Thread_Handler() since _Context_switch() may branch to this function.
    95    */
    9693  if ( cpu_self->dispatch_necessary ) {
    9794#else
    9895  while ( cpu_self->dispatch_necessary ) {
    9996#endif
    100     cpu_self->dispatch_necessary = false;
    101 
    102 #if defined( RTEMS_SMP )
    103     /*
    104      * It is critical that we first update the dispatch necessary and then the
    105      * read the heir so that we don't miss an update by
    106      * _Scheduler_SMP_Allocate_processor().
    107      */
    108     _Atomic_Fence( ATOMIC_ORDER_SEQ_CST );
    109 #endif
    110 
    111     heir = cpu_self->heir;
    112     cpu_self->executing = heir;
    113 
    114 #if defined( RTEMS_SMP )
    115     executing->is_executing = false;
    116     heir->is_executing = true;
    117 #endif
     97    Thread_Control *heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
    11898
    11999    /*
     
    208188    cpu_self = _Per_CPU_Get();
    209189
     190    _Thread_Debug_set_real_processor( executing, cpu_self );
     191
    210192#if !defined( RTEMS_SMP )
    211193    _ISR_Disable( level );
     
    218200  _Profiling_Thread_dispatch_enable( cpu_self, 0 );
    219201
    220   _Per_CPU_Release_and_ISR_enable( cpu_self, level );
     202  _ISR_Enable_without_giant( level );
    221203
    222204  _Thread_Run_post_switch_actions( executing );
  • cpukit/score/src/threadhandler.c

    r58444f7 r38b59a6  
    154154      _Assert( _ISR_Get_level() != 0 );
    155155
     156      _Thread_Debug_set_real_processor( executing, cpu_self );
     157
    156158      cpu_self->thread_dispatch_disable_level = 0;
    157159      _Profiling_Thread_dispatch_enable( cpu_self, 0 );
    158 
    159       _Per_CPU_Release( cpu_self );
    160160
    161161      level = executing->Start.isr_level;
  • cpukit/score/src/threadinitialize.c

    r58444f7 r38b59a6  
    5353  size_t                   i;
    5454  bool                     scheduler_allocated = false;
     55  Per_CPU_Control         *cpu = _Per_CPU_Get_by_index( 0 );
    5556
    5657#if defined( RTEMS_SMP )
     
    183184  the_thread->is_scheduled            = false;
    184185  the_thread->is_in_the_air           = false;
    185   the_thread->is_executing            = false;
    186186  the_thread->scheduler               = scheduler;
    187187#endif
    188188
     189  _Thread_Debug_set_real_processor( the_thread, cpu );
     190
    189191  /* Initialize the CPU for the non-SMP schedulers */
    190   _Thread_Set_CPU( the_thread, _Per_CPU_Get_by_index( 0 ) );
     192  _Thread_Set_CPU( the_thread, cpu );
    191193
    192194  the_thread->current_state           = STATES_DORMANT;
  • cpukit/score/src/threadrestart.c

    r58444f7 r38b59a6  
    108108   * service on a remote processor.
    109109   */
    110   while (the_thread->is_executing) {
     110  while ( _Thread_Is_executing_on_a_processor( the_thread ) ) {
    111111    /* Wait */
    112112  }
  • cpukit/score/src/threadstartmultitasking.c

    r58444f7 r38b59a6  
    3131  /*
    3232   * Threads begin execution in the _Thread_Handler() function.   This
    33    * function will set the thread dispatch disable level to zero and calls
    34    * _Per_CPU_Release().
     33   * function will set the thread dispatch disable level to zero.
    3534   */
    36   _Per_CPU_Acquire( cpu_self );
    3735  cpu_self->thread_dispatch_disable_level = 1;
    3836#endif
    3937
    40   heir = cpu_self->heir;
    41 
    42 #if defined(RTEMS_SMP)
    43   cpu_self->executing->is_executing = false;
    44   heir->is_executing = true;
    45 #endif
    46 
    47   cpu_self->dispatch_necessary = false;
    48   cpu_self->executing = heir;
     38  heir = _Thread_Get_heir_and_make_it_executing( cpu_self );
    4939
    5040   /*
  • doc/user/smp.t

    r58444f7 r38b59a6  
    212212affinity.
    213213
     214@subsection Task Migration
     215
     216@cindex task migration
     217@cindex thread migration
     218
     219With more than one processor in the system tasks can migrate from one processor
     220to another.  There are three reasons why tasks migrate in RTEMS.
     221
     222@itemize @bullet
     223@item The scheduler changes explicitly via @code{rtems_task_set_scheduler()} or
     224similar directives.
     225@item The task resumes execution after a blocking operation.  On a priority
     226based scheduler it will evict the lowest priority task currently assigned to a
     227processor in the processor set managed by the scheduler instance.
     228@item The task moves temporarily to another scheduler instance due to locking
     229protocols like @cite{Migratory Priority Inheritance} or the
     230@cite{Multiprocessor Resource Sharing Protocol}.
     231@end itemize
     232
     233Task migration should be avoided so that the working set of a task can stay on
     234the most local cache level.
     235
     236The current implementation of task migration in RTEMS has some implications
     237with respect to the interrupt latency.  It is crucial to preserve the system
     238invariant that a task can execute on at most one processor in the system at a
     239time.  This is accomplished with a boolean indicator in the task context.  The
     240processor architecture specific low-level task context switch code will mark
     241that a task context is no longer executing and waits until the heir context
     242has stopped execution before it restores the heir context and resumes execution
     243of the heir task.  So there is one point in time in which a processor is without
     244a task.  This is essential to avoid cyclic dependencies in case multiple tasks
     245migrate at once.  Otherwise some supervising entity is necessary to prevent
     246livelocks.  Such a global supervisor would lead to scalability problems so
     247this approach is not used.  Currently the thread dispatch is performed with
     248interrupts disabled.  So in case the heir task is currently executing on
     249another processor then this prolongs the time of disabled interrupts since one
     250processor has to wait for another processor to make progress.
     251
     252It is difficult to avoid this issue with the interrupt latency since interrupts
     253normally store the context of the interrupted task on its stack.  In case a
     254task is marked as not executing we must not use its task stack to store such an
     255interrupt context.  We cannot use the heir stack before it has stopped
     256execution on another processor.  So if we enable interrupts during this
     257transition we have to provide an alternative task-independent stack for this
     258time frame.  This issue needs further investigation.
     259
    214260@subsection Critical Section Techniques and SMP
    215261
  • testsuites/smptests/Makefile.am

    r58444f7 r38b59a6  
    2323SUBDIRS += smplock01
    2424SUBDIRS += smpmigration01
     25SUBDIRS += smpmigration02
    2526SUBDIRS += smpscheduler01
    2627SUBDIRS += smpscheduler02
  • testsuites/smptests/configure.ac

    r58444f7 r38b59a6  
    7878smplock01/Makefile
    7979smpmigration01/Makefile
     80smpmigration02/Makefile
    8081smppsxaffinity01/Makefile
    8182smppsxaffinity02/Makefile
  • testsuites/smptests/smpscheduler02/init.c

    r58444f7 r38b59a6  
    159159  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    160160
    161   sc = rtems_task_set_scheduler(task_id, scheduler_a_id);
    162   rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
     161  sc = rtems_task_set_scheduler(task_id, scheduler_b_id);
     162  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    163163
    164164  sc = rtems_event_transient_receive(RTEMS_WAIT, RTEMS_NO_TIMEOUT);
  • testsuites/smptests/smpswitchextension01/smpswitchextension01.scn

    r58444f7 r38b59a6  
    1 *** TEST SMPSWITCHEXTENSION 1 ***
     1*** BEGIN OF TEST SMPSWITCHEXTENSION 1 ***
    22toggler 0
    3         toggles 2146479
     3        toggles 1555183
    44toggler 1
    5         toggles 2146477
     5        toggles 1555182
    66extension 0
    7         context switches 2146478
     7        context switches 1555185
    88extension 1
    9         context switches 2146481
     9        context switches 1244705
    1010extension 2
    11         context switches 2146482
    12 extension switches 718121
     11        context switches 1554688
     12extension switches 311649
    1313*** END OF TEST SMPSWITCHEXTENSION 1 ***
  • testsuites/sptests/spscheduler01/init.c

    r58444f7 r38b59a6  
    8282
    8383  sc = rtems_task_set_affinity(RTEMS_SELF, sizeof(cpuset), &cpuset);
    84   rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
     84  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    8585
    8686  sc = rtems_task_set_affinity(self_id, sizeof(cpuset), &cpuset);
    87   rtems_test_assert(sc == RTEMS_INVALID_NUMBER);
     87  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    8888
    8989  sc = rtems_task_set_affinity(task_id, sizeof(cpuset), &cpuset);
     
    164164
    165165  sc = rtems_task_set_scheduler(self_id, scheduler_id);
    166   rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
     166  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    167167
    168168  sc = rtems_task_create(
     
    189189
    190190  sc = rtems_task_set_scheduler(task_id, scheduler_id);
    191   rtems_test_assert(sc == RTEMS_INCORRECT_STATE);
     191  rtems_test_assert(sc == RTEMS_SUCCESSFUL);
    192192
    193193  sc = rtems_task_delete(task_id);
  • testsuites/tmtests/tm26/task1.c

    r58444f7 r38b59a6  
    147147  self_cpu = _Per_CPU_Get();
    148148  self_cpu->thread_dispatch_disable_level = 1;
    149 
    150   _Per_CPU_Acquire( self_cpu );
    151149#else
    152150  _Thread_Disable_dispatch();
Note: See TracChangeset for help on using the changeset viewer.