Changeset 05df0a8 in rtems


Ignore:
Timestamp:
May 17, 1999, 8:41:13 PM (22 years ago)
Author:
Joel Sherrill <joel.sherrill@…>
Branches:
4.10, 4.11, 4.8, 4.9, 5, master
Children:
c4d69e2
Parents:
a238cc9
Message:

Thread Handler split into multiple files. Eventually, as RTEMS is
split into one function per file, this will decrease the size of executables.

Files:
48 added
3 edited

Legend:

Unmodified
Added
Removed
  • c/src/exec/score/src/Makefile.in

    ra238cc9 r05df0a8  
    2020MP_PIECES = $(MP_PIECES_$(HAS_MP)_V)
    2121
     22THREAD_PIECES=\
     23    thread threadchangepriority threadclearstate threadclose \
     24    threadcreateidle threaddelayended threaddispatch \
     25    threadevaluatemode threadget threadhandler \
     26    threadidlebody threadinitialize threadloadenv \
     27    threadready threadresettimeslice threadrestart \
     28    threadsetpriority threadsetstate threadsettransient \
     29    threadstackallocate threadstackfree threadstart \
     30    threadstartmultitasking threadtickletimeslice \
     31    threadyieldprocessor
     32
    2233# C and C++ source names, if any, go here -- minus the .c or .cc
    2334C_PIECES=apiext chain coremsg coremutex coresem heap interr \
    24     isr object thread threadq tod userext \
     35    isr object $(THREAD_PIECES) threadq tod userext \
    2536    watchdog wkspace $(MP_PIECES)
    2637C_FILES=$(C_PIECES:%=%.c)
  • c/src/exec/score/src/thread.c

    ra238cc9 r05df0a8  
    4040 *  Output parameters:  NONE
    4141 */
    42 
    43 char *_Thread_Idle_name = "IDLE";
    4442
    4543void _Thread_Handler_initialization(
     
    103101}
    104102
/*PAGE
 *
 *  _Thread_Create_idle
 *
 *  Allocate and start the system IDLE thread: select the idle body
 *  (port-provided, core default, or BSP override from the CPU table),
 *  enforce the minimum stack size, initialize the thread, seed the
 *  executing/heir pointers, and start it.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 */

void _Thread_Create_idle( void )
{
  void       *idle;
  unsigned32  idle_task_stack_size;

  /*
   *  The entire workspace is zeroed during its initialization.  Thus, all
   *  fields not explicitly assigned were explicitly zeroed by
   *  _Workspace_Initialization.
   */

  _Thread_Idle = _Thread_Internal_allocate();

  /*
   *  Initialize the IDLE task.
   */

  /* Default idle body: the port's, when it provides one, else the core's. */
#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
  idle = (void *) _CPU_Thread_Idle_body;
#else
  idle = (void *) _Thread_Idle_body;
#endif

  /* A BSP-supplied idle task in the CPU table overrides the default. */
  if ( _CPU_Table.idle_task )
    idle = _CPU_Table.idle_task;

  /* Never let the BSP configure a stack below the enforced minimum. */
  idle_task_stack_size =  _CPU_Table.idle_task_stack_size;
  if ( idle_task_stack_size < STACK_MINIMUM_SIZE )
    idle_task_stack_size = STACK_MINIMUM_SIZE;

  /*
   *  NOTE(review): the boolean result of _Thread_Initialize is ignored
   *  here -- presumably it cannot fail for the IDLE thread; verify.
   */
  _Thread_Initialize(
    &_Thread_Internal_information,
    _Thread_Idle,
    NULL,        /* allocate the stack */
    idle_task_stack_size,
    CPU_IDLE_TASK_IS_FP,
    PRIORITY_MAXIMUM,
    TRUE,        /* preemptable */
    THREAD_CPU_BUDGET_ALGORITHM_NONE,
    NULL,        /* no budget algorithm callout */
    0,           /* all interrupts enabled */
    _Thread_Idle_name
  );

  /*
   *  WARNING!!! This is necessary to "kick" start the system and
   *             MUST be done before _Thread_Start is invoked.
   */

  _Thread_Heir      =
  _Thread_Executing = _Thread_Idle;

  _Thread_Start(
    _Thread_Idle,
    THREAD_START_NUMERIC,
    idle,
    NULL,
    0
  );

}
    171 
/*PAGE
 *
 *  _Thread_Start_multitasking
 *
 *  This kernel routine marks the system as up, selects the first thread
 *  to run, restores its floating point context when necessary, and
 *  context switches from the BSP (startup) context to that thread.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 *
 *  NOTE:  Control continues in the heir thread via _Context_Switch;
 *         the startup context is saved in _Thread_BSP_context.
 */

void _Thread_Start_multitasking( void )
{
  /*
   *  The system is now multitasking and completely initialized. 
   *  This system thread now either "goes away" in a single processor
   *  system or "turns into" the server thread in an MP system.
   */

  _System_state_Set( SYSTEM_STATE_UP );

  _Context_Switch_necessary = FALSE;

  _Thread_Executing = _Thread_Heir;

   /*
    * Get the init task(s) running.
    *
    * Note: Thread_Dispatch() is normally used to dispatch threads.  As
    *       part of its work, Thread_Dispatch() restores floating point
    *       state for the heir task.
    *
    *       This code avoids Thread_Dispatch(), and so we have to restore
    *       (actually initialize) the floating point state "by hand".
    *
    *       Ignore the CPU_USE_DEFERRED_FP_SWITCH because we must always
    *       switch in the first thread if it is FP.
    */

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
   /*
    *  don't need to worry about saving BSP's floating point state
    */

   if ( _Thread_Heir->fp_context != NULL )
     _Context_Restore_fp( &_Thread_Heir->fp_context );
#endif

  _Context_Switch( &_Thread_BSP_context, &_Thread_Heir->Registers );
}
    233 
    234 /*PAGE
    235  *
    236  *  _Thread_Dispatch
    237  *
    238  *  This kernel routine determines if a dispatch is needed, and if so
    239  *  dispatches to the heir thread.  Once the heir is running an attempt
    240  *  is made to dispatch any ASRs.
    241  *
    242  *  ALTERNATE ENTRY POINTS:
    243  *    void _Thread_Enable_dispatch();
    244  *
    245  *  Input parameters:  NONE
    246  *
    247  *  Output parameters:  NONE
    248  *
    249  *  INTERRUPT LATENCY:
    250  *    dispatch thread
    251  *    no dispatch thread
    252  */
    253 
    254 #if ( CPU_INLINE_ENABLE_DISPATCH == FALSE )
    255 void _Thread_Enable_dispatch( void )
    256 {
    257   if ( --_Thread_Dispatch_disable_level )
    258     return;
    259   _Thread_Dispatch();
    260 }
    261 #endif
    262 
void _Thread_Dispatch( void )
{
  Thread_Control   *executing;
  Thread_Control   *heir;
  ISR_Level         level;

  executing   = _Thread_Executing;
  _ISR_Disable( level );
  /* A new heir may be selected while we dispatch, so keep looping. */
  while ( _Context_Switch_necessary == TRUE ) {
    heir = _Thread_Heir;
    /* Dispatching is disabled (level forced to 1) for the duration of
       the switch. */
    _Thread_Dispatch_disable_level = 1;
    _Context_Switch_necessary = FALSE;
    _Thread_Executing = heir;
    /* Swap the global Ada self pointer along with the thread. */
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
    _ISR_Enable( level );

    heir->ticks_executed++;

    _User_extensions_Thread_switch( executing, heir );

    /* Threads under the RESET_TIMESLICE algorithm receive a fresh
       budget each time they are switched to. */
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = _Thread_Ticks_per_timeslice;

    /*
     *  If the CPU has hardware floating point, then we must address saving
     *  and restoring it as part of the context switch.
     *
     *  The second conditional compilation section selects the algorithm used
     *  to context switch between floating point tasks.  The deferred algorithm
     *  can be significantly better in a system with few floating point tasks
     *  because it reduces the total number of save and restore FP context
     *  operations.  However, this algorithm can not be used on all CPUs due
     *  to unpredictable use of FP registers by some compilers for integer
     *  operations.
     */

#if ( CPU_HARDWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    /* Deferred: save/restore only when the heir has FP state and is not
       already the FP owner. */
    if ( (heir->fp_context != NULL) && !_Thread_Is_allocated_fp( heir ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &heir->fp_context );
      _Thread_Allocated_fp = heir;
    }
#else
    /* Immediate: save/restore FP state on every switch between FP threads. */
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );

    if ( heir->fp_context != NULL )
      _Context_Restore_fp( &heir->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

    /* Execution resumes here when this thread is next dispatched;
       re-fetch the executing pointer, which changed while we were out. */
    executing = _Thread_Executing;

    _ISR_Disable( level );
  }

  _Thread_Dispatch_disable_level = 0;

  _ISR_Enable( level );

  /* Dispatch any pending ASRs (see the header comment above) via the
     API post-switch extensions, outside the critical section. */
  if ( _Thread_Do_post_task_switch_extension ||
       executing->do_post_task_switch_extension ) {
    executing->do_post_task_switch_extension = FALSE;
    _API_extensions_Run_postswitch();
  }

}
    335 
    336 /*PAGE
    337  *
    338  *  _Thread_Stack_Allocate
    339  *
    340  *  Allocate the requested stack space for the thread.
    341  *  return the actual size allocated after any adjustment
    342  *  or return zero if the allocation failed.
    343  *  Set the Start.stack field to the address of the stack
    344  */
    345 
    346 static unsigned32 _Thread_Stack_Allocate(
    347   Thread_Control *the_thread,
    348   unsigned32 stack_size)
    349 {
    350   void *stack_addr = 0;
    351  
    352   if ( !_Stack_Is_enough( stack_size ) )
    353     stack_size = STACK_MINIMUM_SIZE;
    354  
    355   /*
    356    * Call ONLY the CPU table stack allocate hook, _or_ the
    357    * the RTEMS workspace allocate.  This is so the stack free
    358    * routine can call the correct deallocation routine.
    359    */
    360 
    361   if ( _CPU_Table.stack_allocate_hook )
    362   {
    363     stack_addr = (*_CPU_Table.stack_allocate_hook)( stack_size );
    364   } else {
    365 
    366     /*
    367      *  First pad the requested size so we allocate enough memory
    368      *  so the context initialization can align it properly.  The address
    369      *  returned the workspace allocate must be directly stored in the
    370      *  stack control block because it is later used in the free sequence.
    371      *
    372      *  Thus it is the responsibility of the CPU dependent code to
    373      *  get and keep the stack adjust factor, the stack alignment, and
    374      *  the context initialization sequence in sync.
    375      */
    376 
    377     stack_size = _Stack_Adjust_size( stack_size );
    378     stack_addr = _Workspace_Allocate( stack_size );
    379   }
    380  
    381   if ( !stack_addr )
    382       stack_size = 0;
    383  
    384   the_thread->Start.stack = stack_addr;
    385  
    386   return stack_size;
    387 }
    388 
    389 /*
    390  *  _Thread_Stack_Free
    391  *
    392  *  Deallocate the Thread's stack.
    393  */
    394 
    395 static void _Thread_Stack_Free(
    396   Thread_Control *the_thread
    397 )
    398 {
    399     /*
    400      *  If the API provided the stack space, then don't free it.
    401      */
    402 
    403     if ( !the_thread->Start.core_allocated_stack )
    404       return;
    405 
    406     /*
    407      * Call ONLY the CPU table stack free hook, or the
    408      * the RTEMS workspace free.  This is so the free
    409      * routine properly matches the allocation of the stack.
    410      */
    411 
    412     if ( _CPU_Table.stack_free_hook )
    413         (*_CPU_Table.stack_free_hook)( the_thread->Start.Initial_stack.area );
    414     else
    415         _Workspace_Free( the_thread->Start.Initial_stack.area );
    416 }
    417 
/*PAGE
 *
 *  _Thread_Initialize
 *
 *  Core initialization of a thread control block:  allocate (or accept)
 *  the stack, allocate optional floating point and extensions areas,
 *  fill in the scheduling attributes, open the object, and run the
 *  user create extensions.
 *
 *  Input parameters:
 *    information      - object information table for the owning class
 *    the_thread       - thread control block to initialize
 *    stack_area       - API-supplied stack area, or NULL to allocate one
 *    stack_size       - requested (or supplied) stack size in bytes
 *    is_fp            - TRUE if the thread uses floating point
 *    priority         - initial and real priority
 *    is_preemptible   - TRUE if the thread may be preempted
 *    budget_algorithm - CPU budget algorithm for the thread
 *    budget_callout   - callout used by the CALLOUT budget algorithm
 *    isr_level        - ISR level in effect when the thread starts
 *    name             - object name
 *
 *  Output parameters:
 *    returns TRUE on success; FALSE if the stack, FP area, extensions
 *    area, or a user create extension failed.  Allocations made here
 *    are released again on failure.
 */

boolean _Thread_Initialize(
  Objects_Information                  *information,
  Thread_Control                       *the_thread,
  void                                 *stack_area,
  unsigned32                            stack_size,
  boolean                               is_fp,
  Priority_Control                      priority,
  boolean                               is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  unsigned32                            isr_level,
  Objects_Name                          name
)
{
  unsigned32           actual_stack_size = 0;
  void                *stack = NULL;
  void                *fp_area;
  void                *extensions_area;

  /*
   *  Initialize the Ada self pointer
   */

  the_thread->rtems_ada_self = NULL;

  /*
   *  Allocate and Initialize the stack for this thread.
   *  (The minimum-size enforcement here duplicates a check inside
   *  _Thread_Stack_Allocate.)
   */


  if ( !stack_area ) {
    if ( !_Stack_Is_enough( stack_size ) )
      actual_stack_size = STACK_MINIMUM_SIZE;
    else
      actual_stack_size = stack_size;

    actual_stack_size = _Thread_Stack_Allocate( the_thread, actual_stack_size );
 
    if ( !actual_stack_size )
      return FALSE;                     /* stack allocation failed */

    stack = the_thread->Start.stack;
    the_thread->Start.core_allocated_stack = TRUE;
  } else {
    /* API-supplied stack: remember that so _Thread_Stack_Free skips it. */
    stack = stack_area;
    actual_stack_size = stack_size;
    the_thread->Start.core_allocated_stack = FALSE;
  }

  _Stack_Initialize(
     &the_thread->Start.Initial_stack,
     stack,
     actual_stack_size
  );

  /*
   *  Allocate the floating point area for this thread
   */
 
  if ( is_fp ) {

    fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
    if ( !fp_area ) {
      _Thread_Stack_Free( the_thread );
      return FALSE;
    }
    fp_area = _Context_Fp_start( fp_area, 0 );

  } else
    fp_area = NULL;

  the_thread->fp_context       = fp_area;
  the_thread->Start.fp_context = fp_area;

  /*
   *  Allocate the extensions area for this thread
   */

  if ( _Thread_Maximum_extensions ) {
    extensions_area = _Workspace_Allocate(
      (_Thread_Maximum_extensions + 1) * sizeof( void * )
    );

    if ( !extensions_area ) {
      /* Unwind the allocations made so far. */
      if ( fp_area )
        (void) _Workspace_Free( fp_area );

      _Thread_Stack_Free( the_thread );

      return FALSE;
    }
  } else
    extensions_area = NULL;
 
  the_thread->extensions = (void **) extensions_area;

  /*
   *  General initialization
   */

  the_thread->Start.is_preemptible   = is_preemptible;
  the_thread->Start.budget_algorithm = budget_algorithm;
  the_thread->Start.budget_callout   = budget_callout;
  the_thread->Start.isr_level        = isr_level;

  the_thread->current_state          = STATES_DORMANT;
  the_thread->resource_count         = 0;
  the_thread->real_priority          = priority;
  the_thread->Start.initial_priority = priority;
  the_thread->ticks_executed         = 0;
 
  _Thread_Set_priority( the_thread, priority );

  /*
   *  Open the object
   */

  _Objects_Open( information, &the_thread->Object, name );

  /*
   *  Invoke create extensions
   */

  if ( !_User_extensions_Thread_create( the_thread ) ) {

    /*
     *  NOTE(review): the object opened above is not closed on this
     *  failure path -- presumably the caller recovers the object;
     *  verify against the callers.
     */
    if ( extensions_area )
      (void) _Workspace_Free( extensions_area );

    if ( fp_area )
      (void) _Workspace_Free( fp_area );

    _Thread_Stack_Free( the_thread );

    return FALSE;
  }

  return TRUE;
   
}
    564 
    565 /*
    566  *  _Thread_Start
    567  *
    568  *  DESCRIPTION:
    569  *
    570  *  XXX
    571  */
    572  
    573 boolean _Thread_Start(
    574   Thread_Control       *the_thread,
    575   Thread_Start_types    the_prototype,
    576   void                 *entry_point,
    577   void                 *pointer_argument,
    578   unsigned32            numeric_argument
    579 )
    580 {
    581   if ( _States_Is_dormant( the_thread->current_state ) ) {
    582  
    583     the_thread->Start.entry_point      = (Thread_Entry) entry_point;
    584    
    585     the_thread->Start.prototype        = the_prototype;
    586     the_thread->Start.pointer_argument = pointer_argument;
    587     the_thread->Start.numeric_argument = numeric_argument;
    588  
    589     _Thread_Load_environment( the_thread );
    590  
    591     _Thread_Ready( the_thread );
    592  
    593     _User_extensions_Thread_start( the_thread );
    594  
    595     return TRUE;
    596   }
    597  
    598   return FALSE;
    599  
    600 }
    601 
/*
 *  _Thread_Restart
 *
 *  Restart a non-dormant thread from its original start information,
 *  substituting the new argument values supplied.
 *
 *  Input parameters:
 *    the_thread       - thread to restart
 *    pointer_argument - new pointer argument for the entry point
 *    numeric_argument - new numeric argument for the entry point
 *
 *  Output parameters:
 *    returns TRUE if the thread was restarted; FALSE if it was dormant
 */
 
boolean _Thread_Restart(
  Thread_Control      *the_thread,
  void                *pointer_argument,
  unsigned32           numeric_argument
)
{
  if ( !_States_Is_dormant( the_thread->current_state ) ) {
 
    /* Pull the thread out of the ready set while it is rebuilt. */
    _Thread_Set_transient( the_thread );
    the_thread->resource_count = 0;
    /* Restore the attributes captured at create/start time. */
    the_thread->is_preemptible   = the_thread->Start.is_preemptible;
    the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
    the_thread->budget_callout   = the_thread->Start.budget_callout;

    the_thread->Start.pointer_argument = pointer_argument;
    the_thread->Start.numeric_argument = numeric_argument;
 
    /* Remove it from any thread queue it is blocked on; otherwise
       cancel any active timer (e.g. a timeout). */
    if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {
 
      if ( _Watchdog_Is_active( &the_thread->Timer ) )
        (void) _Watchdog_Remove( &the_thread->Timer );
    }

    /* Drop back to the initial priority if it has changed. */
    if ( the_thread->current_priority != the_thread->Start.initial_priority ) {
      the_thread->real_priority = the_thread->Start.initial_priority;
      _Thread_Set_priority( the_thread, the_thread->Start.initial_priority );
    }
 
    _Thread_Load_environment( the_thread );
 
    _Thread_Ready( the_thread );
 
    _User_extensions_Thread_restart( the_thread );
 
    /* Restarting ourselves cannot simply return into the old context. */
    if ( _Thread_Is_executing ( the_thread ) )
      _Thread_Restart_self();
 
    return TRUE;
  }
 
  return FALSE;
}
    652 
    653 /*
    654  *  _Thread_Close
    655  *
    656  *  DESCRIPTION:
    657  *
    658  *  XXX
    659  */
    660  
    661 void _Thread_Close(
    662   Objects_Information  *information,
    663   Thread_Control       *the_thread
    664 )
    665 {
    666   _Objects_Close( information, &the_thread->Object );
    667  
    668   _Thread_Set_state( the_thread, STATES_TRANSIENT );
    669  
    670   if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {
    671  
    672     if ( _Watchdog_Is_active( &the_thread->Timer ) )
    673       (void) _Watchdog_Remove( &the_thread->Timer );
    674   }
    675 
    676   _User_extensions_Thread_delete( the_thread );
    677  
    678 #if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    679   if ( _Thread_Is_allocated_fp( the_thread ) )
    680     _Thread_Deallocate_fp();
    681 #endif
    682   the_thread->fp_context = NULL;
    683 
    684   if ( the_thread->Start.fp_context )
    685   (void) _Workspace_Free( the_thread->Start.fp_context );
    686 
    687   _Thread_Stack_Free( the_thread );
    688 
    689   if ( the_thread->extensions )
    690     (void) _Workspace_Free( the_thread->extensions );
    691 
    692   the_thread->Start.stack = NULL;
    693   the_thread->extensions = NULL;
    694 }
    695 
/*PAGE
 *
 *  _Thread_Ready
 *
 *  This kernel routine readies the requested thread, the thread chain
 *  is adjusted.  A new heir thread may be selected.
 *
 *  Input parameters:
 *    the_thread - pointer to thread control block
 *
 *  Output parameters:  NONE
 *
 *  NOTE:  This routine uses the "blocking" heir selection mechanism.
 *         This insures the correct heir after a thread restart.
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Ready(
  Thread_Control *the_thread
)
{
  ISR_Level              level;
  Thread_Control *heir;

  _ISR_Disable( level );

  the_thread->current_state = STATES_READY;

  /* Publish the thread's priority and append it to its ready chain. */
  _Priority_Add_to_bit_map( &the_thread->Priority_map );

  _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );

  /* Briefly re-enable interrupts to bound interrupt latency. */
  _ISR_Flash( level );

  _Thread_Calculate_heir();

  heir = _Thread_Heir;

  /* Request a dispatch when a new heir exists and preemption allows it. */
  if ( !_Thread_Is_executing( heir ) && _Thread_Executing->is_preemptible )
    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}
    742 
/*PAGE
 *
 *  _Thread_Clear_state
 *
 *  This kernel routine clears the appropriate states in the
 *  requested thread.  The thread ready chain is adjusted if
 *  necessary and the Heir thread is set accordingly.
 *
 *  Input parameters:
 *    the_thread - pointer to thread control block
 *    state      - state set to clear
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    priority map
 *    select heir
 */


void _Thread_Clear_state(
  Thread_Control *the_thread,
  States_Control  state
)
{
  ISR_Level       level;
  States_Control  current_state;

  _ISR_Disable( level );
    current_state = the_thread->current_state;
   
    if ( current_state & state ) {
      current_state =
      the_thread->current_state = _States_Clear( state, current_state );

      /* If clearing those states unblocked the thread, ready it. */
      if ( _States_Is_ready( current_state ) ) {

        _Priority_Add_to_bit_map( &the_thread->Priority_map );

        _Chain_Append_unprotected(the_thread->ready, &the_thread->Object.Node);

        /* Briefly re-enable interrupts to bound interrupt latency. */
        _ISR_Flash( level );

        /* Preempt when the newly readied thread beats the current heir;
           priority 0 threads preempt even non-preemptible executors. */
        if ( the_thread->current_priority < _Thread_Heir->current_priority ) {
          _Thread_Heir = the_thread;
          if ( _Thread_Executing->is_preemptible ||
               the_thread->current_priority == 0 )
            _Context_Switch_necessary = TRUE;
        }
      }
  }
  _ISR_Enable( level );
}
    796 
/*PAGE
 *
 * _Thread_Set_state
 *
 * This kernel routine sets the requested state in the THREAD.  The
 * THREAD chain is adjusted if necessary.
 *
 * Input parameters:
 *   the_thread   - pointer to thread control block
 *   state - state to be set
 *
 * Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select map
 */

void _Thread_Set_state(
  Thread_Control *the_thread,
  States_Control         state
)
{
  ISR_Level             level;
  Chain_Control *ready;

  ready = the_thread->ready;
  _ISR_Disable( level );
  /* Already blocked: just accumulate the new state bits. */
  if ( !_States_Is_ready( the_thread->current_state ) ) {
    the_thread->current_state =
       _States_Set( state, the_thread->current_state );
    _ISR_Enable( level );
    return;
  }

  /* The thread was ready: record the blocking state and remove it
     from its ready chain. */
  the_thread->current_state = state;

  if ( _Chain_Has_only_one_node( ready ) ) {

    /* Last thread at this priority: clear the priority map entry too. */
    _Chain_Initialize_empty( ready );
    _Priority_Remove_from_bit_map( &the_thread->Priority_map );

  } else
    _Chain_Extract_unprotected( &the_thread->Object.Node );

  /* Briefly re-enable interrupts to bound interrupt latency. */
  _ISR_Flash( level );

  if ( _Thread_Is_heir( the_thread ) )
     _Thread_Calculate_heir();

  if ( _Thread_Is_executing( the_thread ) )
    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}
    852 
/*PAGE
 *
 *  _Thread_Set_transient
 *
 *  This kernel routine places the requested thread in the transient state
 *  which will remove it from the ready queue, if necessary.  No
 *  rescheduling is necessary because it is assumed that the transient
 *  state will be cleared before dispatching is enabled.
 *
 *  Input parameters:
 *    the_thread - pointer to thread control block
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    only case
 */

void _Thread_Set_transient(
  Thread_Control *the_thread
)
{
  ISR_Level             level;
  unsigned32            old_state;
  Chain_Control *ready;

  ready = the_thread->ready;
  _ISR_Disable( level );

  old_state = the_thread->current_state;
  the_thread->current_state = _States_Set( STATES_TRANSIENT, old_state );

  /* Only a previously ready thread sits on a ready chain to be removed. */
  if ( _States_Is_ready( old_state ) ) {
    if ( _Chain_Has_only_one_node( ready ) ) {

      /* Last thread at this priority: clear the priority map entry too. */
      _Chain_Initialize_empty( ready );
      _Priority_Remove_from_bit_map( &the_thread->Priority_map );

    } else
      _Chain_Extract_unprotected( &the_thread->Object.Node );
  }

  _ISR_Enable( level );

}
    898 
/*PAGE
 *
 *  _Thread_Reset_timeslice
 *
 *  This routine will remove the running thread from the ready chain
 *  and place it immediately at the rear of this chain and then the
 *  timeslice counter is reset.  The heir THREAD will be updated if
 *  the running is also the currently the heir.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Reset_timeslice( void )
{
  ISR_Level       level;
  Thread_Control *executing;
  Chain_Control  *ready;

  executing = _Thread_Executing;
  ready     = executing->ready;
  _ISR_Disable( level );
    /* Alone at this priority: rotating the chain would change nothing. */
    if ( _Chain_Has_only_one_node( ready ) ) {
      _ISR_Enable( level );
      return;
    }
    /* Rotate: move the running thread to the rear of its ready chain. */
    _Chain_Extract_unprotected( &executing->Object.Node );
    _Chain_Append_unprotected( ready, &executing->Object.Node );

  /* Briefly re-enable interrupts to bound interrupt latency. */
  _ISR_Flash( level );

    /* The new heir is whatever is now at the front of the chain. */
    if ( _Thread_Is_heir( executing ) )
      _Thread_Heir = (Thread_Control *) ready->first;

    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}
    942 
/*PAGE
 *
 *  _Thread_Tickle_timeslice
 *
 *  This scheduler routine determines if timeslicing is enabled
 *  for the currently executing thread and, if so, updates the
 *  timeslice count and checks for timeslice expiration.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 */

void _Thread_Tickle_timeslice( void )
{
  Thread_Control *executing;

  executing = _Thread_Executing;

  /*
   *  Increment the number of ticks this thread has been executing
   */

  executing->ticks_executed++;

  /*
   *  If the thread is not preemptible or is not ready, then
   *  just return.
   */

  if ( !executing->is_preemptible )
    return;

  if ( !_States_Is_ready( executing->current_state ) )
    return;

  /*
   *  The cpu budget algorithm determines what happens next.
   *
   *  NOTE(review): the pre-decrements below assume cpu_time_budget is
   *  positive on entry -- a zero budget would wrap; presumably the
   *  budget is always replenished before reaching zero-and-staying;
   *  verify.
   */

  switch ( executing->budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
      break;

    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
    case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
      /* Budget exhausted: rotate the ready chain and grant a new slice. */
      if ( --executing->cpu_time_budget == 0 ) {
        _Thread_Reset_timeslice();
        executing->cpu_time_budget = _Thread_Ticks_per_timeslice;
      }
      break;

    case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
      /* Budget exhausted: let the thread's callout decide what to do. */
      if ( --executing->cpu_time_budget == 0 )
        (*executing->budget_callout)( executing );
      break;
  }
}
    1001 
/*PAGE
 *
 *  _Thread_Yield_processor
 *
 *  This kernel routine will remove the running THREAD from the ready
 *  chain and place it immediately at the rear of this chain.  The heir
 *  THREAD will be updated if the running thread is also currently the
 *  heir.  When the running thread is alone on its chain, a dispatch is
 *  still requested if it is no longer the heir.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Yield_processor( void )
{
  ISR_Level       level;
  Thread_Control *executing;
  Chain_Control  *ready;

  executing = _Thread_Executing;
  ready     = executing->ready;
  _ISR_Disable( level );
    if ( !_Chain_Has_only_one_node( ready ) ) {
      /* Rotate: move the running thread to the rear of its ready chain. */
      _Chain_Extract_unprotected( &executing->Object.Node );
      _Chain_Append_unprotected( ready, &executing->Object.Node );

      /* Briefly re-enable interrupts to bound interrupt latency. */
      _ISR_Flash( level );

      /* The new heir is whatever is now at the front of the chain. */
      if ( _Thread_Is_heir( executing ) )
        _Thread_Heir = (Thread_Control *) ready->first;
      _Context_Switch_necessary = TRUE;
    }
    else if ( !_Thread_Is_heir( executing ) )
      _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}
    1046 
/*PAGE
 *
 *  _Thread_Load_environment
 *
 *  Load starting environment for another thread from its start area in the
 *  thread.  Only called from t_restart and t_start.
 *
 *  Input parameters:
 *    the_thread - thread control block pointer
 *
 *  Output parameters:  NONE
 */

void _Thread_Load_environment(
  Thread_Control *the_thread
)
{
  boolean is_fp = FALSE;

  /*
   *  If the thread was created with a floating point area, reattach it
   *  and reinitialize the FP context for a fresh start.
   */
  if ( the_thread->Start.fp_context ) {
    the_thread->fp_context = the_thread->Start.fp_context;
    _Context_Initialize_fp( &the_thread->fp_context );
    is_fp = TRUE;
  }

  /* Reset execution attributes to those given when the thread was created. */
  the_thread->do_post_task_switch_extension = FALSE;
  the_thread->is_preemptible   = the_thread->Start.is_preemptible;
  the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
  the_thread->budget_callout   = the_thread->Start.budget_callout;

  /*
   *  Build an initial context so the first context switch to this thread
   *  begins execution at _Thread_Handler.
   */
  _Context_Initialize(
    &the_thread->Registers,
    the_thread->Start.Initial_stack.area,
    the_thread->Start.Initial_stack.size,
    the_thread->Start.isr_level,
    _Thread_Handler,
    is_fp
  );

}
    1087 
/*PAGE
 *
 *  _Thread_Handler
 *
 *  This routine is the "primal" entry point for all threads.
 *  _Context_Initialize() dummies up the thread's initial context
 *  to cause the first Context_Switch() to jump to _Thread_Handler().
 *
 *  This routine is also the default thread exit error handler.  It is
 *  returned to when a thread exits.  The configured fatal error handler
 *  is invoked to process the exit.
 *
 *  NOTE:
 *
 *  On entry, it is assumed all interrupts are blocked and that this
 *  routine needs to set the initial isr level.  This may or may not
 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
 *  setting the initial isr level properly here is safe.
 * 
 *  Currently this is only really needed for the posix port,
 *  ref: _Context_Switch in unix/cpu.c
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 */

void _Thread_Handler( void )
{
  ISR_Level  level;
  Thread_Control *executing;
 
  executing = _Thread_Executing;
 
  /*
   * have to put level into a register for those cpu's that use
   * inline asm here
   */
 
  level = executing->Start.isr_level;
  _ISR_Set_level(level);

  /*
   * Take care that 'begin' extensions get to complete before
   * 'switch' extensions can run.  This means must keep dispatch
   * disabled until all 'begin' extensions complete.
   */
 
  _User_extensions_Thread_begin( executing );
 
  /*
   *  At this point, the dispatch disable level BETTER be 1.
   */

  _Thread_Enable_dispatch();
 
  /*
   *  Invoke the user entry point using the calling convention recorded
   *  when the thread was started.
   */
  switch ( executing->Start.prototype ) {
    case THREAD_START_NUMERIC:
      (*(Thread_Entry_numeric) executing->Start.entry_point)(
        executing->Start.numeric_argument
      );
      break;
    case THREAD_START_POINTER:
      (*(Thread_Entry_pointer) executing->Start.entry_point)(
        executing->Start.pointer_argument
      );
      break;
    case THREAD_START_BOTH_POINTER_FIRST:
      (*(Thread_Entry_both_pointer_first) executing->Start.entry_point)(
        executing->Start.pointer_argument,
        executing->Start.numeric_argument
      );
      break;
    case THREAD_START_BOTH_NUMERIC_FIRST:
      (*(Thread_Entry_both_numeric_first) executing->Start.entry_point)(
        executing->Start.numeric_argument,
        executing->Start.pointer_argument
      );
      break;
  }

  /*
   *  Returning from the entry point means the thread "exited"; this is
   *  treated as a fatal error.
   */
  _User_extensions_Thread_exitted( executing );

  _Internal_error_Occurred(
    INTERNAL_ERROR_CORE,
    TRUE,
    INTERNAL_ERROR_THREAD_EXITTED
  );
}
    1178 
/*PAGE
 *
 *  _Thread_Delay_ended
 *
 *  This routine processes a thread whose delay period has ended.
 *  It is called by the watchdog handler.
 *
 *  Input parameters:
 *    id      - thread id
 *    ignored - unused; presumably present to match the watchdog service
 *              routine signature (verify against the watchdog insert site)
 *
 *  Output parameters: NONE
 */

void _Thread_Delay_ended(
  Objects_Id  id,
  void       *ignored
)
{
  Thread_Control    *the_thread;
  Objects_Locations  location;

  the_thread = _Thread_Get( id, &location );
  switch ( location ) {
    case OBJECTS_ERROR:
    case OBJECTS_REMOTE:  /* impossible */
      break;
    case OBJECTS_LOCAL:
      _Thread_Unblock( the_thread );
      /* Undo the dispatch disable performed inside _Thread_Get. */
      _Thread_Unnest_dispatch();
      break;
  }
}
    1211 
/*PAGE
 *
 *  _Thread_Change_priority
 *
 *  This kernel routine changes the priority of the thread.  The
 *  thread chain is adjusted if necessary.
 *
 *  Input parameters:
 *    the_thread   - pointer to thread control block
 *    new_priority - ultimate priority
 *    prepend_it   - TRUE if the thread should be prepended to the chain
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Change_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  boolean           prepend_it
)
{
  ISR_Level level;
  /* boolean   do_prepend = FALSE; */

  /*
   *  If this is a case where prepending the task to its priority is
   *  potentially desired, then we need to consider whether to do it.
   *  This usually occurs when a task lowers its priority implicitly as
   *  the result of losing inherited priority.  Normal explicit priority
   *  change calls (e.g. rtems_task_set_priority) should always do an
   *  append not a prepend.
   */
 
  /*
   *  Technically, the prepend should be conditional on the thread lowering
   *  its priority but that does allow cxd2004 of the acvc 2.0.1 to
   *  pass with rtems 4.0.0.  This should change when gnat redoes its
   *  priority scheme.
   */
/*
  if ( prepend_it &&
       _Thread_Is_executing( the_thread ) &&
       new_priority >= the_thread->current_priority )
    prepend_it = TRUE;
*/
                 
  /* Temporarily mark the thread TRANSIENT while it is moved. */
  _Thread_Set_transient( the_thread );

  if ( the_thread->current_priority != new_priority )
    _Thread_Set_priority( the_thread, new_priority );

  _ISR_Disable( level );

  the_thread->current_state =
    _States_Clear( STATES_TRANSIENT, the_thread->current_state );

  /* A thread that is still blocked is not put back on a ready chain. */
  if ( ! _States_Is_ready( the_thread->current_state ) ) {
    _ISR_Enable( level );
    return;
  }

  _Priority_Add_to_bit_map( &the_thread->Priority_map );
  if ( prepend_it )
    _Chain_Prepend_unprotected( the_thread->ready, &the_thread->Object.Node );
  else
    _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );

  /* Briefly open the interrupt window to bound interrupt latency. */
  _ISR_Flash( level );

  _Thread_Calculate_heir();

  if ( !_Thread_Is_executing_also_the_heir() &&
       _Thread_Executing->is_preemptible )
    _Context_Switch_necessary = TRUE;
  _ISR_Enable( level );
}
    1292 
/*PAGE
 *
 * _Thread_Set_priority
 *
 * This kernel routine sets the current priority of the thread and updates
 * its ready chain pointer and priority map information accordingly.  It
 * does NOT reposition the thread on a ready chain; callers such as
 * _Thread_Change_priority handle that.
 *
 *  Input parameters:
 *    the_thread   - pointer to thread control block
 *    new_priority - new priority
 *
 *  Output: NONE
 */

void _Thread_Set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
)
{
  the_thread->current_priority = new_priority;
  the_thread->ready            = &_Thread_Ready_chain[ new_priority ];

  _Priority_Initialize_information( &the_thread->Priority_map, new_priority );
}
    1317 
    1318 /*PAGE
    1319  *
    1320  *  _Thread_Evaluate_mode
    1321  *
    1322  *  XXX
    1323  */
    1324 
    1325 boolean _Thread_Evaluate_mode( void )
    1326 {
    1327   Thread_Control     *executing;
    1328 
    1329   executing = _Thread_Executing;
    1330 
    1331   if ( !_States_Is_ready( executing->current_state ) ||
    1332        ( !_Thread_Is_heir( executing ) && executing->is_preemptible ) ) {
    1333     _Context_Switch_necessary = TRUE;
    1334     return TRUE;
    1335   }
    1336 
    1337   return FALSE;
    1338 }
    1339 
/*PAGE
 *
 *  _Thread_Get
 *
 *  Map a thread id to a thread control block pointer, disabling dispatch
 *  for OBJECTS_ID_OF_SELF and deferring to _Objects_Get otherwise.
 *
 *  NOTE:  If we are not using static inlines, this must be a real
 *         subroutine call.
 *
 *  NOTE:  XXX... This routine may be able to be optimized.
 */

#ifndef USE_INLINES

Thread_Control *_Thread_Get (
  Objects_Id           id,
  Objects_Locations   *location
)
{
  Objects_Classes      the_class;
  Objects_Information *information;
 
  /* Self lookup: disable dispatch; callers must unnest when done. */
  if ( _Objects_Are_ids_equal( id, OBJECTS_ID_OF_SELF ) ) {
    _Thread_Disable_dispatch();
    *location = OBJECTS_LOCAL;
    return( _Thread_Executing );
  }
 
  the_class = _Objects_Get_class( id );
 
  if ( the_class > OBJECTS_CLASSES_LAST ) {
    *location = OBJECTS_ERROR;
    return (Thread_Control *) 0;
  }
 
  information = _Objects_Information_table[ the_class ];
 
  /* The class must exist and manage thread objects. */
  if ( !information || !information->is_thread ) {
    *location = OBJECTS_ERROR;
    return (Thread_Control *) 0;
  }
 
  return (Thread_Control *) _Objects_Get( information, id, location );
}

#endif
    1384 
    1385 /*PAGE
    1386  *
    1387  *  _Thread_Idle_body
    1388  *
    1389  *  This kernel routine is the idle thread.  The idle thread runs any time
    1390  *  no other thread is ready to run.  This thread loops forever with
    1391  *  interrupts enabled.
    1392  *
    1393  *  Input parameters:
    1394  *    ignored - this parameter is ignored
    1395  *
    1396  *  Output parameters:  NONE
    1397  */
    1398  
    1399 #if (CPU_PROVIDES_IDLE_THREAD_BODY == FALSE)
    1400 Thread _Thread_Idle_body(
    1401   unsigned32 ignored
    1402 )
    1403 {
    1404   for( ; ; ) ;
    1405 }
    1406 #endif
  • cpukit/score/src/thread.c

    ra238cc9 r05df0a8  
    4040 *  Output parameters:  NONE
    4141 */
    42 
    43 char *_Thread_Idle_name = "IDLE";
    4442
    4543void _Thread_Handler_initialization(
     
    103101}
    104102
/*PAGE
 *
 *  _Thread_Create_idle
 *
 *  Allocate and initialize the IDLE thread, prime _Thread_Executing and
 *  _Thread_Heir with it, and start it.
 */

void _Thread_Create_idle( void )
{
  void       *idle;
  unsigned32  idle_task_stack_size;

  /*
   *  The entire workspace is zeroed during its initialization.  Thus, all
   *  fields not explicitly assigned were explicitly zeroed by
   *  _Workspace_Initialization.
   */
 
  _Thread_Idle = _Thread_Internal_allocate();
 
  /*
   *  Initialize the IDLE task.
   */
 
  /* Default idle body: port-provided or the portable spin loop. */
#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
  idle = (void *) _CPU_Thread_Idle_body;
#else
  idle = (void *) _Thread_Idle_body;
#endif
 
  /* A BSP-supplied idle task overrides the default body. */
  if ( _CPU_Table.idle_task )
    idle = _CPU_Table.idle_task;
 
  /* Enforce the minimum stack size on the configured idle stack. */
  idle_task_stack_size =  _CPU_Table.idle_task_stack_size;
  if ( idle_task_stack_size < STACK_MINIMUM_SIZE )
    idle_task_stack_size = STACK_MINIMUM_SIZE;
 
  _Thread_Initialize(
    &_Thread_Internal_information,
    _Thread_Idle,
    NULL,        /* allocate the stack */
    idle_task_stack_size,
    CPU_IDLE_TASK_IS_FP,
    PRIORITY_MAXIMUM,
    TRUE,        /* preemptable */
    THREAD_CPU_BUDGET_ALGORITHM_NONE,
    NULL,        /* no budget algorithm callout */
    0,           /* all interrupts enabled */
    _Thread_Idle_name
  );
 
  /*
   *  WARNING!!! This is necessary to "kick" start the system and
   *             MUST be done before _Thread_Start is invoked.
   */
 
  _Thread_Heir      =
  _Thread_Executing = _Thread_Idle;
 
  _Thread_Start(
    _Thread_Idle,
    THREAD_START_NUMERIC,
    idle,
    NULL,
    0
  );
 
}
    171 
/*PAGE
 *
 *  _Thread_Start_multitasking
 *
 *  This kernel routine marks the system up, makes the heir the executing
 *  thread, and context switches from the BSP/system context into it.
 *  It does not return through the normal path; control continues in the
 *  heir thread.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 */

void _Thread_Start_multitasking( void )
{
  /*
   *  The system is now multitasking and completely initialized. 
   *  This system thread now either "goes away" in a single processor
   *  system or "turns into" the server thread in an MP system.
   */

  _System_state_Set( SYSTEM_STATE_UP );

  _Context_Switch_necessary = FALSE;

  _Thread_Executing = _Thread_Heir;

   /*
    * Get the init task(s) running.
    *
    * Note: Thread_Dispatch() is normally used to dispatch threads.  As
    *       part of its work, Thread_Dispatch() restores floating point
    *       state for the heir task.
    *
    *       This code avoids Thread_Dispatch(), and so we have to restore
    *       (actually initialize) the floating point state "by hand".
    *
    *       Ignore the CPU_USE_DEFERRED_FP_SWITCH because we must always
    *       switch in the first thread if it is FP.
    */
 

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
   /*
    *  don't need to worry about saving BSP's floating point state
    */

   if ( _Thread_Heir->fp_context != NULL )
     _Context_Restore_fp( &_Thread_Heir->fp_context );
#endif

  _Context_Switch( &_Thread_BSP_context, &_Thread_Heir->Registers );
}
    233 
    234 /*PAGE
    235  *
    236  *  _Thread_Dispatch
    237  *
    238  *  This kernel routine determines if a dispatch is needed, and if so
    239  *  dispatches to the heir thread.  Once the heir is running an attempt
    240  *  is made to dispatch any ASRs.
    241  *
    242  *  ALTERNATE ENTRY POINTS:
    243  *    void _Thread_Enable_dispatch();
    244  *
    245  *  Input parameters:  NONE
    246  *
    247  *  Output parameters:  NONE
    248  *
    249  *  INTERRUPT LATENCY:
    250  *    dispatch thread
    251  *    no dispatch thread
    252  */
    253 
    254 #if ( CPU_INLINE_ENABLE_DISPATCH == FALSE )
    255 void _Thread_Enable_dispatch( void )
    256 {
    257   if ( --_Thread_Dispatch_disable_level )
    258     return;
    259   _Thread_Dispatch();
    260 }
    261 #endif
    262 
void _Thread_Dispatch( void )
{
  Thread_Control   *executing;
  Thread_Control   *heir;
  ISR_Level         level;

  executing   = _Thread_Executing;
  _ISR_Disable( level );
  /* Loop: the heir may change again while dispatch is in progress. */
  while ( _Context_Switch_necessary == TRUE ) {
    heir = _Thread_Heir;
    _Thread_Dispatch_disable_level = 1;
    _Context_Switch_necessary = FALSE;
    _Thread_Executing = heir;
    /* Swap the per-thread Ada self pointers along with the threads. */
    executing->rtems_ada_self = rtems_ada_self;
    rtems_ada_self = heir->rtems_ada_self;
    _ISR_Enable( level );

    heir->ticks_executed++;

    _User_extensions_Thread_switch( executing, heir );

    /* A reset-timeslice heir starts each dispatch with a fresh budget. */
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = _Thread_Ticks_per_timeslice;

    /*
     *  If the CPU has hardware floating point, then we must address saving
     *  and restoring it as part of the context switch.
     *
     *  The second conditional compilation section selects the algorithm used
     *  to context switch between floating point tasks.  The deferred algorithm
     *  can be significantly better in a system with few floating point tasks
     *  because it reduces the total number of save and restore FP context
     *  operations.  However, this algorithm can not be used on all CPUs due
     *  to unpredictable use of FP registers by some compilers for integer
     *  operations.
     */

#if ( CPU_HARDWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    if ( (heir->fp_context != NULL) && !_Thread_Is_allocated_fp( heir ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &heir->fp_context );
      _Thread_Allocated_fp = heir;
    }
#else
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );

    if ( heir->fp_context != NULL )
      _Context_Restore_fp( &heir->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

    /* We resume here (possibly much later); re-read the executing thread. */
    executing = _Thread_Executing;

    _ISR_Disable( level );
  }

  _Thread_Dispatch_disable_level = 0;

  _ISR_Enable( level );

  /* Run API post-switch extensions outside the dispatch critical section. */
  if ( _Thread_Do_post_task_switch_extension ||
       executing->do_post_task_switch_extension ) {
    executing->do_post_task_switch_extension = FALSE;
    _API_extensions_Run_postswitch();
  }
 
}
    335 
/*PAGE
 *
 *  _Thread_Stack_Allocate
 *
 *  Allocate the requested stack space for the thread.
 *  return the actual size allocated after any adjustment
 *  or return zero if the allocation failed.
 *  Set the Start.stack field to the address of the stack
 */

static unsigned32 _Thread_Stack_Allocate(
  Thread_Control *the_thread,
  unsigned32 stack_size)
{
  void *stack_addr = 0;
 
  /* Enforce the minimum stack size. */
  if ( !_Stack_Is_enough( stack_size ) )
    stack_size = STACK_MINIMUM_SIZE;
 
  /*
   * Call ONLY the CPU table stack allocate hook, _or_ the
   * the RTEMS workspace allocate.  This is so the stack free
   * routine can call the correct deallocation routine.
   */

  if ( _CPU_Table.stack_allocate_hook )
  {
    stack_addr = (*_CPU_Table.stack_allocate_hook)( stack_size );
  } else {

    /*
     *  First pad the requested size so we allocate enough memory
     *  so the context initialization can align it properly.  The address
     *  returned the workspace allocate must be directly stored in the
     *  stack control block because it is later used in the free sequence.
     *
     *  Thus it is the responsibility of the CPU dependent code to
     *  get and keep the stack adjust factor, the stack alignment, and
     *  the context initialization sequence in sync.
     */

    stack_size = _Stack_Adjust_size( stack_size );
    stack_addr = _Workspace_Allocate( stack_size );
  }
 
  /* A zero return size signals allocation failure to the caller. */
  if ( !stack_addr )
      stack_size = 0;
 
  the_thread->Start.stack = stack_addr;
 
  return stack_size;
}
    388 
/*
 *  _Thread_Stack_Free
 *
 *  Deallocate the Thread's stack, using whichever mechanism
 *  (CPU table hook or RTEMS workspace) originally allocated it.
 */

static void _Thread_Stack_Free(
  Thread_Control *the_thread
)
{
    /*
     *  If the API provided the stack space, then don't free it.
     */

    if ( !the_thread->Start.core_allocated_stack )
      return;

    /*
     * Call ONLY the CPU table stack free hook, or the
     * the RTEMS workspace free.  This is so the free
     * routine properly matches the allocation of the stack.
     */

    if ( _CPU_Table.stack_free_hook )
        (*_CPU_Table.stack_free_hook)( the_thread->Start.Initial_stack.area );
    else
        _Workspace_Free( the_thread->Start.Initial_stack.area );
}
    417 
    418 /*PAGE
    419  *
    420  *  _Thread_Initialize
    421  *
    422  *  XXX
    423  */
    424 
    425 boolean _Thread_Initialize(
    426   Objects_Information                  *information,
    427   Thread_Control                       *the_thread,
    428   void                                 *stack_area,
    429   unsigned32                            stack_size,
    430   boolean                               is_fp,
    431   Priority_Control                      priority,
    432   boolean                               is_preemptible,
    433   Thread_CPU_budget_algorithms          budget_algorithm,
    434   Thread_CPU_budget_algorithm_callout   budget_callout,
    435   unsigned32                            isr_level,
    436   Objects_Name                          name
    437 )
    438 {
    439   unsigned32           actual_stack_size = 0;
    440   void                *stack = NULL;
    441   void                *fp_area;
    442   void                *extensions_area;
    443 
    444   /*
    445    *  Initialize the Ada self pointer
    446    */
    447 
    448   the_thread->rtems_ada_self = NULL;
    449 
    450   /*
    451    *  Allocate and Initialize the stack for this thread.
    452    */
    453 
    454 
    455   if ( !stack_area ) {
    456     if ( !_Stack_Is_enough( stack_size ) )
    457       actual_stack_size = STACK_MINIMUM_SIZE;
    458     else
    459       actual_stack_size = stack_size;
    460 
    461     actual_stack_size = _Thread_Stack_Allocate( the_thread, actual_stack_size );
    462  
    463     if ( !actual_stack_size )
    464       return FALSE;                     /* stack allocation failed */
    465 
    466     stack = the_thread->Start.stack;
    467     the_thread->Start.core_allocated_stack = TRUE;
    468   } else {
    469     stack = stack_area;
    470     actual_stack_size = stack_size;
    471     the_thread->Start.core_allocated_stack = FALSE;
    472   }
    473 
    474   _Stack_Initialize(
    475      &the_thread->Start.Initial_stack,
    476      stack,
    477      actual_stack_size
    478   );
    479 
    480   /*
    481    *  Allocate the floating point area for this thread
    482    */
    483  
    484   if ( is_fp ) {
    485 
    486     fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
    487     if ( !fp_area ) {
    488       _Thread_Stack_Free( the_thread );
    489       return FALSE;
    490     }
    491     fp_area = _Context_Fp_start( fp_area, 0 );
    492 
    493   } else
    494     fp_area = NULL;
    495 
    496   the_thread->fp_context       = fp_area;
    497   the_thread->Start.fp_context = fp_area;
    498 
    499   /*
    500    *  Allocate the extensions area for this thread
    501    */
    502 
    503   if ( _Thread_Maximum_extensions ) {
    504     extensions_area = _Workspace_Allocate(
    505       (_Thread_Maximum_extensions + 1) * sizeof( void * )
    506     );
    507 
    508     if ( !extensions_area ) {
    509       if ( fp_area )
    510         (void) _Workspace_Free( fp_area );
    511 
    512       _Thread_Stack_Free( the_thread );
    513 
    514       return FALSE;
    515     }
    516   } else
    517     extensions_area = NULL;
    518  
    519   the_thread->extensions = (void **) extensions_area;
    520 
    521   /*
    522    *  General initialization
    523    */
    524 
    525   the_thread->Start.is_preemptible   = is_preemptible;
    526   the_thread->Start.budget_algorithm = budget_algorithm;
    527   the_thread->Start.budget_callout   = budget_callout;
    528   the_thread->Start.isr_level        = isr_level;
    529 
    530   the_thread->current_state          = STATES_DORMANT;
    531   the_thread->resource_count         = 0;
    532   the_thread->real_priority          = priority;
    533   the_thread->Start.initial_priority = priority;
    534   the_thread->ticks_executed         = 0;
    535  
    536   _Thread_Set_priority( the_thread, priority );
    537 
    538   /*
    539    *  Open the object
    540    */
    541 
    542   _Objects_Open( information, &the_thread->Object, name );
    543 
    544   /*
    545    *  Invoke create extensions
    546    */
    547 
    548   if ( !_User_extensions_Thread_create( the_thread ) ) {
    549 
    550     if ( extensions_area )
    551       (void) _Workspace_Free( extensions_area );
    552 
    553     if ( fp_area )
    554       (void) _Workspace_Free( fp_area );
    555 
    556     _Thread_Stack_Free( the_thread );
    557 
    558     return FALSE;
    559   }
    560 
    561   return TRUE;
    562    
    563 }
    564 
    565 /*
    566  *  _Thread_Start
    567  *
    568  *  DESCRIPTION:
    569  *
    570  *  XXX
    571  */
    572  
    573 boolean _Thread_Start(
    574   Thread_Control       *the_thread,
    575   Thread_Start_types    the_prototype,
    576   void                 *entry_point,
    577   void                 *pointer_argument,
    578   unsigned32            numeric_argument
    579 )
    580 {
    581   if ( _States_Is_dormant( the_thread->current_state ) ) {
    582  
    583     the_thread->Start.entry_point      = (Thread_Entry) entry_point;
    584    
    585     the_thread->Start.prototype        = the_prototype;
    586     the_thread->Start.pointer_argument = pointer_argument;
    587     the_thread->Start.numeric_argument = numeric_argument;
    588  
    589     _Thread_Load_environment( the_thread );
    590  
    591     _Thread_Ready( the_thread );
    592  
    593     _User_extensions_Thread_start( the_thread );
    594  
    595     return TRUE;
    596   }
    597  
    598   return FALSE;
    599  
    600 }
    601 
/*
 *  _Thread_Restart
 *
 *  DESCRIPTION:
 *
 *  Return a non-dormant thread to its start condition: reset its
 *  execution attributes and priority, detach it from any thread queue
 *  or watchdog, reload its environment, and ready it.  If the thread
 *  restarts itself, control does not return normally.
 *  Returns TRUE on success, FALSE if the thread was dormant.
 */
 
boolean _Thread_Restart(
  Thread_Control      *the_thread,
  void                *pointer_argument,
  unsigned32           numeric_argument
)
{
  if ( !_States_Is_dormant( the_thread->current_state ) ) {
 
    _Thread_Set_transient( the_thread );
    the_thread->resource_count = 0;
    /* Restore the execution attributes recorded at creation. */
    the_thread->is_preemptible   = the_thread->Start.is_preemptible;
    the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
    the_thread->budget_callout   = the_thread->Start.budget_callout;

    the_thread->Start.pointer_argument = pointer_argument;
    the_thread->Start.numeric_argument = numeric_argument;
 
    /* Pull the thread off any thread queue or pending timer. */
    if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {
 
      if ( _Watchdog_Is_active( &the_thread->Timer ) )
        (void) _Watchdog_Remove( &the_thread->Timer );
    }

    /* Drop any inherited priority back to the initial priority. */
    if ( the_thread->current_priority != the_thread->Start.initial_priority ) {
      the_thread->real_priority = the_thread->Start.initial_priority;
      _Thread_Set_priority( the_thread, the_thread->Start.initial_priority );
    }
 
    _Thread_Load_environment( the_thread );
 
    _Thread_Ready( the_thread );
 
    _User_extensions_Thread_restart( the_thread );
 
    /* Restarting ourselves must not return through the old context. */
    if ( _Thread_Is_executing ( the_thread ) )
      _Thread_Restart_self();
 
    return TRUE;
  }
 
  return FALSE;
}
    652 
    653 /*
    654  *  _Thread_Close
    655  *
    656  *  DESCRIPTION:
    657  *
    658  *  XXX
    659  */
    660  
    661 void _Thread_Close(
    662   Objects_Information  *information,
    663   Thread_Control       *the_thread
    664 )
    665 {
    666   _Objects_Close( information, &the_thread->Object );
    667  
    668   _Thread_Set_state( the_thread, STATES_TRANSIENT );
    669  
    670   if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {
    671  
    672     if ( _Watchdog_Is_active( &the_thread->Timer ) )
    673       (void) _Watchdog_Remove( &the_thread->Timer );
    674   }
    675 
    676   _User_extensions_Thread_delete( the_thread );
    677  
    678 #if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    679   if ( _Thread_Is_allocated_fp( the_thread ) )
    680     _Thread_Deallocate_fp();
    681 #endif
    682   the_thread->fp_context = NULL;
    683 
    684   if ( the_thread->Start.fp_context )
    685   (void) _Workspace_Free( the_thread->Start.fp_context );
    686 
    687   _Thread_Stack_Free( the_thread );
    688 
    689   if ( the_thread->extensions )
    690     (void) _Workspace_Free( the_thread->extensions );
    691 
    692   the_thread->Start.stack = NULL;
    693   the_thread->extensions = NULL;
    694 }
    695 
/*PAGE
 *
 *  _Thread_Ready
 *
 *  This kernel routine readies the requested thread, the thread chain
 *  is adjusted.  A new heir thread may be selected.
 *
 *  Input parameters:
 *    the_thread - pointer to thread control block
 *
 *  Output parameters:  NONE
 *
 *  NOTE:  This routine uses the "blocking" heir selection mechanism.
 *         This insures the correct heir after a thread restart.
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Ready(
  Thread_Control *the_thread
)
{
  ISR_Level              level;
  Thread_Control *heir;

  _ISR_Disable( level );

  the_thread->current_state = STATES_READY;

  _Priority_Add_to_bit_map( &the_thread->Priority_map );

  _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );

  /* Briefly open the interrupt window to bound interrupt latency. */
  _ISR_Flash( level );

  _Thread_Calculate_heir();

  heir = _Thread_Heir;

  /* Preempt only if a new heir was selected and we are preemptible. */
  if ( !_Thread_Is_executing( heir ) && _Thread_Executing->is_preemptible )
    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}
    742 
/*PAGE
 *
 *  _Thread_Clear_state
 *
 *  This kernel routine clears the appropriate states in the
 *  requested thread.  If clearing leaves the thread ready, it is
 *  placed back on its ready chain and the Heir thread is updated
 *  if this thread now has higher priority than the current heir.
 *
 *  Input parameters:
 *    the_thread - pointer to thread control block
 *    state      - state set to clear
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    priority map
 *    select heir
 */


void _Thread_Clear_state(
  Thread_Control *the_thread,
  States_Control  state
)
{
  ISR_Level       level;
  States_Control  current_state;

  _ISR_Disable( level );
    current_state = the_thread->current_state;

    /* Only act if at least one of the requested states is set. */
    if ( current_state & state ) {
      current_state =
      the_thread->current_state = _States_Clear( state, current_state );

      /* If no blocking states remain, the thread becomes ready. */
      if ( _States_Is_ready( current_state ) ) {

        _Priority_Add_to_bit_map( &the_thread->Priority_map );

        _Chain_Append_unprotected(the_thread->ready, &the_thread->Object.Node);

        /* Window for pending interrupts before the heir check. */
        _ISR_Flash( level );

        /* Take over as heir if strictly more important than the current
         * heir.  A priority 0 thread forces a dispatch even when the
         * executing thread is not preemptible.
         */
        if ( the_thread->current_priority < _Thread_Heir->current_priority ) {
          _Thread_Heir = the_thread;
          if ( _Thread_Executing->is_preemptible ||
               the_thread->current_priority == 0 )
            _Context_Switch_necessary = TRUE;
        }
      }
  }
  _ISR_Enable( level );
}
    796 
/*PAGE
 *
 * _Thread_Set_state
 *
 * This kernel routine sets the requested state in the THREAD.  If the
 * thread was ready, it is removed from its ready chain and a new heir
 * and/or dispatch may be selected.
 *
 * Input parameters:
 *   the_thread   - pointer to thread control block
 *   state - state to be set
 *
 * Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select map
 */

void _Thread_Set_state(
  Thread_Control *the_thread,
  States_Control         state
)
{
  ISR_Level             level;
  Chain_Control *ready;

  ready = the_thread->ready;
  _ISR_Disable( level );

  /* Already blocked:  just OR in the additional state.  The thread is
   * not on a ready chain, so no chain maintenance is required.
   */
  if ( !_States_Is_ready( the_thread->current_state ) ) {
    the_thread->current_state =
       _States_Set( state, the_thread->current_state );
    _ISR_Enable( level );
    return;
  }

  /* The thread was ready; the new state replaces STATES_READY. */
  the_thread->current_state = state;

  /* Remove it from the ready chain; if it was the only thread at this
   * priority, also clear the priority's bit map entry.
   */
  if ( _Chain_Has_only_one_node( ready ) ) {

    _Chain_Initialize_empty( ready );
    _Priority_Remove_from_bit_map( &the_thread->Priority_map );

  } else
    _Chain_Extract_unprotected( &the_thread->Object.Node );

  _ISR_Flash( level );

  /* Blocking the heir requires picking a new heir; blocking the
   * executing thread requires a dispatch.
   */
  if ( _Thread_Is_heir( the_thread ) )
     _Thread_Calculate_heir();

  if ( _Thread_Is_executing( the_thread ) )
    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}
    852 
/*PAGE
 *
 *  _Thread_Set_transient
 *
 *  This kernel routine places the requested thread in the transient state
 *  which will remove it from the ready queue, if necessary.  No
 *  rescheduling is necessary because it is assumed that the transient
 *  state will be cleared before dispatching is enabled.
 *
 *  Input parameters:
 *    the_thread - pointer to thread control block
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    only case
 */

void _Thread_Set_transient(
  Thread_Control *the_thread
)
{
  ISR_Level             level;
  unsigned32            old_state;
  Chain_Control *ready;

  ready = the_thread->ready;
  _ISR_Disable( level );

  old_state = the_thread->current_state;
  the_thread->current_state = _States_Set( STATES_TRANSIENT, old_state );

  /* If the thread was ready, take it off its ready chain; when it was
   * the only thread at this priority, clear the bit map entry too.
   * Note: the heir is deliberately NOT recomputed here (see above).
   */
  if ( _States_Is_ready( old_state ) ) {
    if ( _Chain_Has_only_one_node( ready ) ) {

      _Chain_Initialize_empty( ready );
      _Priority_Remove_from_bit_map( &the_thread->Priority_map );

    } else
      _Chain_Extract_unprotected( &the_thread->Object.Node );
  }

  _ISR_Enable( level );

}
    898 
/*PAGE
 *
 *  _Thread_Reset_timeslice
 *
 *  This routine will remove the running thread from the ready chain
 *  and place it immediately at the rear of this chain and then the
 *  timeslice counter is reset.  The heir THREAD will be updated if
 *  the running is also currently the heir.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Reset_timeslice( void )
{
  ISR_Level       level;
  Thread_Control *executing;
  Chain_Control  *ready;

  executing = _Thread_Executing;
  ready     = executing->ready;
  _ISR_Disable( level );
    /* Sole thread at this priority:  rotating the chain is a no-op. */
    if ( _Chain_Has_only_one_node( ready ) ) {
      _ISR_Enable( level );
      return;
    }
    /* Rotate:  move the executing thread to the rear of its chain. */
    _Chain_Extract_unprotected( &executing->Object.Node );
    _Chain_Append_unprotected( ready, &executing->Object.Node );

  /* Window for pending interrupts before updating the heir. */
  _ISR_Flash( level );

    /* If executing was the heir, the new chain head becomes heir, and
     * a dispatch is always requested after a rotation.
     */
    if ( _Thread_Is_heir( executing ) )
      _Thread_Heir = (Thread_Control *) ready->first;

    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}
    942 
    943 /*PAGE
    944  *
    945  *  _Thread_Tickle_timeslice
    946  *
    947  *  This scheduler routine determines if timeslicing is enabled
    948  *  for the currently executing thread and, if so, updates the
    949  *  timeslice count and checks for timeslice expiration.
    950  *
    951  *  Input parameters:   NONE
    952  *
    953  *  Output parameters:  NONE
    954  */
    955 
    956 void _Thread_Tickle_timeslice( void )
    957 {
    958   Thread_Control *executing;
    959 
    960   executing = _Thread_Executing;
    961 
    962   /*
    963    *  Increment the number of ticks this thread has been executing
    964    */
    965 
    966   executing->ticks_executed++;
    967 
    968   /*
    969    *  If the thread is not preemptible or is not ready, then
    970    *  just return.
    971    */
    972 
    973   if ( !executing->is_preemptible )
    974     return;
    975 
    976   if ( !_States_Is_ready( executing->current_state ) )
    977     return;
    978 
    979   /*
    980    *  The cpu budget algorithm determines what happens next.
    981    */
    982 
    983   switch ( executing->budget_algorithm ) {
    984     case THREAD_CPU_BUDGET_ALGORITHM_NONE:
    985       break;
    986 
    987     case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
    988     case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
    989       if ( --executing->cpu_time_budget == 0 ) {
    990         _Thread_Reset_timeslice();
    991         executing->cpu_time_budget = _Thread_Ticks_per_timeslice;
    992       }
    993       break;
    994 
    995     case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
    996       if ( --executing->cpu_time_budget == 0 )
    997         (*executing->budget_callout)( executing );
    998       break;
    999   }
    1000 }
    1001 
/*PAGE
 *
 *  _Thread_Yield_processor
 *
 *  This kernel routine will remove the running THREAD from the ready chain
 *  and place it immediately at the rear of this chain.  Reset timeslice
 *  and yield the processor functions both use this routine, therefore if
 *  reset is TRUE and this is the only thread on the chain then the
 *  timeslice counter is reset.  The heir THREAD will be updated if the
 *  running is also currently the heir.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Yield_processor( void )
{
  ISR_Level       level;
  Thread_Control *executing;
  Chain_Control  *ready;

  executing = _Thread_Executing;
  ready     = executing->ready;
  _ISR_Disable( level );
    /* Other threads share this priority:  rotate to the rear of the
     * chain, fix up the heir if needed, and request a dispatch.
     */
    if ( !_Chain_Has_only_one_node( ready ) ) {
      _Chain_Extract_unprotected( &executing->Object.Node );
      _Chain_Append_unprotected( ready, &executing->Object.Node );

      /* Window for pending interrupts before updating the heir. */
      _ISR_Flash( level );

      if ( _Thread_Is_heir( executing ) )
        _Thread_Heir = (Thread_Control *) ready->first;
      _Context_Switch_necessary = TRUE;
    }
    /* Alone at this priority:  only dispatch if a higher priority heir
     * already exists.
     */
    else if ( !_Thread_Is_heir( executing ) )
      _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}
    1046 
/*PAGE
 *
 *  _Thread_Load_environment
 *
 *  Load starting environment for another thread from its start area in the
 *  thread.  Only called from t_restart and t_start.
 *
 *  Input parameters:
 *    the_thread - thread control block pointer
 *
 *  Output parameters:  NONE
 */

void _Thread_Load_environment(
  Thread_Control *the_thread
)
{
  boolean is_fp = FALSE;

  /* If a floating point context area was allocated at create time,
   * make it the live context and (re)initialize it.
   * NOTE(review): the ADDRESS of the fp_context pointer is passed;
   * the per-port _Context_Initialize_fp is presumed to take a handle
   * (pointer to pointer) -- confirm against the port's definition.
   */
  if ( the_thread->Start.fp_context ) {
    the_thread->fp_context = the_thread->Start.fp_context;
    _Context_Initialize_fp( &the_thread->fp_context );
    is_fp = TRUE;
  }

  /* Reload the scheduling attributes from the start area so a restart
   * returns the thread to its originally requested behavior.
   */
  the_thread->do_post_task_switch_extension = FALSE;
  the_thread->is_preemptible   = the_thread->Start.is_preemptible;
  the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
  the_thread->budget_callout   = the_thread->Start.budget_callout;

  /* Build an initial register context that enters _Thread_Handler on
   * the first context switch to this thread.
   */
  _Context_Initialize(
    &the_thread->Registers,
    the_thread->Start.Initial_stack.area,
    the_thread->Start.Initial_stack.size,
    the_thread->Start.isr_level,
    _Thread_Handler,
    is_fp
  );

}
    1087 
/*PAGE
 *
 *  _Thread_Handler
 *
 *  This routine is the "primal" entry point for all threads.
 *  _Context_Initialize() dummies up the thread's initial context
 *  to cause the first Context_Switch() to jump to _Thread_Handler().
 *
 *  This routine is also the default thread exited error handler.  It is
 *  returned to when a thread exits.  The configured fatal error handler
 *  is invoked to process the exit.
 *
 *  NOTE:
 *
 *  On entry, it is assumed all interrupts are blocked and that this
 *  routine needs to set the initial isr level.  This may or may not
 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
 *  setting the initial isr level properly here is safe.
 *
 *  Currently this is only really needed for the posix port,
 *  ref: _Context_Switch in unix/cpu.c
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 */

void _Thread_Handler( void )
{
  ISR_Level  level;
  Thread_Control *executing;

  executing = _Thread_Executing;

  /*
   * have to put level into a register for those cpu's that use
   * inline asm here
   */

  level = executing->Start.isr_level;
  _ISR_Set_level(level);

  /*
   * Take care that 'begin' extensions get to complete before
   * 'switch' extensions can run.  This means must keep dispatch
   * disabled until all 'begin' extensions complete.
   */

  _User_extensions_Thread_begin( executing );

  /*
   *  At this point, the dispatch disable level BETTER be 1.
   */

  _Thread_Enable_dispatch();

  /*
   *  Invoke the thread's actual entry point with the argument style
   *  recorded in the start area at create/start time.
   */
  switch ( executing->Start.prototype ) {
    case THREAD_START_NUMERIC:
      (*(Thread_Entry_numeric) executing->Start.entry_point)(
        executing->Start.numeric_argument
      );
      break;
    case THREAD_START_POINTER:
      (*(Thread_Entry_pointer) executing->Start.entry_point)(
        executing->Start.pointer_argument
      );
      break;
    case THREAD_START_BOTH_POINTER_FIRST:
      (*(Thread_Entry_both_pointer_first) executing->Start.entry_point)(
        executing->Start.pointer_argument,
        executing->Start.numeric_argument
      );
      break;
    case THREAD_START_BOTH_NUMERIC_FIRST:
      (*(Thread_Entry_both_numeric_first) executing->Start.entry_point)(
        executing->Start.numeric_argument,
        executing->Start.pointer_argument
      );
      break;
  }

  /*
   *  The entry point returned:  a thread "exit".  Run the exited user
   *  extensions, then report a fatal error -- there is no thread to
   *  return to, so this routine must not return.
   */
  _User_extensions_Thread_exitted( executing );

  _Internal_error_Occurred(
    INTERNAL_ERROR_CORE,
    TRUE,
    INTERNAL_ERROR_THREAD_EXITTED
  );
}
    1178 
    1179 /*PAGE
    1180  *
    1181  *  _Thread_Delay_ended
    1182  *
    1183  *  This routine processes a thread whose delay period has ended.
    1184  *  It is called by the watchdog handler.
    1185  *
    1186  *  Input parameters:
    1187  *    id - thread id
    1188  *
    1189  *  Output parameters: NONE
    1190  */
    1191 
    1192 void _Thread_Delay_ended(
    1193   Objects_Id  id,
    1194   void       *ignored
    1195 )
    1196 {
    1197   Thread_Control    *the_thread;
    1198   Objects_Locations  location;
    1199 
    1200   the_thread = _Thread_Get( id, &location );
    1201   switch ( location ) {
    1202     case OBJECTS_ERROR:
    1203     case OBJECTS_REMOTE:  /* impossible */
    1204       break;
    1205     case OBJECTS_LOCAL:
    1206       _Thread_Unblock( the_thread );
    1207       _Thread_Unnest_dispatch();
    1208       break;
    1209   }
    1210 }
    1211 
/*PAGE
 *
 *  _Thread_Change_priority
 *
 *  This kernel routine changes the priority of the thread.  The
 *  thread chain is adjusted if necessary.
 *
 *  Input parameters:
 *    the_thread   - pointer to thread control block
 *    new_priority - ultimate priority
 *    prepend_it   - TRUE if the thread should be prepended to the chain
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Change_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  boolean           prepend_it
)
{
  ISR_Level level;
  /* boolean   do_prepend = FALSE; */

  /*
   *  If this is a case where prepending the task to its priority is
   *  potentially desired, then we need to consider whether to do it.
   *  This usually occurs when a task lowers its priority implicitly as
   *  the result of losing inherited priority.  Normal explicit priority
   *  change calls (e.g. rtems_task_set_priority) should always do an
   *  append not a prepend.
   */

  /*
   *  Technically, the prepend should be conditional on the thread
   *  lowering its priority, but that does not allow cxd2004 of the
   *  acvc 2.0.1 to pass with rtems 4.0.0.  This should change when
   *  gnat redoes its priority scheme.
   */
/*
  if ( prepend_it &&
       _Thread_Is_executing( the_thread ) &&
       new_priority >= the_thread->current_priority )
    prepend_it = TRUE;
*/

  /* Pull the thread off its ready chain while its priority fields and
   * ready chain pointer are updated.
   */
  _Thread_Set_transient( the_thread );

  if ( the_thread->current_priority != new_priority )
    _Thread_Set_priority( the_thread, new_priority );

  _ISR_Disable( level );

  /* Clear TRANSIENT; if other blocking states remain, the thread stays
   * off the ready chain and there is nothing more to do.
   */
  the_thread->current_state =
    _States_Clear( STATES_TRANSIENT, the_thread->current_state );

  if ( ! _States_Is_ready( the_thread->current_state ) ) {
    _ISR_Enable( level );
    return;
  }

  /* Reinsert on the (possibly new) ready chain, at the front or rear
   * as requested by the caller.
   */
  _Priority_Add_to_bit_map( &the_thread->Priority_map );
  if ( prepend_it )
    _Chain_Prepend_unprotected( the_thread->ready, &the_thread->Object.Node );
  else
    _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );

  /* Window for pending interrupts before recomputing the heir. */
  _ISR_Flash( level );

  _Thread_Calculate_heir();

  if ( !_Thread_Is_executing_also_the_heir() &&
       _Thread_Executing->is_preemptible )
    _Context_Switch_necessary = TRUE;
  _ISR_Enable( level );
}
    1292 
    1293 /*PAGE
    1294  *
    1295  * _Thread_Set_priority
    1296  *
    1297  * This directive enables and disables several modes of
    1298  * execution for the requesting thread.
    1299  *
    1300  *  Input parameters:
    1301  *    the_thread   - pointer to thread priority
    1302  *    new_priority - new priority
    1303  *
    1304  *  Output: NONE
    1305  */
    1306 
    1307 void _Thread_Set_priority(
    1308   Thread_Control   *the_thread,
    1309   Priority_Control  new_priority
    1310 )
    1311 {
    1312   the_thread->current_priority = new_priority;
    1313   the_thread->ready            = &_Thread_Ready_chain[ new_priority ];
    1314 
    1315   _Priority_Initialize_information( &the_thread->Priority_map, new_priority );
    1316 }
    1317 
    1318 /*PAGE
    1319  *
    1320  *  _Thread_Evaluate_mode
    1321  *
    1322  *  XXX
    1323  */
    1324 
    1325 boolean _Thread_Evaluate_mode( void )
    1326 {
    1327   Thread_Control     *executing;
    1328 
    1329   executing = _Thread_Executing;
    1330 
    1331   if ( !_States_Is_ready( executing->current_state ) ||
    1332        ( !_Thread_Is_heir( executing ) && executing->is_preemptible ) ) {
    1333     _Context_Switch_necessary = TRUE;
    1334     return TRUE;
    1335   }
    1336 
    1337   return FALSE;
    1338 }
    1339 
    1340 /*PAGE
    1341  *
    1342  *  _Thread_Get
    1343  *
    1344  *  NOTE:  If we are not using static inlines, this must be a real
    1345  *         subroutine call.
    1346  *
    1347  *  NOTE:  XXX... This routine may be able to be optimized.
    1348  */
    1349 
    1350 #ifndef USE_INLINES
    1351 
    1352 Thread_Control *_Thread_Get (
    1353   Objects_Id           id,
    1354   Objects_Locations   *location
    1355 )
    1356 {
    1357   Objects_Classes      the_class;
    1358   Objects_Information *information;
    1359  
    1360   if ( _Objects_Are_ids_equal( id, OBJECTS_ID_OF_SELF ) ) {
    1361     _Thread_Disable_dispatch();
    1362     *location = OBJECTS_LOCAL;
    1363     return( _Thread_Executing );
    1364   }
    1365  
    1366   the_class = _Objects_Get_class( id );
    1367  
    1368   if ( the_class > OBJECTS_CLASSES_LAST ) {
    1369     *location = OBJECTS_ERROR;
    1370     return (Thread_Control *) 0;
    1371   }
    1372  
    1373   information = _Objects_Information_table[ the_class ];
    1374  
    1375   if ( !information || !information->is_thread ) {
    1376     *location = OBJECTS_ERROR;
    1377     return (Thread_Control *) 0;
    1378   }
    1379  
    1380   return (Thread_Control *) _Objects_Get( information, id, location );
    1381 }
    1382 
    1383 #endif
    1384 
    1385 /*PAGE
    1386  *
    1387  *  _Thread_Idle_body
    1388  *
    1389  *  This kernel routine is the idle thread.  The idle thread runs any time
    1390  *  no other thread is ready to run.  This thread loops forever with
    1391  *  interrupts enabled.
    1392  *
    1393  *  Input parameters:
    1394  *    ignored - this parameter is ignored
    1395  *
    1396  *  Output parameters:  NONE
    1397  */
    1398  
    1399 #if (CPU_PROVIDES_IDLE_THREAD_BODY == FALSE)
    1400 Thread _Thread_Idle_body(
    1401   unsigned32 ignored
    1402 )
    1403 {
    1404   for( ; ; ) ;
    1405 }
    1406 #endif
Note: See TracChangeset for help on using the changeset viewer.