source: rtems/c/src/exec/score/src/thread.c @ ecc3fe3

4.104.114.84.95
Last change on this file since ecc3fe3 was ecc3fe3, checked in by Joel Sherrill <joel.sherrill@…>, on 09/23/98 at 16:41:00

IDLE task stack size now specified as a field in the CPU Table for all
ports.

  • Property mode set to 100644
File size: 33.2 KB
Line 
1/*
2 *  Thread Handler
3 *
4 *
5 *  COPYRIGHT (c) 1989-1998.
6 *  On-Line Applications Research Corporation (OAR).
7 *  Copyright assigned to U.S. Government, 1994.
8 *
9 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
11 *  http://www.OARcorp.com/rtems/license.html.
12 *
13 *  $Id$
14 */
15
16#include <rtems/system.h>
17#include <rtems/score/apiext.h>
18#include <rtems/score/context.h>
19#include <rtems/score/interr.h>
20#include <rtems/score/isr.h>
21#include <rtems/score/object.h>
22#include <rtems/score/priority.h>
23#include <rtems/score/states.h>
24#include <rtems/score/sysstate.h>
25#include <rtems/score/thread.h>
26#include <rtems/score/threadq.h>
27#include <rtems/score/userext.h>
28#include <rtems/score/wkspace.h>
29
/*PAGE
 *
 *  _Thread_Handler_initialization
 *
 *  This routine initializes all thread manager related data structures.
 *
 *  Input parameters:
 *    ticks_per_timeslice - clock ticks per quantum
 *    maximum_extensions  - number of user extension slots per thread
 *    maximum_proxies     - number of proxies to initialize
 *
 *  Output parameters:  NONE
 */
42
/* Object name given to the IDLE thread when it is created. */
char *_Thread_Idle_name = "IDLE";
44
void _Thread_Handler_initialization(
  unsigned32   ticks_per_timeslice,
  unsigned32   maximum_extensions,
  unsigned32   maximum_proxies
)
{
  unsigned32      index;

  /*
   * BOTH stacks hooks must be set or both must be NULL.
   * Do not allow mixture.  A mismatched pair is a fatal
   * configuration error since allocate and free must pair up.
   */

  if ( !( ( _CPU_Table.stack_allocate_hook == 0 )
       == ( _CPU_Table.stack_free_hook == 0 ) ) )
    _Internal_error_Occurred(
      INTERNAL_ERROR_CORE,
      TRUE,
      INTERNAL_ERROR_BAD_STACK_HOOK
    );

  /* No thread is executing yet and no context switch is pending. */
  _Context_Switch_necessary = FALSE;
  _Thread_Executing         = NULL;
  _Thread_Heir              = NULL;
  _Thread_Allocated_fp      = NULL;

  _Thread_Do_post_task_switch_extension = 0;

  _Thread_Maximum_extensions = maximum_extensions;

  _Thread_Ticks_per_timeslice  = ticks_per_timeslice;

  /* One FIFO ready chain per priority level (0 .. PRIORITY_MAXIMUM). */
  _Thread_Ready_chain = (Chain_Control *) _Workspace_Allocate_or_fatal_error(
    (PRIORITY_MAXIMUM + 1) * sizeof(Chain_Control)
  );

  for ( index=0; index <= PRIORITY_MAXIMUM ; index++ )
    _Chain_Initialize_empty( &_Thread_Ready_chain[ index ] );

  _Thread_MP_Handler_initialization( maximum_proxies );

  /*
   *  Initialize this class of objects.  A multiprocessing system
   *  allows for one extra internal thread beyond the IDLE thread.
   *  NOTE(review): the trailing boolean/8 arguments are object class
   *  attributes -- confirm against _Objects_Initialize_information.
   */
 
  _Objects_Initialize_information(
    &_Thread_Internal_information,
    OBJECTS_INTERNAL_THREADS,
    FALSE,
    ( _System_state_Is_multiprocessing ) ?  2 : 1,
    sizeof( Thread_Control ),
    TRUE,
    8,
    TRUE
  );

}
102
103/*PAGE
104 *
105 *  _Thread_Create_idle
106 */
107
void _Thread_Create_idle( void )
{
  void       *idle;
  unsigned32  idle_task_stack_size;

  /*
   *  The entire workspace is zeroed during its initialization.  Thus, all
   *  fields not explicitly assigned were explicitly zeroed by
   *  _Workspace_Initialization.
   */
 
  _Thread_Idle = _Thread_Internal_allocate();
 
  /*
   *  Initialize the IDLE task.  The default body comes either from the
   *  port (when it provides one) or from the core, but the application
   *  may override it via the CPU table.
   */
 
#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
  idle = (void *) _CPU_Thread_Idle_body;
#else
  idle = (void *) _Thread_Idle_body;
#endif
 
  if ( _CPU_Table.idle_task )
    idle = _CPU_Table.idle_task;
 
  /* Never give the IDLE task less than the configured minimum stack. */
  idle_task_stack_size =  _CPU_Table.idle_task_stack_size;
  if ( idle_task_stack_size < STACK_MINIMUM_SIZE )
    idle_task_stack_size = STACK_MINIMUM_SIZE;
 
  /* IDLE runs at the lowest priority and must always be preemptable. */
  _Thread_Initialize(
    &_Thread_Internal_information,
    _Thread_Idle,
    NULL,        /* allocate the stack */
    idle_task_stack_size,
    CPU_IDLE_TASK_IS_FP,
    PRIORITY_MAXIMUM,
    TRUE,        /* preemptable */
    THREAD_CPU_BUDGET_ALGORITHM_NONE,
    NULL,        /* no budget algorithm callout */
    0,           /* all interrupts enabled */
    _Thread_Idle_name
  );
 
  /*
   *  WARNING!!! This is necessary to "kick" start the system and
   *             MUST be done before _Thread_Start is invoked.
   */
 
  _Thread_Heir      =
  _Thread_Executing = _Thread_Idle;
 
  _Thread_Start(
    _Thread_Idle,
    THREAD_START_NUMERIC,
    idle,
    NULL,
    0
  );
 
}
169
170/*PAGE
171 *
172 *  _Thread_Start_multitasking
173 *
174 *  This kernel routine readies the requested thread, the thread chain
175 *  is adjusted.  A new heir thread may be selected.
176 *
177 *  Input parameters:
178 *    system_thread - pointer to system initialization thread control block
179 *    idle_thread   - pointer to idle thread control block
180 *
181 *  Output parameters:  NONE
182 *
183 *  NOTE:  This routine uses the "blocking" heir selection mechanism.
184 *         This insures the correct heir after a thread restart.
185 *
186 *  INTERRUPT LATENCY:
187 *    ready chain
188 *    select heir
189 */
190
void _Thread_Start_multitasking( void )
{
  /*
   *  The system is now multitasking and completely initialized. 
   *  This system thread now either "goes away" in a single processor
   *  system or "turns into" the server thread in an MP system.
   */

  _System_state_Set( SYSTEM_STATE_UP );

  _Context_Switch_necessary = FALSE;

  _Thread_Executing = _Thread_Heir;

   /*
    * Get the init task(s) running.
    *
    * Note: Thread_Dispatch() is normally used to dispatch threads.  As
    *       part of its work, Thread_Dispatch() restores floating point
    *       state for the heir task.
    *
    *       This code avoids Thread_Dispatch(), and so we have to restore
    *       (actually initialize) the floating point state "by hand".
    *
    *       Ignore the CPU_USE_DEFERRED_FP_SWITCH because we must always
    *       switch in the first thread if it is FP.
    */
 

#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
   /*
    *  don't need to worry about saving BSP's floating point state --
    *  the system never returns to it as a thread.
    */

   if ( _Thread_Heir->fp_context != NULL )
     _Context_Restore_fp( &_Thread_Heir->fp_context );
#endif

  /* Switch away from the BSP startup context into the first thread. */
  _Context_Switch( &_Thread_BSP_context, &_Thread_Heir->Registers );
}
231
232/*PAGE
233 *
234 *  _Thread_Dispatch
235 *
236 *  This kernel routine determines if a dispatch is needed, and if so
237 *  dispatches to the heir thread.  Once the heir is running an attempt
238 *  is made to dispatch any ASRs.
239 *
240 *  ALTERNATE ENTRY POINTS:
241 *    void _Thread_Enable_dispatch();
242 *
243 *  Input parameters:  NONE
244 *
245 *  Output parameters:  NONE
246 *
247 *  INTERRUPT LATENCY:
248 *    dispatch thread
249 *    no dispatch thread
250 */
251
#if ( CPU_INLINE_ENABLE_DISPATCH == FALSE )
/*
 *  Out-of-line alternate entry point: decrement the dispatch disable
 *  level and, once it reaches zero, perform any pending dispatch.
 */
void _Thread_Enable_dispatch( void )
{
  if ( --_Thread_Dispatch_disable_level )
    return;
  _Thread_Dispatch();
}
#endif
260
void _Thread_Dispatch( void )
{
  Thread_Control   *executing;
  Thread_Control   *heir;
  ISR_Level         level;

  executing   = _Thread_Executing;
  _ISR_Disable( level );
  /*
   *  The heir may change while extensions run or during the context
   *  switch itself, so re-test the dispatch flag with interrupts
   *  disabled on every pass of the loop.
   */
  while ( _Context_Switch_necessary == TRUE ) {
    heir = _Thread_Heir;
    /* Dispatching stays disabled (level 1) for the whole switch. */
    _Thread_Dispatch_disable_level = 1;
    _Context_Switch_necessary = FALSE;
    _Thread_Executing = heir;
    _ISR_Enable( level );

    heir->ticks_executed++;       /* execution statistics */

    _User_extensions_Thread_switch( executing, heir );

    /* A timesliced thread gets a fresh quantum each time it is dispatched. */
    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = _Thread_Ticks_per_timeslice;

    /*
     *  If the CPU has hardware floating point, then we must address saving
     *  and restoring it as part of the context switch.
     *
     *  The second conditional compilation section selects the algorithm used
     *  to context switch between floating point tasks.  The deferred algorithm
     *  can be significantly better in a system with few floating point tasks
     *  because it reduces the total number of save and restore FP context
     *  operations.  However, this algorithm can not be used on all CPUs due
     *  to unpredictable use of FP registers by some compilers for integer
     *  operations.
     */

#if ( CPU_HARDWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    if ( (heir->fp_context != NULL) && !_Thread_Is_allocated_fp( heir ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &heir->fp_context );
      _Thread_Allocated_fp = heir;
    }
#else
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );

    if ( heir->fp_context != NULL )
      _Context_Restore_fp( &heir->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

    /*
     *  Control returns here when THIS thread is dispatched again,
     *  possibly much later; reload the executing thread pointer.
     */
    executing = _Thread_Executing;

    _ISR_Disable( level );
  }

  _Thread_Dispatch_disable_level = 0;

  _ISR_Enable( level );

  /* Run the API post-switch extensions if either the global or the
     per-thread flag requests them. */
  if ( _Thread_Do_post_task_switch_extension ||
       executing->do_post_task_switch_extension ) {
    executing->do_post_task_switch_extension = FALSE;
    _API_extensions_Run_postswitch();
  }
 
}
331
332/*PAGE
333 *
334 *  _Thread_Stack_Allocate
335 *
336 *  Allocate the requested stack space for the thread.
337 *  return the actual size allocated after any adjustment
338 *  or return zero if the allocation failed.
339 *  Set the Start.stack field to the address of the stack
340 */
341
342static unsigned32 _Thread_Stack_Allocate(
343  Thread_Control *the_thread,
344  unsigned32 stack_size)
345{
346  void *stack_addr = 0;
347 
348  if ( !_Stack_Is_enough( stack_size ) )
349    stack_size = STACK_MINIMUM_SIZE;
350 
351  /*
352   * Call ONLY the CPU table stack allocate hook, _or_ the
353   * the RTEMS workspace allocate.  This is so the stack free
354   * routine can call the correct deallocation routine.
355   */
356
357  if ( _CPU_Table.stack_allocate_hook )
358  {
359    stack_addr = (*_CPU_Table.stack_allocate_hook)( stack_size );
360  } else {
361
362    /*
363     *  First pad the requested size so we allocate enough memory
364     *  so the context initialization can align it properly.  The address
365     *  returned the workspace allocate must be directly stored in the
366     *  stack control block because it is later used in the free sequence.
367     *
368     *  Thus it is the responsibility of the CPU dependent code to
369     *  get and keep the stack adjust factor, the stack alignment, and
370     *  the context initialization sequence in sync.
371     */
372
373    stack_size = _Stack_Adjust_size( stack_size );
374    stack_addr = _Workspace_Allocate( stack_size );
375  }
376 
377  if ( !stack_addr )
378      stack_size = 0;
379 
380  the_thread->Start.stack = stack_addr;
381 
382  return stack_size;
383}
384
385/*
386 *  _Thread_Stack_Free
387 *
388 *  Deallocate the Thread's stack.
389 */
390
391static void _Thread_Stack_Free(
392  Thread_Control *the_thread
393)
394{
395    /*
396     *  If the API provided the stack space, then don't free it.
397     */
398
399    if ( !the_thread->Start.core_allocated_stack )
400      return;
401
402    /*
403     * Call ONLY the CPU table stack free hook, or the
404     * the RTEMS workspace free.  This is so the free
405     * routine properly matches the allocation of the stack.
406     */
407
408    if ( _CPU_Table.stack_free_hook )
409        (*_CPU_Table.stack_free_hook)( the_thread->Start.Initial_stack.area );
410    else
411        _Workspace_Free( the_thread->Start.Initial_stack.area );
412}
413
414/*PAGE
415 *
416 *  _Thread_Initialize
417 *
418 *  XXX
419 */
420
/*
 *  Initialize all fields of a freshly allocated Thread_Control,
 *  allocating the stack, FP context, and extensions areas as needed.
 *  Returns TRUE on success; on any failure every resource acquired so
 *  far is released and FALSE is returned.
 */
boolean _Thread_Initialize(
  Objects_Information                  *information,
  Thread_Control                       *the_thread,
  void                                 *stack_area,
  unsigned32                            stack_size,
  boolean                               is_fp,
  Priority_Control                      priority,
  boolean                               is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  unsigned32                            isr_level,
  Objects_Name                          name
)
{
  unsigned32           actual_stack_size = 0;
  void                *stack = NULL;
  void                *fp_area;
  void                *extensions_area;

  /*
   *  Allocate and Initialize the stack for this thread.  When the
   *  caller does not supply a stack area, one is allocated here and
   *  must be released when the thread is closed.
   */


  if ( !stack_area ) {
    if ( !_Stack_Is_enough( stack_size ) )
      actual_stack_size = STACK_MINIMUM_SIZE;
    else
      actual_stack_size = stack_size;

    actual_stack_size = _Thread_Stack_Allocate( the_thread, actual_stack_size );
 
    if ( !actual_stack_size )
      return FALSE;                     /* stack allocation failed */

    stack = the_thread->Start.stack;
    the_thread->Start.core_allocated_stack = TRUE;
  } else {
    /* API-supplied stack: _Thread_Stack_Free must not release it. */
    stack = stack_area;
    actual_stack_size = stack_size;
    the_thread->Start.core_allocated_stack = FALSE;
  }

  _Stack_Initialize(
     &the_thread->Start.Initial_stack,
     stack,
     actual_stack_size
  );

  /*
   *  Allocate the floating point area for this thread.  On failure the
   *  stack allocated above is released before returning.
   */
 
  if ( is_fp ) {

    fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
    if ( !fp_area ) {
      _Thread_Stack_Free( the_thread );
      return FALSE;
    }
    fp_area = _Context_Fp_start( fp_area, 0 );

  } else
    fp_area = NULL;

  the_thread->fp_context       = fp_area;
  the_thread->Start.fp_context = fp_area;

  /*
   *  Allocate the extensions area for this thread.
   *  NOTE(review): one extra slot is allocated -- extension indices
   *  appear to be 1-based; confirm against the user extension manager.
   */

  if ( _Thread_Maximum_extensions ) {
    extensions_area = _Workspace_Allocate(
      (_Thread_Maximum_extensions + 1) * sizeof( void * )
    );

    if ( !extensions_area ) {
      /* Unwind the FP area and stack allocated above. */
      if ( fp_area )
        (void) _Workspace_Free( fp_area );

      _Thread_Stack_Free( the_thread );

      return FALSE;
    }
  } else
    extensions_area = NULL;
 
  the_thread->extensions = (void **) extensions_area;

  /*
   *  General initialization: remember the start-time attributes so a
   *  restart can reset the thread to them.
   */

  the_thread->Start.is_preemptible   = is_preemptible;
  the_thread->Start.budget_algorithm = budget_algorithm;
  the_thread->Start.budget_callout   = budget_callout;
  the_thread->Start.isr_level        = isr_level;

  the_thread->current_state          = STATES_DORMANT;
  the_thread->resource_count         = 0;
  the_thread->real_priority          = priority;
  the_thread->Start.initial_priority = priority;
  the_thread->ticks_executed         = 0;
 
  _Thread_Set_priority( the_thread, priority );

  /*
   *  Open the object
   */

  _Objects_Open( information, &the_thread->Object, name );

  /*
   *  Invoke create extensions.  A veto by any extension undoes every
   *  allocation made above.
   */

  if ( !_User_extensions_Thread_create( the_thread ) ) {

    if ( extensions_area )
      (void) _Workspace_Free( extensions_area );

    if ( fp_area )
      (void) _Workspace_Free( fp_area );

    _Thread_Stack_Free( the_thread );

    return FALSE;
  }

  return TRUE;
   
}
554
555/*
556 *  _Thread_Start
557 *
558 *  DESCRIPTION:
559 *
560 *  XXX
561 */
562 
563boolean _Thread_Start(
564  Thread_Control       *the_thread,
565  Thread_Start_types    the_prototype,
566  void                 *entry_point,
567  void                 *pointer_argument,
568  unsigned32            numeric_argument
569)
570{
571  if ( _States_Is_dormant( the_thread->current_state ) ) {
572 
573    the_thread->Start.entry_point      = (Thread_Entry) entry_point;
574   
575    the_thread->Start.prototype        = the_prototype;
576    the_thread->Start.pointer_argument = pointer_argument;
577    the_thread->Start.numeric_argument = numeric_argument;
578 
579    _Thread_Load_environment( the_thread );
580 
581    _Thread_Ready( the_thread );
582 
583    _User_extensions_Thread_start( the_thread );
584 
585    return TRUE;
586  }
587 
588  return FALSE;
589 
590}
591
592/*
593 *  _Thread_Restart
594 *
595 *  DESCRIPTION:
596 *
597 *  XXX
598 */
599 
/*
 *  Restart a non-dormant thread from its original entry point with the
 *  supplied arguments, resetting it to its start-time attributes.
 *  Returns TRUE on success, FALSE if the thread is dormant.
 */
boolean _Thread_Restart(
  Thread_Control      *the_thread,
  void                *pointer_argument,
  unsigned32           numeric_argument
)
{
  if ( !_States_Is_dormant( the_thread->current_state ) ) {
 
    /* Pull the thread out of scheduling while it is rebuilt. */
    _Thread_Set_transient( the_thread );
    the_thread->resource_count = 0;
    /* Revert mode attributes to their values at create time. */
    the_thread->is_preemptible   = the_thread->Start.is_preemptible;
    the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
    the_thread->budget_callout   = the_thread->Start.budget_callout;

    the_thread->Start.pointer_argument = pointer_argument;
    the_thread->Start.numeric_argument = numeric_argument;
 
    /* If not blocked on a thread queue, cancel any active timer. */
    if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {
 
      if ( _Watchdog_Is_active( &the_thread->Timer ) )
        (void) _Watchdog_Remove( &the_thread->Timer );
    }

    /* Restore the initial priority if it had been changed. */
    if ( the_thread->current_priority != the_thread->Start.initial_priority ) {
      the_thread->real_priority = the_thread->Start.initial_priority;
      _Thread_Set_priority( the_thread, the_thread->Start.initial_priority );
    }
 
    _Thread_Load_environment( the_thread );
 
    _Thread_Ready( the_thread );
 
    _User_extensions_Thread_restart( the_thread );
 
    /* Restarting the running thread cannot return normally. */
    if ( _Thread_Is_executing ( the_thread ) )
      _Thread_Restart_self();
 
    return TRUE;
  }
 
  return FALSE;
}
642
643/*
644 *  _Thread_Close
645 *
646 *  DESCRIPTION:
647 *
648 *  XXX
649 */
650 
651void _Thread_Close(
652  Objects_Information  *information,
653  Thread_Control       *the_thread
654)
655{
656  _Objects_Close( information, &the_thread->Object );
657 
658  _Thread_Set_state( the_thread, STATES_TRANSIENT );
659 
660  if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {
661 
662    if ( _Watchdog_Is_active( &the_thread->Timer ) )
663      (void) _Watchdog_Remove( &the_thread->Timer );
664  }
665
666  _User_extensions_Thread_delete( the_thread );
667 
668#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
669  if ( _Thread_Is_allocated_fp( the_thread ) )
670    _Thread_Deallocate_fp();
671#endif
672  the_thread->fp_context = NULL;
673
674  if ( the_thread->Start.fp_context )
675  (void) _Workspace_Free( the_thread->Start.fp_context );
676
677  _Thread_Stack_Free( the_thread );
678
679  if ( the_thread->extensions )
680    (void) _Workspace_Free( the_thread->extensions );
681
682  the_thread->Start.stack = NULL;
683  the_thread->extensions = NULL;
684}
685
686/*PAGE
687 *
688 *  _Thread_Ready
689 *
690 *  This kernel routine readies the requested thread, the thread chain
691 *  is adjusted.  A new heir thread may be selected.
692 *
693 *  Input parameters:
694 *    the_thread - pointer to thread control block
695 *
696 *  Output parameters:  NONE
697 *
698 *  NOTE:  This routine uses the "blocking" heir selection mechanism.
699 *         This insures the correct heir after a thread restart.
700 *
701 *  INTERRUPT LATENCY:
702 *    ready chain
703 *    select heir
704 */
705
void _Thread_Ready(
  Thread_Control *the_thread
)
{
  ISR_Level              level;
  Thread_Control *heir;

  _ISR_Disable( level );

  the_thread->current_state = STATES_READY;

  /* Mark the priority level as having a ready thread and queue FIFO. */
  _Priority_Add_to_bit_map( &the_thread->Priority_map );

  _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );

  /* Give pending interrupts a window before heir selection. */
  _ISR_Flash( level );

  _Thread_Calculate_heir();

  heir = _Thread_Heir;

  /* Request a dispatch only when a different thread became heir and
     the running thread permits preemption. */
  if ( !_Thread_Is_executing( heir ) && _Thread_Executing->is_preemptible )
    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}
732
733/*PAGE
734 *
735 *  _Thread_Clear_state
736 *
737 *  This kernel routine clears the appropriate states in the
738 *  requested thread.  The thread ready chain is adjusted if
739 *  necessary and the Heir thread is set accordingly.
740 *
741 *  Input parameters:
742 *    the_thread - pointer to thread control block
743 *    state      - state set to clear
744 *
745 *  Output parameters:  NONE
746 *
747 *  INTERRUPT LATENCY:
748 *    priority map
749 *    select heir
750 */
751
752
void _Thread_Clear_state(
  Thread_Control *the_thread,
  States_Control  state
)
{
  ISR_Level       level;
  States_Control  current_state;

  _ISR_Disable( level );
    current_state = the_thread->current_state;
   
    /* Nothing to do unless at least one requested state bit is set. */
    if ( current_state & state ) {
      current_state =
      the_thread->current_state = _States_Clear( state, current_state );

      /* If clearing those bits made the thread ready, requeue it. */
      if ( _States_Is_ready( current_state ) ) {

        _Priority_Add_to_bit_map( &the_thread->Priority_map );

        _Chain_Append_unprotected(the_thread->ready, &the_thread->Object.Node);

        /* Window for pending interrupts before heir evaluation. */
        _ISR_Flash( level );

        /* A strictly higher priority thread becomes the heir; it
           preempts if the running thread allows it or the new heir
           is at priority 0. */
        if ( the_thread->current_priority < _Thread_Heir->current_priority ) {
          _Thread_Heir = the_thread;
          if ( _Thread_Executing->is_preemptible ||
               the_thread->current_priority == 0 )
            _Context_Switch_necessary = TRUE;
        }
      }
  }
  _ISR_Enable( level );
}
786
787/*PAGE
788 *
789 * _Thread_Set_state
790 *
791 * This kernel routine sets the requested state in the THREAD.  The
792 * THREAD chain is adjusted if necessary.
793 *
794 * Input parameters:
795 *   the_thread   - pointer to thread control block
796 *   state - state to be set
797 *
798 * Output parameters:  NONE
799 *
800 *  INTERRUPT LATENCY:
801 *    ready chain
802 *    select map
803 */
804
void _Thread_Set_state(
  Thread_Control *the_thread,
  States_Control         state
)
{
  ISR_Level             level;
  Chain_Control *ready;

  ready = the_thread->ready;
  _ISR_Disable( level );
  /* If already blocked, just accumulate the new state bits. */
  if ( !_States_Is_ready( the_thread->current_state ) ) {
    the_thread->current_state =
       _States_Set( state, the_thread->current_state );
    _ISR_Enable( level );
    return;
  }

  the_thread->current_state = state;

  /* Remove from the ready chain; when this was the only thread at
     this priority, clear the priority's bit map entry too. */
  if ( _Chain_Has_only_one_node( ready ) ) {

    _Chain_Initialize_empty( ready );
    _Priority_Remove_from_bit_map( &the_thread->Priority_map );

  } else
    _Chain_Extract_unprotected( &the_thread->Object.Node );

  /* Window for pending interrupts before heir evaluation. */
  _ISR_Flash( level );

  if ( _Thread_Is_heir( the_thread ) )
     _Thread_Calculate_heir();

  /* Blocking the running thread forces a dispatch. */
  if ( _Thread_Is_executing( the_thread ) )
    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}
842
843/*PAGE
844 *
845 *  _Thread_Set_transient
846 *
847 *  This kernel routine places the requested thread in the transient state
848 *  which will remove it from the ready queue, if necessary.  No
849 *  rescheduling is necessary because it is assumed that the transient
850 *  state will be cleared before dispatching is enabled.
851 *
852 *  Input parameters:
853 *    the_thread - pointer to thread control block
854 *
855 *  Output parameters:  NONE
856 *
857 *  INTERRUPT LATENCY:
858 *    only case
859 */
860
void _Thread_Set_transient(
  Thread_Control *the_thread
)
{
  ISR_Level             level;
  unsigned32            old_state;
  Chain_Control *ready;

  ready = the_thread->ready;
  _ISR_Disable( level );

  old_state = the_thread->current_state;
  the_thread->current_state = _States_Set( STATES_TRANSIENT, old_state );

  /* Only a previously ready thread is on a ready chain to remove. */
  if ( _States_Is_ready( old_state ) ) {
    if ( _Chain_Has_only_one_node( ready ) ) {

      /* Last thread at this priority -- clear its bit map entry. */
      _Chain_Initialize_empty( ready );
      _Priority_Remove_from_bit_map( &the_thread->Priority_map );

    } else
      _Chain_Extract_unprotected( &the_thread->Object.Node );
  }

  /* No heir recalculation: the caller clears the transient state
     before dispatching is re-enabled. */
  _ISR_Enable( level );

}
888
889/*PAGE
890 *
891 *  _Thread_Reset_timeslice
892 *
893 *  This routine will remove the running thread from the ready chain
894 *  and place it immediately at the rear of this chain and then the
895 *  timeslice counter is reset.  The heir THREAD will be updated if
896 *  the running is also the currently the heir.
897 *
898 *  Input parameters:   NONE
899 *
900 *  Output parameters:  NONE
901 *
902 *  INTERRUPT LATENCY:
903 *    ready chain
904 *    select heir
905 */
906
void _Thread_Reset_timeslice( void )
{
  ISR_Level       level;
  Thread_Control *executing;
  Chain_Control  *ready;

  executing = _Thread_Executing;
  ready     = executing->ready;
  _ISR_Disable( level );
    /* Alone at this priority: rotating the chain would be a no-op. */
    if ( _Chain_Has_only_one_node( ready ) ) {
      _ISR_Enable( level );
      return;
    }
    /* Rotate the running thread to the rear of its ready chain. */
    _Chain_Extract_unprotected( &executing->Object.Node );
    _Chain_Append_unprotected( ready, &executing->Object.Node );

  /* Window for pending interrupts before heir selection. */
  _ISR_Flash( level );

    /* The new front of the chain inherits the heir position. */
    if ( _Thread_Is_heir( executing ) )
      _Thread_Heir = (Thread_Control *) ready->first;

    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}
932
933/*PAGE
934 *
935 *  _Thread_Tickle_timeslice
936 *
937 *  This scheduler routine determines if timeslicing is enabled
938 *  for the currently executing thread and, if so, updates the
939 *  timeslice count and checks for timeslice expiration.
940 *
941 *  Input parameters:   NONE
942 *
943 *  Output parameters:  NONE
944 */
945
void _Thread_Tickle_timeslice( void )
{
  Thread_Control *executing;

  executing = _Thread_Executing;

  /*
   *  Increment the number of ticks this thread has been executing
   */

  executing->ticks_executed++;

  /*
   *  If the thread is not preemptible or is not ready, then
   *  just return.
   */

  if ( !executing->is_preemptible )
    return;

  if ( !_States_Is_ready( executing->current_state ) )
    return;

  /*
   *  The cpu budget algorithm determines what happens next.
   */

  switch ( executing->budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
      break;

    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
    case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
      /* Quantum exhausted: yield to peers and start a new quantum. */
      if ( --executing->cpu_time_budget == 0 ) {
        _Thread_Reset_timeslice();
        executing->cpu_time_budget = _Thread_Ticks_per_timeslice;
      }
      break;

    case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
      /* Budget exhausted: let the user callout decide what to do.
         NOTE(review): the callout appears responsible for resetting
         cpu_time_budget -- confirm against its contract. */
      if ( --executing->cpu_time_budget == 0 )
        (*executing->budget_callout)( executing );
      break;
  }
}
991
992/*PAGE
993 *
994 *  _Thread_Yield_processor
995 *
996 *  This kernel routine will remove the running THREAD from the ready chain
997 *  and place it immediatly at the rear of this chain.  Reset timeslice
998 *  and yield the processor functions both use this routine, therefore if
999 *  reset is TRUE and this is the only thread on the chain then the
1000 *  timeslice counter is reset.  The heir THREAD will be updated if the
1001 *  running is also the currently the heir.
1002 *
1003 *  Input parameters:   NONE
1004 *
1005 *  Output parameters:  NONE
1006 *
1007 *  INTERRUPT LATENCY:
1008 *    ready chain
1009 *    select heir
1010 */
1011
void _Thread_Yield_processor( void )
{
  ISR_Level       level;
  Thread_Control *executing;
  Chain_Control  *ready;

  executing = _Thread_Executing;
  ready     = executing->ready;
  _ISR_Disable( level );
    if ( !_Chain_Has_only_one_node( ready ) ) {
      /* Rotate the running thread to the rear of its ready chain. */
      _Chain_Extract_unprotected( &executing->Object.Node );
      _Chain_Append_unprotected( ready, &executing->Object.Node );

      /* Window for pending interrupts before heir selection. */
      _ISR_Flash( level );

      /* The new front of the chain becomes the heir. */
      if ( _Thread_Is_heir( executing ) )
        _Thread_Heir = (Thread_Control *) ready->first;
      _Context_Switch_necessary = TRUE;
    }
    /* Alone at this priority: dispatch only if someone else is heir. */
    else if ( !_Thread_Is_heir( executing ) )
      _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}
1036
1037/*PAGE
1038 *
1039 *  _Thread_Load_environment
1040 *
1041 *  Load starting environment for another thread from its start area in the
1042 *  thread.  Only called from t_restart and t_start.
1043 *
1044 *  Input parameters:
1045 *    the_thread - thread control block pointer
1046 *
1047 *  Output parameters:  NONE
1048 */
1049
void _Thread_Load_environment(
  Thread_Control *the_thread
)
{
  boolean is_fp = FALSE;

  /* A thread created as FP keeps its FP area in Start.fp_context;
     reinitialize it for the (re)start. */
  if ( the_thread->Start.fp_context ) {
    the_thread->fp_context = the_thread->Start.fp_context;
    _Context_Initialize_fp( &the_thread->fp_context );
    is_fp = TRUE;
  }

  /* Revert mode attributes to their start-time values. */
  the_thread->do_post_task_switch_extension = FALSE;
  the_thread->is_preemptible   = the_thread->Start.is_preemptible;
  the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
  the_thread->budget_callout   = the_thread->Start.budget_callout;

  /* Build an initial context that enters at _Thread_Handler. */
  _Context_Initialize(
    &the_thread->Registers,
    the_thread->Start.Initial_stack.area,
    the_thread->Start.Initial_stack.size,
    the_thread->Start.isr_level,
    _Thread_Handler,
    is_fp
  );

}
1077
1078/*PAGE
1079 *
1080 *  _Thread_Handler
1081 *
1082 *  This routine is the "primal" entry point for all threads.
1083 *  _Context_Initialize() dummies up the thread's initial context
1084 *  to cause the first Context_Switch() to jump to _Thread_Handler().
1085 *
1086 *  This routine is the default thread exitted error handler.  It is
1087 *  returned to when a thread exits.  The configured fatal error handler
1088 *  is invoked to process the exit.
1089 *
1090 *  NOTE:
1091 *
1092 *  On entry, it is assumed all interrupts are blocked and that this
1093 *  routine needs to set the initial isr level.  This may or may not
1094 *  actually be needed by the context switch routine and as a result
1095 *  interrupts may already be at there proper level.  Either way,
1096 *  setting the initial isr level properly here is safe.
1097 * 
1098 *  Currently this is only really needed for the posix port,
1099 *  ref: _Context_Switch in unix/cpu.c
1100 *
1101 *  Input parameters:   NONE
1102 *
1103 *  Output parameters:  NONE
1104 */
1105
void _Thread_Handler( void )
{
  ISR_Level  level;
  Thread_Control *executing;
 
  executing = _Thread_Executing;
 
  /*
   * have to put level into a register for those cpu's that use
   * inline asm here
   */
 
  level = executing->Start.isr_level;
  _ISR_Set_level(level);

  /*
   * Take care that 'begin' extensions get to complete before
   * 'switch' extensions can run.  This means must keep dispatch
   * disabled until all 'begin' extensions complete.
   */
 
  _User_extensions_Thread_begin( executing );
 
  /*
   *  At this point, the dispatch disable level BETTER be 1.
   *  (It was set to 1 by _Thread_Dispatch before the switch here.)
   */

  _Thread_Enable_dispatch();
 
  /* Invoke the entry point with the argument shape recorded at start. */
  switch ( executing->Start.prototype ) {
    case THREAD_START_NUMERIC:
      (*(Thread_Entry_numeric) executing->Start.entry_point)(
        executing->Start.numeric_argument
      );
      break;
    case THREAD_START_POINTER:
      (*(Thread_Entry_pointer) executing->Start.entry_point)(
        executing->Start.pointer_argument
      );
      break;
    case THREAD_START_BOTH_POINTER_FIRST:
      (*(Thread_Entry_both_pointer_first) executing->Start.entry_point)(
        executing->Start.pointer_argument,
        executing->Start.numeric_argument
      );
      break;
    case THREAD_START_BOTH_NUMERIC_FIRST:
      (*(Thread_Entry_both_numeric_first) executing->Start.entry_point)(
        executing->Start.numeric_argument,
        executing->Start.pointer_argument
      );
      break;
  }

  /* Control returns here only if the thread's entry point returns. */
  _User_extensions_Thread_exitted( executing );

  /* A thread that falls off the end of its body is a fatal error. */
  _Internal_error_Occurred(
    INTERNAL_ERROR_CORE,
    TRUE,
    INTERNAL_ERROR_THREAD_EXITTED
  );
}
1168
1169/*PAGE
1170 *
1171 *  _Thread_Delay_ended
1172 *
1173 *  This routine processes a thread whose delay period has ended.
1174 *  It is called by the watchdog handler.
1175 *
1176 *  Input parameters:
1177 *    id - thread id
1178 *
1179 *  Output parameters: NONE
1180 */
1181
1182void _Thread_Delay_ended(
1183  Objects_Id  id,
1184  void       *ignored
1185)
1186{
1187  Thread_Control    *the_thread;
1188  Objects_Locations  location;
1189
1190  the_thread = _Thread_Get( id, &location );
1191  switch ( location ) {
1192    case OBJECTS_ERROR:
1193    case OBJECTS_REMOTE:  /* impossible */
1194      break;
1195    case OBJECTS_LOCAL:
1196      _Thread_Unblock( the_thread );
1197      _Thread_Unnest_dispatch();
1198      break;
1199  }
1200}
1201
1202/*PAGE
1203 *
1204 *  _Thread_Change_priority
1205 *
1206 *  This kernel routine changes the priority of the thread.  The
1207 *  thread chain is adjusted if necessary.
1208 *
1209 *  Input parameters:
1210 *    the_thread   - pointer to thread control block
1211 *    new_priority - ultimate priority
1212 *    prepend_it   - TRUE if the thread should be prepended to the chain
1213 *
1214 *  Output parameters:  NONE
1215 *
1216 *  INTERRUPT LATENCY:
1217 *    ready chain
1218 *    select heir
1219 */
1220
void _Thread_Change_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority,
  boolean           prepend_it
)
{
  ISR_Level level;
  /* boolean   do_prepend = FALSE; */

  /*
   *  If this is a case where prepending the task to its priority is
   *  potentially desired, then we need to consider whether to do it.
   *  This usually occurs when a task lowers its priority implicitly as
   *  the result of losing inherited priority.  Normal explicit priority
   *  change calls (e.g. rtems_task_set_priority) should always do an
   *  append not a prepend.
   */

  /*
   *  Technically, the prepend should be conditional on the thread lowering
   *  its priority but that does allow cxd2004 of the acvc 2.0.1 to
   *  pass with rtems 4.0.0.  This should change when gnat redoes its
   *  priority scheme.
   */
/*
  if ( prepend_it &&
       _Thread_Is_executing( the_thread ) &&
       new_priority >= the_thread->current_priority )
    prepend_it = TRUE;
*/

  /*
   *  Mark the thread transient so it is off the ready chain while the
   *  priority (and hence its ready chain) is being changed.
   */

  _Thread_Set_transient( the_thread );

  if ( the_thread->current_priority != new_priority )
    _Thread_Set_priority( the_thread, new_priority );

  _ISR_Disable( level );

  the_thread->current_state =
    _States_Clear( STATES_TRANSIENT, the_thread->current_state );

  /*
   *  If the thread is blocked for some other reason, there is nothing
   *  to re-queue; it will be placed on the correct ready chain when it
   *  is eventually unblocked.
   */

  if ( ! _States_Is_ready( the_thread->current_state ) ) {
    _ISR_Enable( level );
    return;
  }

  /*
   *  Put the thread back on the ready chain for its (possibly new)
   *  priority.  _ISR_Flash bounds interrupt latency between the chain
   *  update and the heir recalculation.
   */

  _Priority_Add_to_bit_map( &the_thread->Priority_map );
  if ( prepend_it )
    _Chain_Prepend_unprotected( the_thread->ready, &the_thread->Object.Node );
  else
    _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );

  _ISR_Flash( level );

  _Thread_Calculate_heir();

  /*
   *  A context switch is needed only if the executing thread is no
   *  longer the heir and is allowed to be preempted.
   */

  if ( !_Thread_Is_executing_also_the_heir() &&
       _Thread_Executing->is_preemptible )
    _Context_Switch_necessary = TRUE;
  _ISR_Enable( level );
}
1282
1283/*PAGE
1284 *
1285 * _Thread_Set_priority
1286 *
 * This kernel routine sets the priority of the thread and updates the
 * thread's ready chain pointer and priority map information to match.
1289 *
1290 *  Input parameters:
 *    the_thread   - pointer to thread control block
1292 *    new_priority - new priority
1293 *
1294 *  Output: NONE
1295 */
1296
1297void _Thread_Set_priority(
1298  Thread_Control   *the_thread,
1299  Priority_Control  new_priority
1300)
1301{
1302  the_thread->current_priority = new_priority;
1303  the_thread->ready            = &_Thread_Ready_chain[ new_priority ];
1304
1305  _Priority_Initialize_information( &the_thread->Priority_map, new_priority );
1306}
1307
1308/*PAGE
1309 *
1310 *  _Thread_Evaluate_mode
1311 *
1312 *  XXX
1313 */
1314
1315boolean _Thread_Evaluate_mode( void )
1316{
1317  Thread_Control     *executing;
1318
1319  executing = _Thread_Executing;
1320
1321  if ( !_States_Is_ready( executing->current_state ) ||
1322       ( !_Thread_Is_heir( executing ) && executing->is_preemptible ) ) {
1323    _Context_Switch_necessary = TRUE;
1324    return TRUE;
1325  }
1326
1327  return FALSE;
1328}
1329
1330/*PAGE
1331 *
1332 *  _Thread_Get
1333 *
1334 *  NOTE:  If we are not using static inlines, this must be a real
1335 *         subroutine call.
1336 *
1337 *  NOTE:  XXX... This routine may be able to be optimized.
1338 */
1339
1340#ifndef USE_INLINES
1341
1342Thread_Control *_Thread_Get (
1343  Objects_Id           id,
1344  Objects_Locations   *location
1345)
1346{
1347  Objects_Classes      the_class;
1348  Objects_Information *information;
1349 
1350  if ( _Objects_Are_ids_equal( id, OBJECTS_ID_OF_SELF ) ) {
1351    _Thread_Disable_dispatch();
1352    *location = OBJECTS_LOCAL;
1353    return( _Thread_Executing );
1354  }
1355 
1356  the_class = _Objects_Get_class( id );
1357 
1358  if ( the_class > OBJECTS_CLASSES_LAST ) {
1359    *location = OBJECTS_ERROR;
1360    return (Thread_Control *) 0;
1361  }
1362 
1363  information = _Objects_Information_table[ the_class ];
1364 
1365  if ( !information || !information->is_thread ) {
1366    *location = OBJECTS_ERROR;
1367    return (Thread_Control *) 0;
1368  }
1369 
1370  return (Thread_Control *) _Objects_Get( information, id, location );
1371}
1372
1373#endif
1374
1375/*PAGE
1376 *
1377 *  _Thread_Idle_body
1378 *
1379 *  This kernel routine is the idle thread.  The idle thread runs any time
1380 *  no other thread is ready to run.  This thread loops forever with
1381 *  interrupts enabled.
1382 *
1383 *  Input parameters:
1384 *    ignored - this parameter is ignored
1385 *
1386 *  Output parameters:  NONE
1387 */
1388 
1389#if (CPU_PROVIDES_IDLE_THREAD_BODY == FALSE)
1390Thread _Thread_Idle_body(
1391  unsigned32 ignored
1392)
1393{
1394  for( ; ; ) ;
1395}
1396#endif
Note: See TracBrowser for help on using the repository browser.