source: rtems/cpukit/score/src/thread.c @ 0f63c86

Last change on this file since 0f63c86 was 0f63c86, checked in by Joel Sherrill <joel.sherrill@…>, on Jun 10, 1996 at 8:47:56 PM

added code to _Thread_Clear_state to check if the state was not currently set.

  • Property mode set to 100644
File size: 30.8 KB
/*
 *  Thread Handler
 *
 *
 *  COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
 *  On-Line Applications Research Corporation (OAR).
 *  All rights assigned to U.S. Government, 1994.
 *
 *  This material may be reproduced by or for the U.S. Government pursuant
 *  to the copyright license under the clause at DFARS 252.227-7013.  This
 *  notice must appear in all copies of this file and its derivatives.
 *
 *  $Id$
 */

#include <rtems/system.h>
#include <rtems/score/apiext.h>
#include <rtems/score/context.h>
#include <rtems/score/interr.h>
#include <rtems/score/isr.h>
#include <rtems/score/object.h>
#include <rtems/score/priority.h>
#include <rtems/score/states.h>
#include <rtems/score/sysstate.h>
#include <rtems/score/thread.h>
#include <rtems/score/threadq.h>
#include <rtems/score/userext.h>
#include <rtems/score/wkspace.h>

/*PAGE
 *
 *  _Thread_Handler_initialization
 *
 *  This routine initializes all thread manager related data structures.
 *
 *  Input parameters:
 *    ticks_per_timeslice - clock ticks per quantum
 *    maximum_extensions  - maximum number of user extensions
 *    maximum_proxies     - number of proxies to initialize
 *
 *  Output parameters:  NONE
 */

char *_Thread_Idle_name = "IDLE";

void _Thread_Handler_initialization(
  unsigned32   ticks_per_timeslice,
  unsigned32   maximum_extensions,
  unsigned32   maximum_proxies
)
{
  unsigned32      index;

  /*
   * BOTH stack hooks must be set or both must be NULL.
   * Do not allow mixture.
   */

  if ( !( ( _CPU_Table.stack_allocate_hook == 0 )
       == ( _CPU_Table.stack_free_hook == 0 ) ) )
    _Internal_error_Occurred(
      INTERNAL_ERROR_CORE,
      TRUE,
      INTERNAL_ERROR_BAD_STACK_HOOK
    );

  _Context_Switch_necessary = FALSE;
  _Thread_Executing         = NULL;
  _Thread_Heir              = NULL;
  _Thread_Allocated_fp      = NULL;

  _Thread_Maximum_extensions = maximum_extensions;

  _Thread_Ticks_per_timeslice          = ticks_per_timeslice;

  _Thread_Ready_chain = _Workspace_Allocate_or_fatal_error(
    (PRIORITY_MAXIMUM + 1) * sizeof(Chain_Control)
  );

  for ( index=0; index <= PRIORITY_MAXIMUM ; index++ )
    _Chain_Initialize_empty( &_Thread_Ready_chain[ index ] );

  _Thread_MP_Handler_initialization( maximum_proxies );

  /*
   *  Initialize this class of objects.
   */

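  /*
   *  The maximum below is one internal thread (the IDLE thread) on a
   *  single processor node and two on a multiprocessing node, where the
   *  extra slot is presumably for the MP server thread.
   */
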
  _Objects_Initialize_information(
    &_Thread_Internal_information,
    OBJECTS_INTERNAL_THREADS,
    FALSE,
    ( _System_state_Is_multiprocessing ) ?  2 : 1,
    sizeof( Thread_Control ),
    TRUE,
    8,
    TRUE
  );

}

/*PAGE
 *
 *  _Thread_Create_idle
 */

void _Thread_Create_idle( void )
{
  void *idle;

  /*
   *  The entire workspace is zeroed during its initialization.  Thus, all
   *  fields not explicitly assigned were explicitly zeroed by
   *  _Workspace_Initialization.
   */

  _Thread_Idle = _Thread_Internal_allocate();

  /*
   *  Initialize the IDLE task.
   */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
  idle = _CPU_Thread_Idle_body;
#else
  idle = _Thread_Idle_body;
#endif

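  /*
   *  A BSP may override the idle body selected above by supplying its own
   *  idle task via the CPU table.
   */
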
  if ( _CPU_Table.idle_task )
    idle = _CPU_Table.idle_task;

  _Thread_Initialize(
    &_Thread_Internal_information,
    _Thread_Idle,
    NULL,        /* allocate the stack */
    THREAD_IDLE_STACK_SIZE,
    CPU_IDLE_TASK_IS_FP,
    PRIORITY_MAXIMUM,
    TRUE,        /* preemptable */
    THREAD_CPU_BUDGET_ALGORITHM_NONE,
    NULL,        /* no budget algorithm callout */
    0,           /* all interrupts enabled */
    _Thread_Idle_name
  );

  /*
   *  WARNING!!! This is necessary to "kick" start the system and
   *             MUST be done before _Thread_Start is invoked.
   */

  _Thread_Heir      =
  _Thread_Executing = _Thread_Idle;

  _Thread_Start(
    _Thread_Idle,
    THREAD_START_NUMERIC,
    idle,
    NULL,
    0
  );

}

/*PAGE
 *
 *  _Thread_Start_multitasking
 *
 *  This kernel routine initiates multitasking.  It marks the system as
 *  up and performs the first context switch from the initialization
 *  (BSP) context to the context of the heir thread.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 */

void _Thread_Start_multitasking( void )
{
  /*
   *  The system is now multitasking and completely initialized.
   *  This system thread now either "goes away" in a single processor
   *  system or "turns into" the server thread in an MP system.
   */

  _System_state_Set( SYSTEM_STATE_UP );

  _Context_Switch_necessary = FALSE;

  _Thread_Executing = _Thread_Heir;

  _Context_Switch( &_Thread_BSP_context, &_Thread_Executing->Registers );
}

/*PAGE
 *
 *  _Thread_Dispatch
 *
 *  This kernel routine determines if a dispatch is needed, and if so
 *  dispatches to the heir thread.  Once the heir is running an attempt
 *  is made to dispatch any ASRs.
 *
 *  ALTERNATE ENTRY POINTS:
 *    void _Thread_Enable_dispatch();
 *
 *  Input parameters:  NONE
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    dispatch thread
 *    no dispatch thread
 */

#if ( CPU_INLINE_ENABLE_DISPATCH == FALSE )
void _Thread_Enable_dispatch( void )
{
  if ( --_Thread_Dispatch_disable_level )
    return;
  _Thread_Dispatch();
}
#endif

void _Thread_Dispatch( void )
{
  Thread_Control   *executing;
  Thread_Control   *heir;
  ISR_Level         level;

  executing   = _Thread_Executing;
  _ISR_Disable( level );
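  /*
   *  Dispatch as long as a new heir has been selected.  Interrupts remain
   *  disabled while the heir is examined, but are re-enabled for the
   *  duration of the extension callouts and the actual context switch.
   */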
  while ( _Context_Switch_necessary == TRUE ) {
    heir = _Thread_Heir;
    _Thread_Dispatch_disable_level = 1;
    _Context_Switch_necessary = FALSE;
    _Thread_Executing = heir;
    _ISR_Enable( level );

    _User_extensions_Thread_switch( executing, heir );

    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = _Thread_Ticks_per_timeslice;

    /*
     *  If the CPU has hardware floating point, then we must address saving
     *  and restoring it as part of the context switch.
     *
     *  The second conditional compilation section selects the algorithm used
     *  to context switch between floating point tasks.  The deferred algorithm
     *  can be significantly better in a system with few floating point tasks
     *  because it reduces the total number of save and restore FP context
     *  operations.  However, this algorithm cannot be used on all CPUs due
     *  to unpredictable use of FP registers by some compilers for integer
     *  operations.
     */

#if ( CPU_HARDWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    if ( (heir->fp_context != NULL) && !_Thread_Is_allocated_fp( heir ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &heir->fp_context );
      _Thread_Allocated_fp = heir;
    }
#else
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );

    if ( heir->fp_context != NULL )
      _Context_Restore_fp( &heir->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

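    /*
     *  This thread is once again the currently executing thread, so
     *  refresh the local pointer before re-evaluating the dispatch
     *  necessary flag.
     */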
    executing = _Thread_Executing;

    _ISR_Disable( level );
  }

  _Thread_Dispatch_disable_level = 0;

  _ISR_Enable( level );

  if ( executing->do_post_task_switch_extension ) {
    executing->do_post_task_switch_extension = FALSE;
    _API_extensions_Run_postswitch();
  }

}

/*PAGE
 *
 *  _Thread_Stack_Allocate
 *
 *  Allocate the requested stack space for the thread.
 *  Return the actual size allocated after any adjustment,
 *  or return zero if the allocation failed.
 *  Set the Start.stack field to the address of the stack.
 */

static unsigned32 _Thread_Stack_Allocate(
  Thread_Control *the_thread,
  unsigned32 stack_size)
{
  void *stack_addr = 0;

  if ( !_Stack_Is_enough( stack_size ) )
    stack_size = STACK_MINIMUM_SIZE;

  /*
   * Call ONLY the CPU table stack allocate hook, _or_ the
   * RTEMS workspace allocate.  This is so the stack free
   * routine can call the correct deallocation routine.
   */

  if ( _CPU_Table.stack_allocate_hook )
  {
    stack_addr = (*_CPU_Table.stack_allocate_hook)( stack_size );
  } else {

    /*
     *  First pad the requested size so we allocate enough memory
     *  so the context initialization can align it properly.  The address
     *  returned by the workspace allocate must be directly stored in the
     *  stack control block because it is later used in the free sequence.
     *
     *  Thus it is the responsibility of the CPU dependent code to
     *  get and keep the stack adjust factor, the stack alignment, and
     *  the context initialization sequence in sync.
     */

    stack_size = _Stack_Adjust_size( stack_size );
    stack_addr = _Workspace_Allocate( stack_size );
  }

  if ( !stack_addr )
      stack_size = 0;

  the_thread->Start.stack = stack_addr;

  return stack_size;
}

/*
 *  _Thread_Stack_Free
 *
 *  Deallocate the Thread's stack.
 */

static void _Thread_Stack_Free(
  Thread_Control *the_thread
)
{
    /*
     *  If the API provided the stack space, then don't free it.
     */

    if ( !the_thread->Start.core_allocated_stack )
      return;

    /*
     * Call ONLY the CPU table stack free hook, or the
     * RTEMS workspace free.  This is so the free
     * routine properly matches the allocation of the stack.
     */

    if ( _CPU_Table.stack_free_hook )
        (*_CPU_Table.stack_free_hook)( the_thread->Start.Initial_stack.area );
    else
        _Workspace_Free( the_thread->Start.Initial_stack.area );
}

/*PAGE
 *
 *  _Thread_Initialize
 *
 *  This routine initializes the specified thread control block.  It
 *  allocates the stack (unless a stack area is provided), the floating
 *  point context, and the extensions area, establishes the initial
 *  priority and state, opens the thread object, and runs the create
 *  user extensions.  It returns TRUE on success and FALSE if any
 *  allocation or create extension fails.
 */

boolean _Thread_Initialize(
  Objects_Information                  *information,
  Thread_Control                       *the_thread,
  void                                 *stack_area,
  unsigned32                            stack_size,
  boolean                               is_fp,
  Priority_Control                      priority,
  boolean                               is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  unsigned32                            isr_level,
  Objects_Name                          name
)
{
  unsigned32           actual_stack_size = 0;
  void                *stack = NULL;
  void                *fp_area;
  void                *extensions_area;

  /*
   *  Allocate and Initialize the stack for this thread.
   */

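  /*
   *  If the caller did not provide a stack area, allocate one (via the
   *  stack hook or the workspace) and note that the core allocated it so
   *  _Thread_Stack_Free later knows whether to release it.
   */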
  if ( !stack_area ) {
    if ( !_Stack_Is_enough( stack_size ) )
      actual_stack_size = STACK_MINIMUM_SIZE;
    else
      actual_stack_size = stack_size;

    actual_stack_size = _Stack_Adjust_size( actual_stack_size );
    stack             = stack_area;

    actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );

    if ( !actual_stack_size )
      return FALSE;                     /* stack allocation failed */

    stack = the_thread->Start.stack;

    the_thread->Start.core_allocated_stack = TRUE;
  } else {
    stack = stack_area;
    actual_stack_size = stack_size;
    the_thread->Start.core_allocated_stack = FALSE;
  }

  _Stack_Initialize(
     &the_thread->Start.Initial_stack,
     stack,
     actual_stack_size
  );

  /*
   *  Allocate the floating point area for this thread
   */

  if ( is_fp ) {

    fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
    if ( !fp_area ) {
      _Thread_Stack_Free( the_thread );
      return FALSE;
    }
    fp_area = _Context_Fp_start( fp_area, 0 );

  } else
    fp_area = NULL;

  the_thread->fp_context       = fp_area;
  the_thread->Start.fp_context = fp_area;

  /*
   *  Allocate the extensions area for this thread
   */

  if ( _Thread_Maximum_extensions ) {
    extensions_area = _Workspace_Allocate(
      (_Thread_Maximum_extensions + 1) * sizeof( void * )
    );

    if ( !extensions_area ) {
      if ( fp_area )
        (void) _Workspace_Free( fp_area );

      _Thread_Stack_Free( the_thread );

      return FALSE;
    }
  } else
    extensions_area = NULL;

  the_thread->extensions = extensions_area;

  /*
   *  General initialization
   */

  the_thread->Start.is_preemptible   = is_preemptible;
  the_thread->Start.budget_algorithm = budget_algorithm;
  the_thread->Start.budget_callout   = budget_callout;
  the_thread->Start.isr_level        = isr_level;

  the_thread->current_state          = STATES_DORMANT;
  the_thread->resource_count         = 0;
  the_thread->real_priority          = priority;
  the_thread->Start.initial_priority = priority;

  _Thread_Set_priority( the_thread, priority );

  /*
   *  Open the object
   */

  _Objects_Open( information, &the_thread->Object, name );

  /*
   *  Invoke create extensions
   */

  if ( !_User_extensions_Thread_create( the_thread ) ) {

    if ( extensions_area )
      (void) _Workspace_Free( extensions_area );

    if ( fp_area )
      (void) _Workspace_Free( fp_area );

    _Thread_Stack_Free( the_thread );

    return FALSE;
  }

  return TRUE;

}

/*
 *  _Thread_Start
 *
 *  DESCRIPTION:
 *
 *  This routine starts the specified dormant thread at the given entry
 *  point with the given argument(s) and makes it ready to execute.  It
 *  returns TRUE if the thread was started and FALSE if the thread was
 *  not dormant.
 */

boolean _Thread_Start(
  Thread_Control       *the_thread,
  Thread_Start_types    the_prototype,
  void                 *entry_point,
  void                 *pointer_argument,
  unsigned32            numeric_argument
)
{
  if ( _States_Is_dormant( the_thread->current_state ) ) {

    the_thread->Start.entry_point      = entry_point;

    the_thread->Start.prototype        = the_prototype;
    the_thread->Start.pointer_argument = pointer_argument;
    the_thread->Start.numeric_argument = numeric_argument;

    _Thread_Load_environment( the_thread );

    _Thread_Ready( the_thread );

    _User_extensions_Thread_start( the_thread );

    return TRUE;
  }

  return FALSE;

}

/*
 *  _Thread_Restart
 *
 *  DESCRIPTION:
 *
 *  This routine resets the specified thread to its initial execution
 *  state and restarts it with the given arguments.  It returns TRUE if
 *  the thread was restarted and FALSE if the thread is dormant.
 */

boolean _Thread_Restart(
  Thread_Control      *the_thread,
  void                *pointer_argument,
  unsigned32           numeric_argument
)
{
  if ( !_States_Is_dormant( the_thread->current_state ) ) {

    _Thread_Set_transient( the_thread );
    the_thread->resource_count = 0;
    the_thread->is_preemptible   = the_thread->Start.is_preemptible;
    the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
    the_thread->budget_callout   = the_thread->Start.budget_callout;

    the_thread->Start.pointer_argument = pointer_argument;
    the_thread->Start.numeric_argument = numeric_argument;

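    /*
     *  If the thread is blocked on a thread queue, extract it (handling
     *  any proxy on a remote node); otherwise cancel its timer if one is
     *  active.
     */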
    if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {

      if ( _Watchdog_Is_active( &the_thread->Timer ) )
        (void) _Watchdog_Remove( &the_thread->Timer );
    }

    if ( the_thread->current_priority != the_thread->Start.initial_priority ) {
      the_thread->real_priority = the_thread->Start.initial_priority;
      _Thread_Set_priority( the_thread, the_thread->Start.initial_priority );
    }

    _Thread_Load_environment( the_thread );

    _Thread_Ready( the_thread );

    _User_extensions_Thread_restart( the_thread );

    if ( _Thread_Is_executing ( the_thread ) )
      _Thread_Restart_self();

    return TRUE;
  }

  return FALSE;
}

/*
 *  _Thread_Close
 *
 *  DESCRIPTION:
 *
 *  This routine closes the specified thread object: it removes the thread
 *  from any thread queue or timer it is blocked on, runs the delete user
 *  extensions, and releases the stack, floating point, and extensions
 *  areas.
 */

void _Thread_Close(
  Objects_Information  *information,
  Thread_Control       *the_thread
)
{
  _Objects_Close( information, &the_thread->Object );

  _Thread_Set_state( the_thread, STATES_TRANSIENT );

  if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {

    if ( _Watchdog_Is_active( &the_thread->Timer ) )
      (void) _Watchdog_Remove( &the_thread->Timer );
  }

  _User_extensions_Thread_delete( the_thread );

#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( _Thread_Is_allocated_fp( the_thread ) )
    _Thread_Deallocate_fp();
#endif
  the_thread->fp_context = NULL;

  if ( the_thread->Start.fp_context )
    (void) _Workspace_Free( the_thread->Start.fp_context );

  _Thread_Stack_Free( the_thread );

  if ( the_thread->extensions )
    (void) _Workspace_Free( the_thread->extensions );

  the_thread->Start.stack = NULL;
  the_thread->extensions = NULL;
}

/*PAGE
 *
 *  _Thread_Ready
 *
 *  This kernel routine readies the requested thread; the ready chain
 *  is adjusted and a new heir thread may be selected.
 *
 *  Input parameters:
 *    the_thread - pointer to thread control block
 *
 *  Output parameters:  NONE
 *
 *  NOTE:  This routine uses the "blocking" heir selection mechanism.
 *         This ensures the correct heir after a thread restart.
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Ready(
  Thread_Control *the_thread
)
{
  ISR_Level              level;
  Thread_Control *heir;

  _ISR_Disable( level );

  the_thread->current_state = STATES_READY;

  _Priority_Add_to_bit_map( &the_thread->Priority_map );

  _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );

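  /*
   *  _ISR_Flash briefly re-enables interrupts here to bound interrupt
   *  latency before the heir is recalculated.
   */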
  _ISR_Flash( level );

  _Thread_Calculate_heir();

  heir = _Thread_Heir;

  if ( !_Thread_Is_executing( heir ) && _Thread_Executing->is_preemptible )
    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}

/*PAGE
 *
 *  _Thread_Clear_state
 *
 *  This kernel routine clears the appropriate states in the
 *  requested thread.  The thread ready chain is adjusted if
 *  necessary and the heir thread is set accordingly.
 *
 *  Input parameters:
 *    the_thread - pointer to thread control block
 *    state      - state set to clear
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    priority map
 *    select heir
 */


void _Thread_Clear_state(
  Thread_Control *the_thread,
  States_Control  state
)
{
  ISR_Level       level;
  States_Control  current_state;

  _ISR_Disable( level );
    current_state = the_thread->current_state;

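    /*
     *  Act only if at least one of the states being cleared is currently
     *  set; otherwise leave the thread state and ready chain untouched.
     */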
    if ( current_state & state ) {
      current_state =
      the_thread->current_state = _States_Clear( state, current_state );

      if ( _States_Is_ready( current_state ) ) {

        _Priority_Add_to_bit_map( &the_thread->Priority_map );

        _Chain_Append_unprotected(the_thread->ready, &the_thread->Object.Node);

        _ISR_Flash( level );

        if ( the_thread->current_priority < _Thread_Heir->current_priority ) {
          _Thread_Heir = the_thread;
          if ( _Thread_Executing->is_preemptible ||
               the_thread->current_priority == 0 )
            _Context_Switch_necessary = TRUE;
        }
      }
    }
  _ISR_Enable( level );
}

/*PAGE
 *
 * _Thread_Set_state
 *
 * This kernel routine sets the requested state in the thread.  The
 * thread's ready chain is adjusted if necessary.
 *
 * Input parameters:
 *   the_thread   - pointer to thread control block
 *   state - state to be set
 *
 * Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select map
 */

void _Thread_Set_state(
  Thread_Control *the_thread,
  States_Control         state
)
{
  ISR_Level             level;
  Chain_Control *ready;

  ready = the_thread->ready;
  _ISR_Disable( level );
  if ( !_States_Is_ready( the_thread->current_state ) ) {
    the_thread->current_state =
       _States_Set( state, the_thread->current_state );
    _ISR_Enable( level );
    return;
  }

  the_thread->current_state = state;

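  /*
   *  The thread was ready, so remove it from its ready chain.  If it is
   *  the only thread at that priority, empty the chain and clear the
   *  corresponding bit in the priority bit map instead of extracting it.
   */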
  if ( _Chain_Has_only_one_node( ready ) ) {

    _Chain_Initialize_empty( ready );
    _Priority_Remove_from_bit_map( &the_thread->Priority_map );

  } else
    _Chain_Extract_unprotected( &the_thread->Object.Node );

  _ISR_Flash( level );

  if ( _Thread_Is_heir( the_thread ) )
     _Thread_Calculate_heir();

  if ( _Thread_Is_executing( the_thread ) )
    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}

/*PAGE
 *
 *  _Thread_Set_transient
 *
 *  This kernel routine places the requested thread in the transient state
 *  which will remove it from the ready queue, if necessary.  No
 *  rescheduling is necessary because it is assumed that the transient
 *  state will be cleared before dispatching is enabled.
 *
 *  Input parameters:
 *    the_thread - pointer to thread control block
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    only case
 */

void _Thread_Set_transient(
  Thread_Control *the_thread
)
{
  ISR_Level             level;
  unsigned32            old_state;
  Chain_Control *ready;

  ready = the_thread->ready;
  _ISR_Disable( level );

  old_state = the_thread->current_state;
  the_thread->current_state = _States_Set( STATES_TRANSIENT, old_state );

  if ( _States_Is_ready( old_state ) ) {
    if ( _Chain_Has_only_one_node( ready ) ) {

      _Chain_Initialize_empty( ready );
      _Priority_Remove_from_bit_map( &the_thread->Priority_map );

    } else
      _Chain_Extract_unprotected( &the_thread->Object.Node );
  }

  _ISR_Enable( level );

}

/*PAGE
 *
 *  _Thread_Reset_timeslice
 *
 *  This routine removes the running thread from the ready chain,
 *  places it immediately at the rear of that chain, and then resets
 *  the timeslice counter.  The heir thread will be updated if the
 *  running thread is also the heir.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Reset_timeslice( void )
{
  ISR_Level       level;
  Thread_Control *executing;
  Chain_Control  *ready;

  executing = _Thread_Executing;
  ready     = executing->ready;
  _ISR_Disable( level );
    if ( _Chain_Has_only_one_node( ready ) ) {
      _ISR_Enable( level );
      return;
    }
    _Chain_Extract_unprotected( &executing->Object.Node );
    _Chain_Append_unprotected( ready, &executing->Object.Node );

  _ISR_Flash( level );

    if ( _Thread_Is_heir( executing ) )
      _Thread_Heir = (Thread_Control *) ready->first;

    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}

/*PAGE
 *
 *  _Thread_Tickle_timeslice
 *
 *  This scheduler routine determines if timeslicing is enabled
 *  for the currently executing thread and, if so, updates the
 *  timeslice count and checks for timeslice expiration.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 */

void _Thread_Tickle_timeslice( void )
{
  Thread_Control *executing;

  executing = _Thread_Executing;

  if ( !executing->is_preemptible )
    return;

  if ( !_States_Is_ready( executing->current_state ) )
    return;

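  /*
   *  For the timeslice algorithms, charge one tick against the thread's
   *  budget; when it is exhausted, the thread yields and its budget is
   *  replenished.  For the callout algorithm, the configured callout is
   *  invoked when the budget is exhausted.
   */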
  switch ( executing->budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
      break;

    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
    case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
      if ( --executing->cpu_time_budget == 0 ) {
        _Thread_Reset_timeslice();
        executing->cpu_time_budget = _Thread_Ticks_per_timeslice;
      }
      break;

    case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
      if ( --executing->cpu_time_budget == 0 )
        (*executing->budget_callout)( executing );
      break;
  }
}

/*PAGE
 *
 *  _Thread_Yield_processor
 *
 *  This kernel routine removes the running thread from the ready chain
 *  and places it immediately at the rear of that chain.  The heir thread
 *  is updated if the running thread was the heir.  If the running thread
 *  is the only thread on its ready chain, a dispatch is flagged only when
 *  it is no longer the heir.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Yield_processor( void )
{
  ISR_Level       level;
  Thread_Control *executing;
  Chain_Control  *ready;

  executing = _Thread_Executing;
  ready     = executing->ready;
  _ISR_Disable( level );
    if ( !_Chain_Has_only_one_node( ready ) ) {
      _Chain_Extract_unprotected( &executing->Object.Node );
      _Chain_Append_unprotected( ready, &executing->Object.Node );

      _ISR_Flash( level );

      if ( _Thread_Is_heir( executing ) )
        _Thread_Heir = (Thread_Control *) ready->first;
      _Context_Switch_necessary = TRUE;
    }
    else if ( !_Thread_Is_heir( executing ) )
      _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}

/*PAGE
 *
 *  _Thread_Load_environment
 *
 *  Load starting environment for another thread from its start area in the
 *  thread.  Only called from t_restart and t_start.
 *
 *  Input parameters:
 *    the_thread - thread control block pointer
 *
 *  Output parameters:  NONE
 */

void _Thread_Load_environment(
  Thread_Control *the_thread
)
{
  boolean is_fp = FALSE;

  if ( the_thread->Start.fp_context ) {
    the_thread->fp_context = the_thread->Start.fp_context;
    _Context_Initialize_fp( &the_thread->fp_context );
    is_fp = TRUE;
  }

  the_thread->do_post_task_switch_extension = FALSE;
  the_thread->is_preemptible   = the_thread->Start.is_preemptible;
  the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
  the_thread->budget_callout   = the_thread->Start.budget_callout;

  _Context_Initialize(
    &the_thread->Registers,
    the_thread->Start.Initial_stack.area,
    the_thread->Start.Initial_stack.size,
    the_thread->Start.isr_level,
    _Thread_Handler,
    is_fp
  );

}

/*PAGE
 *
 *  _Thread_Handler
 *
 *  This routine is the "primal" entry point for all threads.
 *  _Context_Initialize() dummies up the thread's initial context
 *  to cause the first Context_Switch() to jump to _Thread_Handler().
 *
 *  This routine is also the default handler for a thread that exits:
 *  control returns here when a thread's entry point returns, and the
 *  configured fatal error handler is invoked to process the exit.
 *
 *  NOTE:
 *
 *  On entry, it is assumed all interrupts are blocked and that this
 *  routine needs to set the initial isr level.  This may or may not
 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
 *  setting the initial isr level properly here is safe.
 *
 *  Currently this is only really needed for the posix port,
 *  ref: _Context_Switch in unix/cpu.c
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 */

void _Thread_Handler( void )
{
  ISR_Level  level;
  Thread_Control *executing;

  executing = _Thread_Executing;

  /*
   * We have to put level into a register for those CPUs that use
   * inline asm here.
   */

  level = executing->Start.isr_level;
  _ISR_Set_level(level);

  /*
   * Take care that 'begin' extensions get to complete before
   * 'switch' extensions can run.  This means we must keep dispatch
   * disabled until all 'begin' extensions complete.
   */

  _User_extensions_Thread_begin( executing );

  /*
   *  At this point, the dispatch disable level BETTER be 1.
   */

  _Thread_Enable_dispatch();

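  /*
   *  Invoke the thread's entry point with the argument(s) appropriate to
   *  its start prototype.
   */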
  switch ( executing->Start.prototype ) {
    case THREAD_START_NUMERIC:
      (*executing->Start.entry_point)( executing->Start.numeric_argument );
      break;
    case THREAD_START_POINTER:
      (*executing->Start.entry_point)( executing->Start.pointer_argument );
      break;
    case THREAD_START_BOTH_POINTER_FIRST:
      (*executing->Start.entry_point)(
        executing->Start.pointer_argument,
        executing->Start.numeric_argument
      );
      break;
    case THREAD_START_BOTH_NUMERIC_FIRST:
      (*executing->Start.entry_point)(
        executing->Start.numeric_argument,
        executing->Start.pointer_argument
      );
      break;
  }

  _User_extensions_Thread_exitted( executing );

  _Internal_error_Occurred(
    INTERNAL_ERROR_CORE,
    TRUE,
    INTERNAL_ERROR_THREAD_EXITTED
  );
}

/*PAGE
 *
 *  _Thread_Delay_ended
 *
 *  This routine processes a thread whose delay period has ended.
 *  It is called by the watchdog handler.
 *
 *  Input parameters:
 *    id - thread id
 *
 *  Output parameters: NONE
 */

void _Thread_Delay_ended(
  Objects_Id  id,
  void       *ignored
)
{
  Thread_Control    *the_thread;
  Objects_Locations  location;

  the_thread = _Thread_Get( id, &location );
  switch ( location ) {
    case OBJECTS_ERROR:
    case OBJECTS_REMOTE:  /* impossible */
      break;
    case OBJECTS_LOCAL:
      _Thread_Unblock( the_thread );
      _Thread_Unnest_dispatch();
      break;
  }
}

/*PAGE
 *
 *  _Thread_Change_priority
 *
 *  This kernel routine changes the priority of the thread.  The
 *  thread chain is adjusted if necessary.
 *
 *  Input parameters:
 *    the_thread   - pointer to thread control block
 *    new_priority - ultimate priority
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Change_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
)
{
  ISR_Level level;

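  /*
   *  Place the thread in the transient state so it is off its ready chain
   *  while its priority and ready chain pointer are updated.
   */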
  _Thread_Set_transient( the_thread );

  if ( the_thread->current_priority != new_priority )
    _Thread_Set_priority( the_thread, new_priority );

  _ISR_Disable( level );

  the_thread->current_state =
    _States_Clear( STATES_TRANSIENT, the_thread->current_state );

  if ( ! _States_Is_ready( the_thread->current_state ) ) {
    _ISR_Enable( level );
    return;
  }

  _Priority_Add_to_bit_map( &the_thread->Priority_map );
  _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );

  _ISR_Flash( level );

  _Thread_Calculate_heir();

  if ( !_Thread_Is_executing_also_the_heir() &&
       _Thread_Executing->is_preemptible )
    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}

/*PAGE
 *
 * _Thread_Set_priority
 *
 * This routine sets the current priority of the thread and updates the
 * thread's ready chain pointer and priority map information accordingly.
 *
 *  Input parameters:
 *    the_thread   - pointer to thread control block
 *    new_priority - new priority
 *
 *  Output: NONE
 */

void _Thread_Set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
)
{
  the_thread->current_priority = new_priority;
  the_thread->ready            = &_Thread_Ready_chain[ new_priority ];

  _Priority_Initialize_information( &the_thread->Priority_map, new_priority );
}

/*PAGE
 *
 *  _Thread_Evaluate_mode
 *
 *  This routine determines whether a dispatch is needed as the result of
 *  a mode change by the executing thread.  If so, it sets the context
 *  switch necessary flag and returns TRUE.
 */

boolean _Thread_Evaluate_mode( void )
{
  Thread_Control     *executing;

  executing = _Thread_Executing;

  if ( !_States_Is_ready( executing->current_state ) ||
       ( !_Thread_Is_heir( executing ) && executing->is_preemptible ) ) {
    _Context_Switch_necessary = TRUE;
    return TRUE;
  }

  return FALSE;
}

/*PAGE
 *
 *  _Thread_Get
 *
 *  NOTE:  If we are not using static inlines, this must be a real
 *         subroutine call.
 *
 *  NOTE:  XXX... This routine may be able to be optimized.
 */

#ifndef USE_INLINES

Thread_Control *_Thread_Get (
  Objects_Id           id,
  Objects_Locations   *location
)
{
  Objects_Classes      the_class;
  Objects_Information *information;

  if ( _Objects_Are_ids_equal( id, OBJECTS_ID_OF_SELF ) ) {
    _Thread_Disable_dispatch();
    *location = OBJECTS_LOCAL;
    return( _Thread_Executing );
  }

  the_class = _Objects_Get_class( id );

  if ( the_class > OBJECTS_CLASSES_LAST ) {
    *location = OBJECTS_ERROR;
    return (Thread_Control *) 0;
  }

  information = _Objects_Information_table[ the_class ];

  if ( !information || !information->is_thread ) {
    *location = OBJECTS_ERROR;
    return (Thread_Control *) 0;
  }

  return (Thread_Control *) _Objects_Get( information, id, location );
}

#endif

/*PAGE
 *
 *  _Thread_Idle_body
 *
 *  This kernel routine is the idle thread.  The idle thread runs any time
 *  no other thread is ready to run.  This thread loops forever with
 *  interrupts enabled.
 *
 *  Input parameters:
 *    ignored - this parameter is ignored
 *
 *  Output parameters:  NONE
 */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == FALSE)
Thread _Thread_Idle_body(
  unsigned32 ignored
)
{
  for( ; ; ) ;
}
#endif