source: rtems/c/src/exec/score/src/thread.c @ ebd40c0

Last change on this file since ebd40c0 was ebd40c0, checked in by Joel Sherrill <joel.sherrill@…>, on Jun 6, 1996 at 2:55:08 PM

Added Sporadic Server support to POSIX threads, which required changes
in the core to support multiple algorithms to handle CPU time budgeting,
which resulted in a change to the calling sequence of _Thread_Initialize.

  • Property mode set to 100644
File size: 30.7 KB
/*
 *  Thread Handler
 *
 *
 *  COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
 *  On-Line Applications Research Corporation (OAR).
 *  All rights assigned to U.S. Government, 1994.
 *
 *  This material may be reproduced by or for the U.S. Government pursuant
 *  to the copyright license under the clause at DFARS 252.227-7013.  This
 *  notice must appear in all copies of this file and its derivatives.
 *
 *  $Id$
 */

#include <rtems/system.h>
#include <rtems/score/apiext.h>
#include <rtems/score/context.h>
#include <rtems/score/interr.h>
#include <rtems/score/isr.h>
#include <rtems/score/object.h>
#include <rtems/score/priority.h>
#include <rtems/score/states.h>
#include <rtems/score/sysstate.h>
#include <rtems/score/thread.h>
#include <rtems/score/threadq.h>
#include <rtems/score/userext.h>
#include <rtems/score/wkspace.h>

/*PAGE
 *
 *  _Thread_Handler_initialization
 *
 *  This routine initializes all thread manager related data structures.
 *
 *  Input parameters:
 *    ticks_per_timeslice - clock ticks per quantum
 *    maximum_extensions  - number of user extensions to support
 *    maximum_proxies     - number of proxies to initialize
 *
 *  Output parameters:  NONE
 */

char *_Thread_Idle_name = "IDLE";

void _Thread_Handler_initialization(
  unsigned32   ticks_per_timeslice,
  unsigned32   maximum_extensions,
  unsigned32   maximum_proxies
)
{
  unsigned32      index;

  /*
   * BOTH stack hooks must be set or both must be NULL.
   * Do not allow a mixture.
   */

  if ( !( ( _CPU_Table.stack_allocate_hook == 0 )
       == ( _CPU_Table.stack_free_hook == 0 ) ) )
    _Internal_error_Occurred(
      INTERNAL_ERROR_CORE,
      TRUE,
      INTERNAL_ERROR_BAD_STACK_HOOK
    );

  _Context_Switch_necessary = FALSE;
  _Thread_Executing         = NULL;
  _Thread_Heir              = NULL;
  _Thread_Allocated_fp      = NULL;

  _Thread_Maximum_extensions = maximum_extensions;

  _Thread_Ticks_per_timeslice = ticks_per_timeslice;

  _Thread_Ready_chain = _Workspace_Allocate_or_fatal_error(
    (PRIORITY_MAXIMUM + 1) * sizeof(Chain_Control)
  );

  for ( index=0; index <= PRIORITY_MAXIMUM ; index++ )
    _Chain_Initialize_empty( &_Thread_Ready_chain[ index ] );

  _Thread_MP_Handler_initialization( maximum_proxies );

  /*
   *  Initialize this class of objects.
   */

  _Objects_Initialize_information(
    &_Thread_Internal_information,
    OBJECTS_INTERNAL_THREADS,
    FALSE,
    ( _System_state_Is_multiprocessing ) ?  2 : 1,
    sizeof( Thread_Control ),
    TRUE,
    8,
    TRUE
  );

}

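/*
 *  Illustrative sketch, not part of the original file:  one possible order
 *  in which an initialization sequence could bring up the thread handler
 *  using routines from this file.  The configuration values and the
 *  function name below are hypothetical.
 */

static void _Example_Thread_bring_up( void )
{
  _Thread_Handler_initialization(
    10,    /* hypothetical clock ticks per timeslice */
    8,     /* hypothetical maximum user extensions */
    0      /* hypothetical maximum proxies (single processor system) */
  );

  _Thread_Create_idle();

  /*  ... API-specific threads would be created here ...  */

  _Thread_Start_multitasking();   /* switches to the first heir thread */
}
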
/*PAGE
 *
 *  _Thread_Create_idle
 */

void _Thread_Create_idle( void )
{
  void *idle;

  /*
   *  The entire workspace is zeroed during its initialization.  Thus, all
   *  fields not explicitly assigned were implicitly zeroed by
   *  _Workspace_Initialization.
   */

  _Thread_Idle = _Thread_Internal_allocate();

  /*
   *  Initialize the IDLE task.
   */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
  idle = _CPU_Thread_Idle_body;
#else
  idle = _Thread_Idle_body;
#endif

  if ( _CPU_Table.idle_task )
    idle = _CPU_Table.idle_task;

  _Thread_Initialize(
    &_Thread_Internal_information,
    _Thread_Idle,
    NULL,        /* allocate the stack */
    THREAD_IDLE_STACK_SIZE,
    CPU_IDLE_TASK_IS_FP,
    PRIORITY_MAXIMUM,
    TRUE,        /* preemptable */
    THREAD_CPU_BUDGET_ALGORITHM_NONE,
    NULL,        /* no budget algorithm callout */
    0,           /* all interrupts enabled */
    _Thread_Idle_name
  );

  /*
   *  WARNING!!! This is necessary to "kick" start the system and
   *             MUST be done before _Thread_Start is invoked.
   */

  _Thread_Heir      =
  _Thread_Executing = _Thread_Idle;

  _Thread_Start(
    _Thread_Idle,
    THREAD_START_NUMERIC,
    idle,
    NULL,
    0
  );

}

/*PAGE
 *
 *  _Thread_Start_multitasking
 *
 *  This kernel routine begins multitasking by switching from the system
 *  initialization context to the context of the heir thread.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 */

void _Thread_Start_multitasking( void )
{
  /*
   *  The system is now multitasking and completely initialized.
   *  This system thread now either "goes away" in a single processor
   *  system or "turns into" the server thread in an MP system.
   */

  _System_state_Set( SYSTEM_STATE_UP );

  _Context_Switch_necessary = FALSE;

  _Thread_Executing = _Thread_Heir;

  _Context_Switch( &_Thread_BSP_context, &_Thread_Executing->Registers );
}

/*PAGE
 *
 *  _Thread_Dispatch
 *
 *  This kernel routine determines if a dispatch is needed, and if so
 *  dispatches to the heir thread.  Once the heir is running, an attempt
 *  is made to dispatch any ASRs.
 *
 *  ALTERNATE ENTRY POINTS:
 *    void _Thread_Enable_dispatch();
 *
 *  Input parameters:  NONE
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    dispatch thread
 *    no dispatch thread
 */

#if ( CPU_INLINE_ENABLE_DISPATCH == FALSE )
void _Thread_Enable_dispatch( void )
{
  if ( --_Thread_Dispatch_disable_level )
    return;
  _Thread_Dispatch();
}
#endif

void _Thread_Dispatch( void )
{
  Thread_Control   *executing;
  Thread_Control   *heir;
  ISR_Level         level;

  executing   = _Thread_Executing;
  _ISR_Disable( level );
  while ( _Context_Switch_necessary == TRUE ) {
    heir = _Thread_Heir;
    _Thread_Dispatch_disable_level = 1;
    _Context_Switch_necessary = FALSE;
    _Thread_Executing = heir;
    _ISR_Enable( level );

    _User_extensions_Thread_switch( executing, heir );

    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
      heir->cpu_time_budget = _Thread_Ticks_per_timeslice;

    /*
     *  If the CPU has hardware floating point, then we must address saving
     *  and restoring it as part of the context switch.
     *
     *  The second conditional compilation section selects the algorithm used
     *  to context switch between floating point tasks.  The deferred algorithm
     *  can be significantly better in a system with few floating point tasks
     *  because it reduces the total number of save and restore FP context
     *  operations.  However, this algorithm cannot be used on all CPUs due
     *  to unpredictable use of FP registers by some compilers for integer
     *  operations.
     */

#if ( CPU_HARDWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    if ( (heir->fp_context != NULL) && !_Thread_Is_allocated_fp( heir ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &heir->fp_context );
      _Thread_Allocated_fp = heir;
    }
#else
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );

    if ( heir->fp_context != NULL )
      _Context_Restore_fp( &heir->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

    executing = _Thread_Executing;

    _ISR_Disable( level );
  }

  _Thread_Dispatch_disable_level = 0;

  _ISR_Enable( level );

  if ( executing->do_post_task_switch_extension ) {
    executing->do_post_task_switch_extension = FALSE;
    _API_extensions_Run_postswitch();
  }

}

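/*
 *  Illustrative sketch, not part of the original file:  the usual way a
 *  caller brackets a critical section with the dispatch disable level so
 *  that _Thread_Enable_dispatch (the alternate entry point above) performs
 *  any deferred dispatch on exit.  The function name is hypothetical.
 */

static void _Example_Critical_section( void )
{
  _Thread_Disable_dispatch();

  /*  ... manipulate thread or object state here ...  */

  _Thread_Enable_dispatch();   /* dispatches to a new heir if one was selected */
}
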
/*PAGE
 *
 *  _Thread_Stack_Allocate
 *
 *  Allocate the requested stack space for the thread.  Return the actual
 *  size allocated after any adjustment, or return zero if the allocation
 *  failed.  Set the Start.stack field to the address of the stack.
 */

static unsigned32 _Thread_Stack_Allocate(
  Thread_Control *the_thread,
  unsigned32 stack_size)
{
  void *stack_addr = 0;

  if ( !_Stack_Is_enough( stack_size ) )
    stack_size = STACK_MINIMUM_SIZE;

  /*
   * Call ONLY the CPU table stack allocate hook, _or_ the
   * RTEMS workspace allocate.  This is so the stack free
   * routine can call the correct deallocation routine.
   */

  if ( _CPU_Table.stack_allocate_hook )
  {
    stack_addr = (*_CPU_Table.stack_allocate_hook)( stack_size );
  } else {

    /*
     *  First pad the requested size so we allocate enough memory
     *  so the context initialization can align it properly.  The address
     *  returned by the workspace allocate must be directly stored in the
     *  stack control block because it is later used in the free sequence.
     *
     *  Thus it is the responsibility of the CPU dependent code to
     *  get and keep the stack adjust factor, the stack alignment, and
     *  the context initialization sequence in sync.
     */

    stack_size = _Stack_Adjust_size( stack_size );
    stack_addr = _Workspace_Allocate( stack_size );
  }

  if ( !stack_addr )
      stack_size = 0;

  the_thread->Start.stack = stack_addr;

  return stack_size;
}

/*
 *  _Thread_Stack_Free
 *
 *  Deallocate the thread's stack.
 */

static void _Thread_Stack_Free(
  Thread_Control *the_thread
)
{
    /*
     *  If the API provided the stack space, then don't free it.
     */

    if ( !the_thread->Start.core_allocated_stack )
      return;

    /*
     * Call ONLY the CPU table stack free hook, or the
     * RTEMS workspace free.  This is so the free
     * routine properly matches the allocation of the stack.
     */

    if ( _CPU_Table.stack_free_hook )
        (*_CPU_Table.stack_free_hook)( the_thread->Start.Initial_stack.area );
    else
        _Workspace_Free( the_thread->Start.Initial_stack.area );
}

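/*
 *  Illustrative sketch, not part of the original file:  a port or BSP that
 *  manages its own stack memory must install BOTH hooks, as checked in
 *  _Thread_Handler_initialization, so that the allocation and free above
 *  stay matched.  The hook names are hypothetical, and the workspace is
 *  used here only so the sketch is self-contained; a real hook would use
 *  BSP-specific memory.
 */

static void *_Example_Stack_allocate_hook( unsigned32 stack_size )
{
  return _Workspace_Allocate( stack_size );   /* stand-in for a BSP allocator */
}

static void _Example_Stack_free_hook( void *stack_area )
{
  _Workspace_Free( stack_area );              /* must match the allocator above */
}

/*  In the BSP's CPU table setup:
 *    _CPU_Table.stack_allocate_hook = _Example_Stack_allocate_hook;
 *    _CPU_Table.stack_free_hook     = _Example_Stack_free_hook;
 */
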
/*PAGE
 *
 *  _Thread_Initialize
 *
 *  This routine initializes the specified thread.  It allocates the
 *  thread's stack, floating point context, and extensions areas as
 *  needed and places the thread in the dormant state.
 */

boolean _Thread_Initialize(
  Objects_Information                  *information,
  Thread_Control                       *the_thread,
  void                                 *stack_area,
  unsigned32                            stack_size,
  boolean                               is_fp,
  Priority_Control                      priority,
  boolean                               is_preemptible,
  Thread_CPU_budget_algorithms          budget_algorithm,
  Thread_CPU_budget_algorithm_callout   budget_callout,
  unsigned32                            isr_level,
  Objects_Name                          name
)
{
  unsigned32           actual_stack_size = 0;
  void                *stack = NULL;
  void                *fp_area;
  void                *extensions_area;

  /*
   *  Allocate and initialize the stack for this thread.
   */

  if ( !stack_area ) {
    if ( !_Stack_Is_enough( stack_size ) )
      actual_stack_size = STACK_MINIMUM_SIZE;
    else
      actual_stack_size = stack_size;

    actual_stack_size = _Stack_Adjust_size( actual_stack_size );
    stack             = stack_area;

    actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );

    if ( !actual_stack_size )
      return FALSE;                     /* stack allocation failed */

    stack = the_thread->Start.stack;

    the_thread->Start.core_allocated_stack = TRUE;
  } else {
    stack = stack_area;
    actual_stack_size = stack_size;
    the_thread->Start.core_allocated_stack = FALSE;
  }

  _Stack_Initialize(
     &the_thread->Start.Initial_stack,
     stack,
     actual_stack_size
  );

  /*
   *  Allocate the floating point area for this thread
   */

  if ( is_fp ) {

    fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
    if ( !fp_area ) {
      _Thread_Stack_Free( the_thread );
      return FALSE;
    }
    fp_area = _Context_Fp_start( fp_area, 0 );

  } else
    fp_area = NULL;

  the_thread->fp_context       = fp_area;
  the_thread->Start.fp_context = fp_area;

  /*
   *  Allocate the extensions area for this thread
   */

  if ( _Thread_Maximum_extensions ) {
    extensions_area = _Workspace_Allocate(
      (_Thread_Maximum_extensions + 1) * sizeof( void * )
    );

    if ( !extensions_area ) {
      if ( fp_area )
        (void) _Workspace_Free( fp_area );

      _Thread_Stack_Free( the_thread );

      return FALSE;
    }
  } else
    extensions_area = NULL;

  the_thread->extensions = extensions_area;

  /*
   *  General initialization
   */

  the_thread->Start.is_preemptible   = is_preemptible;
  the_thread->Start.budget_algorithm = budget_algorithm;
  the_thread->Start.budget_callout   = budget_callout;
  the_thread->Start.isr_level        = isr_level;

  the_thread->current_state          = STATES_DORMANT;
  the_thread->resource_count         = 0;
  the_thread->real_priority          = priority;
  the_thread->Start.initial_priority = priority;

  _Thread_Set_priority( the_thread, priority );

  /*
   *  Open the object
   */

  _Objects_Open( information, &the_thread->Object, name );

  /*
   *  Invoke create extensions
   */

  if ( !_User_extensions_Thread_create( the_thread ) ) {

    if ( extensions_area )
      (void) _Workspace_Free( extensions_area );

    if ( fp_area )
      (void) _Workspace_Free( fp_area );

    _Thread_Stack_Free( the_thread );

    return FALSE;
  }

  return TRUE;

}

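/*
 *  Illustrative sketch, not part of the original file:  the calling
 *  sequence an API layer might use to build and start a thread, modeled
 *  on _Thread_Create_idle above.  The function name, object name, entry
 *  point, and configuration values are hypothetical.
 */

static char *_Example_thread_name = "EXMP";

static boolean _Example_Build_and_start_thread(
  Objects_Information *information,
  Thread_Control      *the_thread,
  void                *entry_point
)
{
  boolean status;

  status = _Thread_Initialize(
    information,
    the_thread,
    NULL,                          /* let the core allocate the stack */
    STACK_MINIMUM_SIZE * 2,        /* hypothetical stack size */
    FALSE,                         /* no floating point context */
    100,                           /* hypothetical priority */
    TRUE,                          /* preemptible */
    THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE,
    NULL,                          /* no budget algorithm callout */
    0,                             /* all interrupts enabled */
    _Example_thread_name
  );

  if ( !status )
    return FALSE;

  return _Thread_Start(
    the_thread,
    THREAD_START_NUMERIC,
    entry_point,
    NULL,                          /* no pointer argument */
    42                             /* hypothetical numeric argument */
  );
}
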
/*
 *  _Thread_Start
 *
 *  DESCRIPTION:
 *
 *  This routine starts the specified thread by loading its starting
 *  environment and readying it.  It has no effect unless the thread
 *  is dormant.
 */

boolean _Thread_Start(
  Thread_Control       *the_thread,
  Thread_Start_types    the_prototype,
  void                 *entry_point,
  void                 *pointer_argument,
  unsigned32            numeric_argument
)
{
  if ( _States_Is_dormant( the_thread->current_state ) ) {

    the_thread->Start.entry_point      = entry_point;

    the_thread->Start.prototype        = the_prototype;
    the_thread->Start.pointer_argument = pointer_argument;
    the_thread->Start.numeric_argument = numeric_argument;

    _Thread_Load_environment( the_thread );

    _Thread_Ready( the_thread );

    _User_extensions_Thread_start( the_thread );

    return TRUE;
  }

  return FALSE;

}

/*
 *  _Thread_Restart
 *
 *  DESCRIPTION:
 *
 *  This routine returns the specified thread to its initial execution
 *  state and readies it.  It has no effect if the thread is dormant.
 */

boolean _Thread_Restart(
  Thread_Control      *the_thread,
  void                *pointer_argument,
  unsigned32           numeric_argument
)
{
  if ( !_States_Is_dormant( the_thread->current_state ) ) {

    _Thread_Set_transient( the_thread );
    the_thread->resource_count   = 0;
    the_thread->is_preemptible   = the_thread->Start.is_preemptible;
    the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
    the_thread->budget_callout   = the_thread->Start.budget_callout;

    the_thread->Start.pointer_argument = pointer_argument;
    the_thread->Start.numeric_argument = numeric_argument;

    if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {

      if ( _Watchdog_Is_active( &the_thread->Timer ) )
        (void) _Watchdog_Remove( &the_thread->Timer );
    }

    if ( the_thread->current_priority != the_thread->Start.initial_priority ) {
      the_thread->real_priority = the_thread->Start.initial_priority;
      _Thread_Set_priority( the_thread, the_thread->Start.initial_priority );
    }

    _Thread_Load_environment( the_thread );

    _Thread_Ready( the_thread );

    _User_extensions_Thread_restart( the_thread );

    if ( _Thread_Is_executing ( the_thread ) )
      _Thread_Restart_self();

    return TRUE;
  }

  return FALSE;
}

/*
 *  _Thread_Close
 *
 *  DESCRIPTION:
 *
 *  This routine closes the thread object, removes the thread from any
 *  thread queue or watchdog timer it is waiting on, invokes the delete
 *  extensions, and frees the thread's stack, floating point context,
 *  and extensions areas.
 */

void _Thread_Close(
  Objects_Information  *information,
  Thread_Control       *the_thread
)
{
  _Objects_Close( information, &the_thread->Object );

  _Thread_Set_state( the_thread, STATES_TRANSIENT );

  if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {

    if ( _Watchdog_Is_active( &the_thread->Timer ) )
      (void) _Watchdog_Remove( &the_thread->Timer );
  }

  _User_extensions_Thread_delete( the_thread );

#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( _Thread_Is_allocated_fp( the_thread ) )
    _Thread_Deallocate_fp();
#endif
  the_thread->fp_context = NULL;

  if ( the_thread->Start.fp_context )
    (void) _Workspace_Free( the_thread->Start.fp_context );

  _Thread_Stack_Free( the_thread );

  if ( the_thread->extensions )
    (void) _Workspace_Free( the_thread->extensions );

  the_thread->Start.stack = NULL;
  the_thread->extensions = NULL;
}

/*PAGE
 *
 *  _Thread_Ready
 *
 *  This kernel routine readies the requested thread; the ready chain
 *  is adjusted.  A new heir thread may be selected.
 *
 *  Input parameters:
 *    the_thread - pointer to thread control block
 *
 *  Output parameters:  NONE
 *
 *  NOTE:  This routine uses the "blocking" heir selection mechanism.
 *         This ensures the correct heir after a thread restart.
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Ready(
  Thread_Control *the_thread
)
{
  ISR_Level              level;
  Thread_Control *heir;

  _ISR_Disable( level );

  the_thread->current_state = STATES_READY;

  _Priority_Add_to_bit_map( &the_thread->Priority_map );

  _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );

  _ISR_Flash( level );

  _Thread_Calculate_heir();

  heir = _Thread_Heir;

  if ( !_Thread_Is_executing( heir ) && _Thread_Executing->is_preemptible )
    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}

/*PAGE
 *
 *  _Thread_Clear_state
 *
 *  This kernel routine clears the appropriate states in the
 *  requested thread.  The thread ready chain is adjusted if
 *  necessary and the heir thread is set accordingly.
 *
 *  Input parameters:
 *    the_thread - pointer to thread control block
 *    state      - state set to clear
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    priority map
 *    select heir
 */

void _Thread_Clear_state(
  Thread_Control *the_thread,
  States_Control  state
)
{
  ISR_Level level;

  _ISR_Disable( level );
    the_thread->current_state =
      _States_Clear( state, the_thread->current_state );

    if ( _States_Is_ready( the_thread->current_state ) ) {

      _Priority_Add_to_bit_map( &the_thread->Priority_map );

      _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );

      _ISR_Flash( level );

      if ( the_thread->current_priority < _Thread_Heir->current_priority ) {
        _Thread_Heir = the_thread;
        if ( _Thread_Executing->is_preemptible ||
             the_thread->current_priority == 0 )
          _Context_Switch_necessary = TRUE;
      }
    }
  _ISR_Enable( level );
}

/*PAGE
 *
 * _Thread_Set_state
 *
 * This kernel routine sets the requested state in the thread.  The
 * thread ready chain is adjusted if necessary.
 *
 * Input parameters:
 *   the_thread   - pointer to thread control block
 *   state - state to be set
 *
 * Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select map
 */

void _Thread_Set_state(
  Thread_Control *the_thread,
  States_Control         state
)
{
  ISR_Level             level;
  Chain_Control *ready;

  ready = the_thread->ready;
  _ISR_Disable( level );
  if ( !_States_Is_ready( the_thread->current_state ) ) {
    the_thread->current_state =
       _States_Set( state, the_thread->current_state );
    _ISR_Enable( level );
    return;
  }

  the_thread->current_state = state;

  if ( _Chain_Has_only_one_node( ready ) ) {

    _Chain_Initialize_empty( ready );
    _Priority_Remove_from_bit_map( &the_thread->Priority_map );

  } else
    _Chain_Extract_unprotected( &the_thread->Object.Node );

  _ISR_Flash( level );

  if ( _Thread_Is_heir( the_thread ) )
     _Thread_Calculate_heir();

  if ( _Thread_Is_executing( the_thread ) )
    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}

/*PAGE
 *
 *  _Thread_Set_transient
 *
 *  This kernel routine places the requested thread in the transient state
 *  which will remove it from the ready queue, if necessary.  No
 *  rescheduling is necessary because it is assumed that the transient
 *  state will be cleared before dispatching is enabled.
 *
 *  Input parameters:
 *    the_thread - pointer to thread control block
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    only case
 */

void _Thread_Set_transient(
  Thread_Control *the_thread
)
{
  ISR_Level             level;
  unsigned32            old_state;
  Chain_Control *ready;

  ready = the_thread->ready;
  _ISR_Disable( level );

  old_state = the_thread->current_state;
  the_thread->current_state = _States_Set( STATES_TRANSIENT, old_state );

  if ( _States_Is_ready( old_state ) ) {
    if ( _Chain_Has_only_one_node( ready ) ) {

      _Chain_Initialize_empty( ready );
      _Priority_Remove_from_bit_map( &the_thread->Priority_map );

    } else
      _Chain_Extract_unprotected( &the_thread->Object.Node );
  }

  _ISR_Enable( level );

}

/*PAGE
 *
 *  _Thread_Reset_timeslice
 *
 *  This routine removes the running thread from the ready chain and
 *  places it immediately at the rear of this chain; the timeslice
 *  counter is then reset.  The heir thread will be updated if the
 *  running thread is also currently the heir.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Reset_timeslice( void )
{
  ISR_Level       level;
  Thread_Control *executing;
  Chain_Control  *ready;

  executing = _Thread_Executing;
  ready     = executing->ready;
  _ISR_Disable( level );
    if ( _Chain_Has_only_one_node( ready ) ) {
      _ISR_Enable( level );
      return;
    }
    _Chain_Extract_unprotected( &executing->Object.Node );
    _Chain_Append_unprotected( ready, &executing->Object.Node );

  _ISR_Flash( level );

    if ( _Thread_Is_heir( executing ) )
      _Thread_Heir = (Thread_Control *) ready->first;

    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}

/*PAGE
 *
 *  _Thread_Tickle_timeslice
 *
 *  This scheduler routine determines if timeslicing is enabled
 *  for the currently executing thread and, if so, updates the
 *  timeslice count and checks for timeslice expiration.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 */

void _Thread_Tickle_timeslice( void )
{
  Thread_Control *executing;

  executing = _Thread_Executing;

  if ( !executing->is_preemptible )
    return;

  if ( !_States_Is_ready( executing->current_state ) )
    return;

  switch ( executing->budget_algorithm ) {
    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
      break;

    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
    case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
      if ( --executing->cpu_time_budget == 0 ) {
        _Thread_Reset_timeslice();
        executing->cpu_time_budget = _Thread_Ticks_per_timeslice;
      }
      break;

    case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
      if ( --executing->cpu_time_budget == 0 )
        (*executing->budget_callout)( executing );
      break;
  }
}

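/*
 *  Illustrative sketch, not part of the original file:  a periodic clock
 *  tick service would typically call _Thread_Tickle_timeslice once per
 *  tick so the executing thread's budget is charged.  A real port defers
 *  the dispatch to the interrupt epilogue; the simplified check below and
 *  the function name are hypothetical.
 */

static void _Example_Clock_tick( void )
{
  _Thread_Tickle_timeslice();

  if ( _Context_Switch_necessary && !_Thread_Dispatch_disable_level )
    _Thread_Dispatch();
}
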
/*PAGE
 *
 *  _Thread_Yield_processor
 *
 *  This kernel routine removes the running thread from the ready chain
 *  and places it immediately at the rear of this chain.  The heir
 *  thread will be updated if the running thread is also currently
 *  the heir.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Yield_processor( void )
{
  ISR_Level       level;
  Thread_Control *executing;
  Chain_Control  *ready;

  executing = _Thread_Executing;
  ready     = executing->ready;
  _ISR_Disable( level );
    if ( !_Chain_Has_only_one_node( ready ) ) {
      _Chain_Extract_unprotected( &executing->Object.Node );
      _Chain_Append_unprotected( ready, &executing->Object.Node );

      _ISR_Flash( level );

      if ( _Thread_Is_heir( executing ) )
        _Thread_Heir = (Thread_Control *) ready->first;
      _Context_Switch_necessary = TRUE;
    }
    else if ( !_Thread_Is_heir( executing ) )
      _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}

/*PAGE
 *
 *  _Thread_Load_environment
 *
 *  Load starting environment for another thread from its start area in the
 *  thread.  Only called from t_restart and t_start.
 *
 *  Input parameters:
 *    the_thread - thread control block pointer
 *
 *  Output parameters:  NONE
 */

void _Thread_Load_environment(
  Thread_Control *the_thread
)
{
  boolean is_fp = FALSE;

  if ( the_thread->Start.fp_context ) {
    the_thread->fp_context = the_thread->Start.fp_context;
    _Context_Initialize_fp( &the_thread->fp_context );
    is_fp = TRUE;
  }

  the_thread->do_post_task_switch_extension = FALSE;
  the_thread->is_preemptible   = the_thread->Start.is_preemptible;
  the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
  the_thread->budget_callout   = the_thread->Start.budget_callout;

  _Context_Initialize(
    &the_thread->Registers,
    the_thread->Start.Initial_stack.area,
    the_thread->Start.Initial_stack.size,
    the_thread->Start.isr_level,
    _Thread_Handler,
    is_fp
  );

}

/*PAGE
 *
 *  _Thread_Handler
 *
 *  This routine is the "primal" entry point for all threads.
 *  _Context_Initialize() dummies up the thread's initial context
 *  to cause the first Context_Switch() to jump to _Thread_Handler().
 *
 *  This routine is also the default thread exited error handler.  It is
 *  returned to when a thread exits.  The configured fatal error handler
 *  is invoked to process the exit.
 *
 *  NOTE:
 *
 *  On entry, it is assumed all interrupts are blocked and that this
 *  routine needs to set the initial isr level.  This may or may not
 *  actually be needed by the context switch routine and as a result
 *  interrupts may already be at their proper level.  Either way,
 *  setting the initial isr level properly here is safe.
 *
 *  Currently this is only really needed for the posix port,
 *  ref: _Context_Switch in unix/cpu.c
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 */

void _Thread_Handler( void )
{
  ISR_Level  level;
  Thread_Control *executing;

  executing = _Thread_Executing;

  /*
   * We have to put level into a register for those CPUs that use
   * inline asm here.
   */

  level = executing->Start.isr_level;
  _ISR_Set_level(level);

  /*
   * Take care that 'begin' extensions get to complete before
   * 'switch' extensions can run.  This means we must keep dispatching
   * disabled until all 'begin' extensions complete.
   */

  _User_extensions_Thread_begin( executing );

  /*
   *  At this point, the dispatch disable level BETTER be 1.
   */

  _Thread_Enable_dispatch();

  switch ( executing->Start.prototype ) {
    case THREAD_START_NUMERIC:
      (*executing->Start.entry_point)( executing->Start.numeric_argument );
      break;
    case THREAD_START_POINTER:
      (*executing->Start.entry_point)( executing->Start.pointer_argument );
      break;
    case THREAD_START_BOTH_POINTER_FIRST:
      (*executing->Start.entry_point)(
        executing->Start.pointer_argument,
        executing->Start.numeric_argument
      );
      break;
    case THREAD_START_BOTH_NUMERIC_FIRST:
      (*executing->Start.entry_point)(
        executing->Start.numeric_argument,
        executing->Start.pointer_argument
      );
      break;
  }

  _User_extensions_Thread_exitted( executing );

  _Internal_error_Occurred(
    INTERNAL_ERROR_CORE,
    TRUE,
    INTERNAL_ERROR_THREAD_EXITTED
  );
}

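/*
 *  Illustrative sketch, not part of the original file:  entry point shapes
 *  corresponding to the four start prototypes dispatched above.  The
 *  typedef names are hypothetical; the Thread return type follows
 *  _Thread_Idle_body below.
 */

typedef Thread ( *_Example_Numeric_entry )( unsigned32 );
typedef Thread ( *_Example_Pointer_entry )( void * );
typedef Thread ( *_Example_Both_pointer_first_entry )( void *, unsigned32 );
typedef Thread ( *_Example_Both_numeric_first_entry )( unsigned32, void * );
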
/*PAGE
 *
 *  _Thread_Delay_ended
 *
 *  This routine processes a thread whose delay period has ended.
 *  It is called by the watchdog handler.
 *
 *  Input parameters:
 *    id - thread id
 *
 *  Output parameters: NONE
 */

void _Thread_Delay_ended(
  Objects_Id  id,
  void       *ignored
)
{
  Thread_Control    *the_thread;
  Objects_Locations  location;

  the_thread = _Thread_Get( id, &location );
  switch ( location ) {
    case OBJECTS_ERROR:
    case OBJECTS_REMOTE:  /* impossible */
      break;
    case OBJECTS_LOCAL:
      _Thread_Unblock( the_thread );
      _Thread_Unnest_dispatch();
      break;
  }
}

/*PAGE
 *
 *  _Thread_Change_priority
 *
 *  This kernel routine changes the priority of the thread.  The
 *  thread chain is adjusted if necessary.
 *
 *  Input parameters:
 *    the_thread   - pointer to thread control block
 *    new_priority - ultimate priority
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Change_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
)
{
  ISR_Level level;

  _Thread_Set_transient( the_thread );

  if ( the_thread->current_priority != new_priority )
    _Thread_Set_priority( the_thread, new_priority );

  _ISR_Disable( level );

  the_thread->current_state =
    _States_Clear( STATES_TRANSIENT, the_thread->current_state );

  if ( ! _States_Is_ready( the_thread->current_state ) ) {
    _ISR_Enable( level );
    return;
  }

  _Priority_Add_to_bit_map( &the_thread->Priority_map );
  _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );

  _ISR_Flash( level );

  _Thread_Calculate_heir();

  if ( !_Thread_Is_executing_also_the_heir() &&
       _Thread_Executing->is_preemptible )
    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}

/*PAGE
 *
 * _Thread_Set_priority
 *
 * This routine sets the current priority of the thread to the new
 * priority and updates the thread's ready chain pointer and priority
 * map information.
 *
 *  Input parameters:
 *    the_thread   - pointer to thread control block
 *    new_priority - new priority
 *
 *  Output: NONE
 */

void _Thread_Set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
)
{
  the_thread->current_priority = new_priority;
  the_thread->ready            = &_Thread_Ready_chain[ new_priority ];

  _Priority_Initialize_information( &the_thread->Priority_map, new_priority );
}

/*PAGE
 *
 *  _Thread_Evaluate_mode
 *
 *  This routine determines whether a context switch is needed after the
 *  executing thread has changed its execution mode and, if so, sets
 *  _Context_Switch_necessary and returns TRUE.
 */

boolean _Thread_Evaluate_mode( void )
{
  Thread_Control     *executing;

  executing = _Thread_Executing;

  if ( !_States_Is_ready( executing->current_state ) ||
       ( !_Thread_Is_heir( executing ) && executing->is_preemptible ) ) {
    _Context_Switch_necessary = TRUE;
    return TRUE;
  }

  return FALSE;
}

/*PAGE
 *
 *  _Thread_Get
 *
 *  NOTE:  If we are not using static inlines, this must be a real
 *         subroutine call.
 *
 *  NOTE:  XXX... This routine may be able to be optimized.
 */

#ifndef USE_INLINES

Thread_Control *_Thread_Get (
  Objects_Id           id,
  Objects_Locations   *location
)
{
  Objects_Classes      the_class;
  Objects_Information *information;

  if ( _Objects_Are_ids_equal( id, OBJECTS_ID_OF_SELF ) ) {
    _Thread_Disable_dispatch();
    *location = OBJECTS_LOCAL;
    return( _Thread_Executing );
  }

  the_class = _Objects_Get_class( id );

  if ( the_class > OBJECTS_CLASSES_LAST ) {
    *location = OBJECTS_ERROR;
    return (Thread_Control *) 0;
  }

  information = _Objects_Information_table[ the_class ];

  if ( !information || !information->is_thread ) {
    *location = OBJECTS_ERROR;
    return (Thread_Control *) 0;
  }

  return (Thread_Control *) _Objects_Get( information, id, location );
}

#endif

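/*
 *  Illustrative sketch, not part of the original file:  _Thread_Get leaves
 *  thread dispatching disabled when it returns a local thread, so callers
 *  must balance that, as _Thread_Delay_ended above does with
 *  _Thread_Unnest_dispatch.  The function name is hypothetical and remote
 *  objects are ignored in this simplified example.
 */

static boolean _Example_Thread_exists( Objects_Id id )
{
  Thread_Control    *the_thread;
  Objects_Locations  location;

  the_thread = _Thread_Get( id, &location );

  if ( !the_thread || location != OBJECTS_LOCAL )
    return FALSE;

  _Thread_Unnest_dispatch();    /* balance the disable done by _Thread_Get */
  return TRUE;
}
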
/*PAGE
 *
 *  _Thread_Idle_body
 *
 *  This kernel routine is the idle thread.  The idle thread runs any time
 *  no other thread is ready to run.  This thread loops forever with
 *  interrupts enabled.
 *
 *  Input parameters:
 *    ignored - this parameter is ignored
 *
 *  Output parameters:  NONE
 */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == FALSE)
Thread _Thread_Idle_body(
  unsigned32 ignored
)
{
  for( ; ; ) ;
}
#endif