source: rtems/c/src/exec/score/src/thread.c @ 1543932

Last change on this file since 1543932 was 1543932, checked in by Joel Sherrill <joel.sherrill@…>, on 04/24/96 at 17:36:35

added comment regarding stack alignment issues to thread stack allocation
routine.

/*
 *  Thread Handler
 *
 *
 *  COPYRIGHT (c) 1989, 1990, 1991, 1992, 1993, 1994.
 *  On-Line Applications Research Corporation (OAR).
 *  All rights assigned to U.S. Government, 1994.
 *
 *  This material may be reproduced by or for the U.S. Government pursuant
 *  to the copyright license under the clause at DFARS 252.227-7013.  This
 *  notice must appear in all copies of this file and its derivatives.
 *
 *  $Id$
 */

#include <rtems/system.h>
#include <rtems/score/apiext.h>
#include <rtems/score/context.h>
#include <rtems/score/interr.h>
#include <rtems/score/isr.h>
#include <rtems/score/object.h>
#include <rtems/score/priority.h>
#include <rtems/score/states.h>
#include <rtems/score/sysstate.h>
#include <rtems/score/thread.h>
#include <rtems/score/threadq.h>
#include <rtems/score/userext.h>
#include <rtems/score/wkspace.h>

/*PAGE
 *
 *  _Thread_Handler_initialization
 *
 *  This routine initializes all thread manager related data structures.
 *
 *  Input parameters:
 *    ticks_per_timeslice - clock ticks per quantum
 *    maximum_extensions  - maximum number of user extensions
 *    maximum_proxies     - number of proxies to initialize
 *
 *  Output parameters:  NONE
 */

char *_Thread_Idle_name = "IDLE";

void _Thread_Handler_initialization(
  unsigned32   ticks_per_timeslice,
  unsigned32   maximum_extensions,
  unsigned32   maximum_proxies
)
{
  unsigned32      index;

  /*
   * BOTH stack hooks must be set or both must be NULL.
   * Do not allow a mixture.
   */

  if ( !( ( _CPU_Table.stack_allocate_hook == 0 )
       == ( _CPU_Table.stack_free_hook == 0 ) ) )
    _Internal_error_Occurred(
      INTERNAL_ERROR_CORE,
      TRUE,
      INTERNAL_ERROR_BAD_STACK_HOOK
    );

  _Context_Switch_necessary = FALSE;
  _Thread_Executing         = NULL;
  _Thread_Heir              = NULL;
  _Thread_Allocated_fp      = NULL;

  _Thread_Maximum_extensions = maximum_extensions;

  _Thread_Ticks_remaining_in_timeslice = ticks_per_timeslice;
  _Thread_Ticks_per_timeslice          = ticks_per_timeslice;

  _Thread_Ready_chain = _Workspace_Allocate_or_fatal_error(
    (PRIORITY_MAXIMUM + 1) * sizeof(Chain_Control)
  );

  for ( index=0; index <= PRIORITY_MAXIMUM ; index++ )
    _Chain_Initialize_empty( &_Thread_Ready_chain[ index ] );

  _Thread_MP_Handler_initialization( maximum_proxies );

  /*
   *  Initialize this class of objects.
   */

  _Objects_Initialize_information(
    &_Thread_Internal_information,
    OBJECTS_INTERNAL_THREADS,
    FALSE,
    ( _System_state_Is_multiprocessing ) ?  2 : 1,
    sizeof( Thread_Control ),
    TRUE,
    8,
    TRUE
  );

}
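
/*
 *  A minimal sketch of how a system initialization sequence might invoke the
 *  handler initialization above.  The configuration values shown (a fifty
 *  tick quantum, sixteen user extensions, and no proxies on a single
 *  processor node) are hypothetical and are not taken from this file.
 */

void Example_Thread_manager_setup( void )
{
  unsigned32 ticks_per_timeslice = 50;   /* hypothetical quantum in clock ticks */
  unsigned32 maximum_extensions  = 16;   /* per-thread extension pointers */
  unsigned32 maximum_proxies     = 0;    /* no remote blocking on this node */

  /* the workspace and object handlers are assumed to be initialized already */
  _Thread_Handler_initialization(
    ticks_per_timeslice,
    maximum_extensions,
    maximum_proxies
  );
}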

/*PAGE
 *
 *  _Thread_Create_idle
 */

void _Thread_Create_idle( void )
{
  void *idle;

  /*
   *  The entire workspace is zeroed during its initialization.  Thus, all
   *  fields not explicitly assigned were explicitly zeroed by
   *  _Workspace_Initialization.
   */

  _Thread_Idle = _Thread_Internal_allocate();

  /*
   *  Initialize the IDLE task.
   */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
  idle = _CPU_Thread_Idle_body;
#else
  idle = _Thread_Idle_body;
#endif

  if ( _CPU_Table.idle_task )
    idle = _CPU_Table.idle_task;

  _Thread_Initialize(
    &_Thread_Internal_information,
    _Thread_Idle,
    NULL,        /* allocate the stack */
    THREAD_IDLE_STACK_SIZE,
    CPU_IDLE_TASK_IS_FP,
    PRIORITY_MAXIMUM,
    TRUE,        /* preemptable */
    FALSE,       /* not timesliced */
    0,           /* all interrupts enabled */
    _Thread_Idle_name
  );

  /*
   *  WARNING!!! This is necessary to "kick" start the system and
   *             MUST be done before _Thread_Start is invoked.
   */

  _Thread_Heir      =
  _Thread_Executing = _Thread_Idle;

  _Thread_Start(
    _Thread_Idle,
    THREAD_START_NUMERIC,
    idle,
    NULL,
    0
  );

}
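
/*
 *  A minimal sketch of how a BSP might supply its own idle body through the
 *  CPU table, which the selection logic above prefers over the default.  The
 *  function name and the low-power wait are hypothetical; a real port would
 *  typically execute a processor-specific halt or wait instruction here.
 */

Thread Example_BSP_idle_body(
  unsigned32 ignored
)
{
  for ( ; ; ) {
    /* hypothetical low-power wait; replace with the port's idle instruction */
  }
}

/*
 *  The BSP would register it while building its CPU table, before
 *  multitasking is started:
 *
 *      _CPU_Table.idle_task = (void *) Example_BSP_idle_body;
 */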

/*PAGE
 *
 *  _Thread_Start_multitasking
 *
 *  This kernel routine starts multitasking.  It marks the system as up,
 *  promotes the current heir to the executing thread, and performs the
 *  initial context switch from the BSP's startup context to that thread.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 */


void _Thread_Start_multitasking( void )
{
  /*
   *  The system is now multitasking and completely initialized.
   *  This system thread now either "goes away" in a single processor
   *  system or "turns into" the server thread in an MP system.
   */

  _System_state_Set( SYSTEM_STATE_UP );

  _Context_Switch_necessary = FALSE;

  _Thread_Executing = _Thread_Heir;

  _Context_Switch( &_Thread_BSP_context, &_Thread_Executing->Registers );
}

/*PAGE
 *
 *  _Thread_Dispatch
 *
 *  This kernel routine determines if a dispatch is needed, and if so
 *  dispatches to the heir thread.  Once the heir is running an attempt
 *  is made to dispatch any ASRs.
 *
 *  ALTERNATE ENTRY POINTS:
 *    void _Thread_Enable_dispatch();
 *
 *  Input parameters:  NONE
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    dispatch thread
 *    no dispatch thread
 */

#if ( CPU_INLINE_ENABLE_DISPATCH == FALSE )
void _Thread_Enable_dispatch( void )
{
  if ( --_Thread_Dispatch_disable_level )
    return;
  _Thread_Dispatch();
}
#endif
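
/*
 *  A minimal sketch of the dispatch-disable critical section that callers of
 *  _Thread_Enable_dispatch rely on.  The function name and the field being
 *  updated are hypothetical; the point is that the disable level is raised
 *  before touching thread state shared with the dispatcher, and any deferred
 *  dispatch happens when the level returns to zero inside
 *  _Thread_Enable_dispatch.
 */

void Example_update_thread_atomically(
  Thread_Control *the_thread,
  unsigned32      new_resource_count
)
{
  _Thread_Disable_dispatch();        /* no thread switch until re-enabled */

  the_thread->resource_count = new_resource_count;

  _Thread_Enable_dispatch();         /* dispatches now if one became needed */
}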

void _Thread_Dispatch( void )
{
  Thread_Control   *executing;
  Thread_Control   *heir;
  ISR_Level         level;

  executing   = _Thread_Executing;
  _ISR_Disable( level );
  while ( _Context_Switch_necessary == TRUE ) {
    heir = _Thread_Heir;
    _Thread_Dispatch_disable_level = 1;
    _Context_Switch_necessary = FALSE;
    _Thread_Executing = heir;
    _ISR_Enable( level );

    _User_extensions_Thread_switch( executing, heir );

    _Thread_Ticks_remaining_in_timeslice = _Thread_Ticks_per_timeslice;

    /*
     *  If the CPU has hardware floating point, then we must address saving
     *  and restoring it as part of the context switch.
     *
     *  The second conditional compilation section selects the algorithm used
     *  to context switch between floating point tasks.  The deferred algorithm
     *  can be significantly better in a system with few floating point tasks
     *  because it reduces the total number of save and restore FP context
     *  operations.  However, this algorithm can not be used on all CPUs due
     *  to unpredictable use of FP registers by some compilers for integer
     *  operations.
     */

#if ( CPU_HARDWARE_FP == TRUE )
#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
    if ( (heir->fp_context != NULL) && !_Thread_Is_allocated_fp( heir ) ) {
      if ( _Thread_Allocated_fp != NULL )
        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
      _Context_Restore_fp( &heir->fp_context );
      _Thread_Allocated_fp = heir;
    }
#else
    if ( executing->fp_context != NULL )
      _Context_Save_fp( &executing->fp_context );

    if ( heir->fp_context != NULL )
      _Context_Restore_fp( &heir->fp_context );
#endif
#endif

    _Context_Switch( &executing->Registers, &heir->Registers );

    executing = _Thread_Executing;

    _ISR_Disable( level );
  }

  _Thread_Dispatch_disable_level = 0;

  _ISR_Enable( level );

  if ( executing->do_post_task_switch_extension ) {
    executing->do_post_task_switch_extension = FALSE;
    _API_extensions_Run_postswitch();
  }

}

/*PAGE
 *
 *  _Thread_Stack_Allocate
 *
 *  Allocate the requested stack space for the thread.  Returns the actual
 *  size allocated after any adjustment, or zero if the allocation failed.
 *  Sets the Start.stack field to the address of the stack.
 */

static unsigned32 _Thread_Stack_Allocate(
  Thread_Control *the_thread,
  unsigned32 stack_size)
{
  void *stack_addr = 0;

  if ( !_Stack_Is_enough( stack_size ) )
    stack_size = STACK_MINIMUM_SIZE;

  /*
   * Call ONLY the CPU table stack allocate hook, _or_ the
   * RTEMS workspace allocate.  This is so the stack free
   * routine can call the correct deallocation routine.
   */

  if ( _CPU_Table.stack_allocate_hook )
  {
    stack_addr = (*_CPU_Table.stack_allocate_hook)( stack_size );
  } else {

    /*
     *  First pad the requested size so that enough memory is allocated
     *  for the context initialization to align it properly.  The address
     *  returned by the workspace allocate must be stored directly in the
     *  stack control block because it is later used in the free sequence.
     *
     *  Thus it is the responsibility of the CPU dependent code to
     *  keep the stack adjust factor, the stack alignment, and
     *  the context initialization sequence in sync.
     */

    stack_size = _Stack_Adjust_size( stack_size );
    stack_addr = _Workspace_Allocate( stack_size );
  }

  if ( !stack_addr )
      stack_size = 0;

  the_thread->Start.stack = stack_addr;

  return stack_size;
}

/*
 *  _Thread_Stack_Free
 *
 *  Deallocate the Thread's stack.
 */

static void _Thread_Stack_Free(void *stack_addr)
{
    /*
     * Call ONLY the CPU table stack free hook, or the
     * RTEMS workspace free.  This is so the free
     * routine properly matches the allocation of the stack.
     */

    if ( _CPU_Table.stack_free_hook )
        (*_CPU_Table.stack_free_hook)( stack_addr );
    else
        _Workspace_Free( stack_addr );
}
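
/*
 *  A minimal sketch of the paired stack hooks this allocate/free split is
 *  designed for.  The hook names and the use of the C heap are hypothetical;
 *  a BSP might instead carve stacks from a dedicated memory region.  The
 *  essential property, enforced in _Thread_Handler_initialization, is that
 *  either both hooks are installed or neither is.
 */

#include <stdlib.h>

void *Example_stack_allocate_hook(
  unsigned32 stack_size
)
{
  return malloc( (size_t) stack_size );   /* stacks come from the C heap */
}

void Example_stack_free_hook(
  void *stack_addr
)
{
  free( stack_addr );                     /* must match the allocate hook */
}

/*
 *  Both pointers would be set in the BSP's CPU table:
 *
 *      _CPU_Table.stack_allocate_hook = Example_stack_allocate_hook;
 *      _CPU_Table.stack_free_hook     = Example_stack_free_hook;
 */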

/*PAGE
 *
 *  _Thread_Initialize
 *
 *  This routine initializes the specified thread object, allocating its
 *  stack, floating point area, and extensions area as needed.  It returns
 *  TRUE on success and FALSE if any allocation or create extension fails.
 */

boolean _Thread_Initialize(
  Objects_Information *information,
  Thread_Control      *the_thread,
  void                *stack_area,    /* NULL if to be allocated */
  unsigned32           stack_size,    /* insure it is >= min */
  boolean              is_fp,         /* TRUE if thread uses FP */
  Priority_Control     priority,
  boolean              is_preemptible,
  boolean              is_timeslice,
  unsigned32           isr_level,
  Objects_Name         name
)
{
  unsigned32           actual_stack_size;
  void                *stack;
  void                *fp_area;
  void                *extensions_area;

  /*
   *  Allocate and Initialize the stack for this thread.
   */

  if ( !_Stack_Is_enough( stack_size ) )
    actual_stack_size = STACK_MINIMUM_SIZE;
  else
    actual_stack_size = stack_size;

  actual_stack_size = _Stack_Adjust_size( actual_stack_size );
  stack             = stack_area;

  if ( !stack ) {
    actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );

    if ( !actual_stack_size )
      return FALSE;                     /* stack allocation failed */

    stack = the_thread->Start.stack;
  } else
    the_thread->Start.stack = NULL;

  _Stack_Initialize(
     &the_thread->Start.Initial_stack,
     stack,
     actual_stack_size
  );

  /*
   *  Allocate the floating point area for this thread
   */

  if ( is_fp ) {

    fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
    if ( !fp_area ) {
      if ( the_thread->Start.stack )
        (void) _Thread_Stack_Free( the_thread->Start.stack );
      return FALSE;
    }
    fp_area = _Context_Fp_start( fp_area, 0 );

  } else
    fp_area = NULL;

  the_thread->fp_context       = fp_area;
  the_thread->Start.fp_context = fp_area;


  /*
   *  Allocate the extensions area for this thread
   */

  if ( _Thread_Maximum_extensions ) {
    extensions_area = _Workspace_Allocate(
      (_Thread_Maximum_extensions + 1) * sizeof( void * )
    );

    if ( !extensions_area ) {
      if ( fp_area )
        (void) _Workspace_Free( fp_area );

      if ( the_thread->Start.stack )
        (void) _Thread_Stack_Free( the_thread->Start.stack );

      return FALSE;
    }
  } else
    extensions_area = NULL;

  the_thread->extensions = extensions_area;

  /*
   *  General initialization
   */

  the_thread->Start.is_preemptible = is_preemptible;
  the_thread->Start.is_timeslice   = is_timeslice;
  the_thread->Start.isr_level      = isr_level;

  the_thread->current_state          = STATES_DORMANT;
  the_thread->resource_count         = 0;
  the_thread->real_priority          = priority;
  the_thread->Start.initial_priority = priority;

  _Thread_Set_priority( the_thread, priority );

  /*
   *  Open the object
   */

  _Objects_Open( information, &the_thread->Object, name );

  /*
   *  Invoke create extensions
   */

  if ( !_User_extensions_Thread_create( the_thread ) ) {

    if ( extensions_area )
      (void) _Workspace_Free( extensions_area );

    if ( fp_area )
      (void) _Workspace_Free( fp_area );

    if ( the_thread->Start.stack )
      (void) _Thread_Stack_Free( the_thread->Start.stack );

    return FALSE;
  }

  return TRUE;

}
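
/*
 *  A minimal sketch of the allocate/initialize/start sequence a higher level
 *  might follow, mirroring _Thread_Create_idle above.  The function name,
 *  stack size, priority, modes, and object name are hypothetical values; the
 *  entry point is supplied by the caller.
 */

boolean Example_spawn_internal_thread(
  Thread_Control *the_thread,
  void           *entry_point
)
{
  boolean status;

  status = _Thread_Initialize(
    &_Thread_Internal_information,
    the_thread,
    NULL,                    /* let the handler allocate the stack */
    STACK_MINIMUM_SIZE * 2,  /* hypothetical stack size */
    FALSE,                   /* no floating point context */
    10,                      /* hypothetical priority */
    TRUE,                    /* preemptible */
    FALSE,                   /* not timesliced */
    0,                       /* all interrupts enabled */
    (Objects_Name) "EXMP"    /* hypothetical name */
  );

  if ( !status )
    return FALSE;

  return _Thread_Start(
    the_thread,
    THREAD_START_NUMERIC,
    entry_point,
    NULL,
    0
  );
}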

/*
 *  _Thread_Start
 *
 *  DESCRIPTION:
 *
 *  This routine starts a dormant thread at the given entry point using the
 *  specified argument prototype, readies it, and runs the start extensions.
 *  It returns FALSE if the thread is not dormant.
 */

boolean _Thread_Start(
  Thread_Control       *the_thread,
  Thread_Start_types    the_prototype,
  void                 *entry_point,
  void                 *pointer_argument,
  unsigned32            numeric_argument
)
{
  if ( _States_Is_dormant( the_thread->current_state ) ) {

    the_thread->Start.entry_point      = entry_point;

    the_thread->Start.prototype        = the_prototype;
    the_thread->Start.pointer_argument = pointer_argument;
    the_thread->Start.numeric_argument = numeric_argument;

    _Thread_Load_environment( the_thread );

    _Thread_Ready( the_thread );

    _User_extensions_Thread_start( the_thread );

    return TRUE;
  }

  return FALSE;

}
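
/*
 *  A minimal sketch of entry points matching two of the start prototypes
 *  dispatched in _Thread_Handler below.  The function and argument names are
 *  hypothetical; only the argument shapes are dictated by the prototype that
 *  is passed to _Thread_Start.
 */

void Example_numeric_entry(
  unsigned32 argument          /* started with THREAD_START_NUMERIC */
)
{
  /* thread body using a numeric argument */
  for ( ; ; ) ;
}

void Example_pointer_entry(
  void *argument               /* started with THREAD_START_POINTER */
)
{
  /* thread body using a pointer argument */
  for ( ; ; ) ;
}

/*
 *  Starting the second form would look like:
 *
 *      _Thread_Start(
 *        the_thread, THREAD_START_POINTER, Example_pointer_entry, NULL, 0 );
 */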

/*
 *  _Thread_Restart
 *
 *  DESCRIPTION:
 *
 *  This routine returns a non-dormant thread to its start state with the
 *  new arguments, restoring its initial priority and modes.  It returns
 *  FALSE if the thread is dormant.
 */

boolean _Thread_Restart(
  Thread_Control      *the_thread,
  void                *pointer_argument,
  unsigned32           numeric_argument
)
{
  if ( !_States_Is_dormant( the_thread->current_state ) ) {

    _Thread_Set_transient( the_thread );
    the_thread->resource_count = 0;
    the_thread->is_preemptible = the_thread->Start.is_preemptible;
    the_thread->is_timeslice   = the_thread->Start.is_timeslice;

    the_thread->Start.pointer_argument = pointer_argument;
    the_thread->Start.numeric_argument = numeric_argument;

    if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {

      if ( _Watchdog_Is_active( &the_thread->Timer ) )
        (void) _Watchdog_Remove( &the_thread->Timer );
    }

    if ( the_thread->current_priority != the_thread->Start.initial_priority ) {
      the_thread->real_priority = the_thread->Start.initial_priority;
      _Thread_Set_priority( the_thread, the_thread->Start.initial_priority );
    }

    _Thread_Load_environment( the_thread );

    _Thread_Ready( the_thread );

    _User_extensions_Thread_restart( the_thread );

    if ( _Thread_Is_executing ( the_thread ) )
      _Thread_Restart_self();

    return TRUE;
  }

  return FALSE;
}

/*
 *  _Thread_Close
 *
 *  DESCRIPTION:
 *
 *  This routine closes the thread object, removes it from its thread queue
 *  or watchdog as needed, runs the delete extensions, and releases the
 *  thread's stack, floating point area, and extensions workspace.
 */

void _Thread_Close(
  Objects_Information  *information,
  Thread_Control       *the_thread
)
{
  _Objects_Close( information, &the_thread->Object );

  _Thread_Set_state( the_thread, STATES_TRANSIENT );

  if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {

    if ( _Watchdog_Is_active( &the_thread->Timer ) )
      (void) _Watchdog_Remove( &the_thread->Timer );
  }

  _User_extensions_Thread_delete( the_thread );

#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
  if ( _Thread_Is_allocated_fp( the_thread ) )
    _Thread_Deallocate_fp();
#endif
  the_thread->fp_context = NULL;

  if ( the_thread->Start.fp_context )
    (void) _Workspace_Free( the_thread->Start.fp_context );

  if ( the_thread->Start.stack )
    (void) _Thread_Stack_Free( the_thread->Start.stack );

  if ( the_thread->extensions )
    (void) _Workspace_Free( the_thread->extensions );

  the_thread->Start.stack = NULL;
  the_thread->extensions = NULL;
}

/*PAGE
 *
 *  _Thread_Ready
 *
 *  This kernel routine readies the requested thread; the ready chain
 *  is adjusted.  A new heir thread may be selected.
 *
 *  Input parameters:
 *    the_thread - pointer to thread control block
 *
 *  Output parameters:  NONE
 *
 *  NOTE:  This routine uses the "blocking" heir selection mechanism.
 *         This ensures the correct heir after a thread restart.
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Ready(
  Thread_Control *the_thread
)
{
  ISR_Level              level;
  Thread_Control *heir;

  _ISR_Disable( level );

  the_thread->current_state = STATES_READY;

  _Priority_Add_to_bit_map( &the_thread->Priority_map );

  _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );

  _ISR_Flash( level );

  _Thread_Calculate_heir();

  heir = _Thread_Heir;

  if ( !_Thread_Is_executing( heir ) && _Thread_Executing->is_preemptible )
    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}

/*PAGE
 *
 *  _Thread_Clear_state
 *
 *  This kernel routine clears the appropriate states in the
 *  requested thread.  The thread ready chain is adjusted if
 *  necessary and the Heir thread is set accordingly.
 *
 *  Input parameters:
 *    the_thread - pointer to thread control block
 *    state      - state set to clear
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    priority map
 *    select heir
 */

void _Thread_Clear_state(
  Thread_Control *the_thread,
  States_Control  state
)
{
  ISR_Level level;

  _ISR_Disable( level );
    the_thread->current_state =
      _States_Clear( state, the_thread->current_state );

    if ( _States_Is_ready( the_thread->current_state ) ) {

      _Priority_Add_to_bit_map( &the_thread->Priority_map );

      _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );

      _ISR_Flash( level );

      if ( the_thread->current_priority < _Thread_Heir->current_priority ) {
        _Thread_Heir = the_thread;
        if ( _Thread_Executing->is_preemptible ||
             the_thread->current_priority == 0 )
          _Context_Switch_necessary = TRUE;
      }
    }
  _ISR_Enable( level );
}

/*PAGE
 *
 * _Thread_Set_state
 *
 * This kernel routine sets the requested state in the THREAD.  The
 * THREAD chain is adjusted if necessary.
 *
 * Input parameters:
 *   the_thread   - pointer to thread control block
 *   state - state to be set
 *
 * Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select map
 */

void _Thread_Set_state(
  Thread_Control *the_thread,
  States_Control         state
)
{
  ISR_Level             level;
  Chain_Control *ready;

  ready = the_thread->ready;
  _ISR_Disable( level );
  if ( !_States_Is_ready( the_thread->current_state ) ) {
    the_thread->current_state =
       _States_Set( state, the_thread->current_state );
    _ISR_Enable( level );
    return;
  }

  the_thread->current_state = state;

  if ( _Chain_Has_only_one_node( ready ) ) {

    _Chain_Initialize_empty( ready );
    _Priority_Remove_from_bit_map( &the_thread->Priority_map );

  } else
    _Chain_Extract_unprotected( &the_thread->Object.Node );

  _ISR_Flash( level );

  if ( _Thread_Is_heir( the_thread ) )
     _Thread_Calculate_heir();

  if ( _Thread_Is_executing( the_thread ) )
    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}

/*PAGE
 *
 *  _Thread_Set_transient
 *
 *  This kernel routine places the requested thread in the transient state
 *  which will remove it from the ready queue, if necessary.  No
 *  rescheduling is necessary because it is assumed that the transient
 *  state will be cleared before dispatching is enabled.
 *
 *  Input parameters:
 *    the_thread - pointer to thread control block
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    only case
 */

void _Thread_Set_transient(
  Thread_Control *the_thread
)
{
  ISR_Level             level;
  unsigned32            old_state;
  Chain_Control *ready;

  ready = the_thread->ready;
  _ISR_Disable( level );

  old_state = the_thread->current_state;
  the_thread->current_state = _States_Set( STATES_TRANSIENT, old_state );

  if ( _States_Is_ready( old_state ) ) {
    if ( _Chain_Has_only_one_node( ready ) ) {

      _Chain_Initialize_empty( ready );
      _Priority_Remove_from_bit_map( &the_thread->Priority_map );

    } else
      _Chain_Extract_unprotected( &the_thread->Object.Node );
  }

  _ISR_Enable( level );

}

/*PAGE
 *
 *  _Thread_Reset_timeslice
 *
 *  This routine removes the running thread from the ready chain, places
 *  it at the rear of that chain, and resets the timeslice counter.  The
 *  heir THREAD will be updated if the running thread is also the heir.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Reset_timeslice( void )
{
  ISR_Level              level;
  Thread_Control *executing;
  Chain_Control  *ready;

  executing = _Thread_Executing;
  ready     = executing->ready;
  _ISR_Disable( level );
    if ( _Chain_Has_only_one_node( ready ) ) {
      _Thread_Ticks_remaining_in_timeslice = _Thread_Ticks_per_timeslice;
      _ISR_Enable( level );
      return;
    }
    _Chain_Extract_unprotected( &executing->Object.Node );
    _Chain_Append_unprotected( ready, &executing->Object.Node );

  _ISR_Flash( level );

    if ( _Thread_Is_heir( executing ) )
      _Thread_Heir = (Thread_Control *) ready->first;

    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}

/*PAGE
 *
 *  _Thread_Tickle_timeslice
 *
 *  This scheduler routine determines if timeslicing is enabled
 *  for the currently executing thread and, if so, updates the
 *  timeslice count and checks for timeslice expiration.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 */

void _Thread_Tickle_timeslice( void )
{
  if ( !_Thread_Executing->is_timeslice  ||
       !_Thread_Executing->is_preemptible ||
       !_States_Is_ready( _Thread_Executing->current_state ) )
    return;

  if ( --_Thread_Ticks_remaining_in_timeslice == 0 ) {
      _Thread_Reset_timeslice();
  }
}
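
/*
 *  A minimal sketch of the per-tick processing that drives the timeslice
 *  accounting above, as it might be invoked outside of interrupt context.
 *  The function name is hypothetical; a real clock tick service would also
 *  announce the tick to the watchdog handler and would normally leave the
 *  dispatch to the interrupt epilogue.  With a 10 millisecond tick and
 *  _Thread_Ticks_per_timeslice set to 50, a thread would be moved to the
 *  rear of its ready chain after 500 milliseconds of continuous execution.
 */

void Example_clock_tick_service( void )
{
  _Thread_Tickle_timeslice();    /* may mark a context switch as necessary */

  if ( _Context_Switch_necessary && _Thread_Dispatch_disable_level == 0 )
    _Thread_Dispatch();          /* switch to the new heir if allowed */
}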

/*PAGE
 *
 *  _Thread_Yield_processor
 *
 *  This kernel routine removes the running THREAD from the ready chain
 *  and places it immediately at the rear of this chain.  The heir THREAD
 *  will be updated if the running thread is also the heir.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Yield_processor( void )
{
  ISR_Level       level;
  Thread_Control *executing;
  Chain_Control  *ready;

  executing = _Thread_Executing;
  ready     = executing->ready;
  _ISR_Disable( level );
    if ( !_Chain_Has_only_one_node( ready ) ) {
      _Chain_Extract_unprotected( &executing->Object.Node );
      _Chain_Append_unprotected( ready, &executing->Object.Node );

      _ISR_Flash( level );

      if ( _Thread_Is_heir( executing ) )
        _Thread_Heir = (Thread_Control *) ready->first;
      _Context_Switch_necessary = TRUE;
    }
    else if ( !_Thread_Is_heir( executing ) )
      _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}
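
/*
 *  A minimal sketch of how an API-level yield directive might use the routine
 *  above: dispatching is disabled around the yield, and releasing the disable
 *  level performs the switch to the new heir if one was selected.  The
 *  directive name is hypothetical.
 */

void Example_yield_directive( void )
{
  _Thread_Disable_dispatch();

  _Thread_Yield_processor();     /* move the executing thread to the rear */

  _Thread_Enable_dispatch();     /* dispatch the heir if a switch is needed */
}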

/*PAGE
 *
 *  _Thread_Load_environment
 *
 *  Load starting environment for another thread from its start area in the
 *  thread.  Only called from _Thread_Restart and _Thread_Start.
 *
 *  Input parameters:
 *    the_thread - thread control block pointer
 *
 *  Output parameters:  NONE
 */

void _Thread_Load_environment(
  Thread_Control *the_thread
)
{
  boolean is_fp = FALSE;

  if ( the_thread->Start.fp_context ) {
    the_thread->fp_context = the_thread->Start.fp_context;
    _Context_Initialize_fp( &the_thread->fp_context );
    is_fp = TRUE;
  }

  the_thread->do_post_task_switch_extension = FALSE;
  the_thread->is_preemptible = the_thread->Start.is_preemptible;
  the_thread->is_timeslice   = the_thread->Start.is_timeslice;

  _Context_Initialize(
    &the_thread->Registers,
    the_thread->Start.Initial_stack.area,
    the_thread->Start.Initial_stack.size,
    the_thread->Start.isr_level,
    _Thread_Handler,
    is_fp
  );

}

/*PAGE
 *
 *  _Thread_Handler
 *
 *  This routine is the wrapper in which each thread begins execution and
 *  the default handler for a thread that exits.  It is returned to when a
 *  thread's entry point returns, at which time the configured fatal error
 *  handler is invoked to process the exit.
 *
 *  Input parameters:   NONE
 *
 *  Output parameters:  NONE
 */

void _Thread_Handler( void )
{
  Thread_Control *executing;

  executing = _Thread_Executing;

  /*
   * Take care that 'begin' extensions get to complete before
   * 'switch' extensions can run.  This means we must keep dispatch
   * disabled until all 'begin' extensions complete.
   */

  _User_extensions_Thread_begin( executing );

  /*
   *  At this point, the dispatch disable level BETTER be 1.
   */

  _Thread_Enable_dispatch();

  switch ( executing->Start.prototype ) {
    case THREAD_START_NUMERIC:
      (*executing->Start.entry_point)( executing->Start.numeric_argument );
      break;
    case THREAD_START_POINTER:
      (*executing->Start.entry_point)( executing->Start.pointer_argument );
      break;
    case THREAD_START_BOTH_POINTER_FIRST:
      (*executing->Start.entry_point)(
        executing->Start.pointer_argument,
        executing->Start.numeric_argument
      );
      break;
    case THREAD_START_BOTH_NUMERIC_FIRST:
      (*executing->Start.entry_point)(
        executing->Start.numeric_argument,
        executing->Start.pointer_argument
      );
      break;
  }

  _User_extensions_Thread_exitted( executing );

  _Internal_error_Occurred(
    INTERNAL_ERROR_CORE,
    TRUE,
    INTERNAL_ERROR_THREAD_EXITTED
  );
}
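
/*
 *  A minimal sketch of why an entry point must not simply return.  Falling
 *  off the end of the body lands back in _Thread_Handler above, which treats
 *  it as a fatal error; a finished task should instead delete or block itself
 *  through whichever API directive the application uses (for example
 *  rtems_task_delete in the Classic API).  The names below are hypothetical.
 */

void Example_one_shot_entry(
  unsigned32 argument
)
{
  /* ... perform the one-shot work ... */

  for ( ; ; ) {
    /*
     *  Never return: returning would reach the INTERNAL_ERROR_THREAD_EXITTED
     *  fatal error above.  A real task would delete itself here via its API.
     */
  }
}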

/*PAGE
 *
 *  _Thread_Delay_ended
 *
 *  This routine processes a thread whose delay period has ended.
 *  It is called by the watchdog handler.
 *
 *  Input parameters:
 *    id - thread id
 *
 *  Output parameters: NONE
 */

void _Thread_Delay_ended(
  Objects_Id  id,
  void       *ignored
)
{
  Thread_Control    *the_thread;
  Objects_Locations  location;

  the_thread = _Thread_Get( id, &location );
  switch ( location ) {
    case OBJECTS_ERROR:
    case OBJECTS_REMOTE:  /* impossible */
      break;
    case OBJECTS_LOCAL:
      _Thread_Unblock( the_thread );
      _Thread_Unnest_dispatch();
      break;
  }
}
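
/*
 *  A minimal sketch of how a delay directive might block the executing thread
 *  so that _Thread_Delay_ended unblocks it later.  The directive name is
 *  hypothetical, and the watchdog calls that would arm the thread's Timer are
 *  only described, not shown, to avoid assuming their exact signatures.
 */

void Example_delay_directive(
  unsigned32 ticks
)
{
  Thread_Control *executing = _Thread_Executing;

  _Thread_Disable_dispatch();

  _Thread_Set_state( executing, STATES_DELAYING );

  /*
   *  The directive would now arm executing->Timer through the watchdog
   *  handler, with _Thread_Delay_ended as the service routine and the
   *  thread's id as its argument, so that the routine above unblocks the
   *  thread after "ticks" clock ticks.  (Watchdog calls omitted here.)
   */

  _Thread_Enable_dispatch();    /* switches away until the delay ends */
}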

/*PAGE
 *
 *  _Thread_Change_priority
 *
 *  This kernel routine changes the priority of the thread.  The
 *  thread chain is adjusted if necessary.
 *
 *  Input parameters:
 *    the_thread   - pointer to thread control block
 *    new_priority - ultimate priority
 *
 *  Output parameters:  NONE
 *
 *  INTERRUPT LATENCY:
 *    ready chain
 *    select heir
 */

void _Thread_Change_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
)
{
  ISR_Level level;

  _Thread_Set_transient( the_thread );

  if ( the_thread->current_priority != new_priority )
    _Thread_Set_priority( the_thread, new_priority );

  _ISR_Disable( level );

  the_thread->current_state =
    _States_Clear( STATES_TRANSIENT, the_thread->current_state );

  if ( ! _States_Is_ready( the_thread->current_state ) ) {
    _ISR_Enable( level );
    return;
  }

  _Priority_Add_to_bit_map( &the_thread->Priority_map );
  _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );

  _ISR_Flash( level );

  _Thread_Calculate_heir();

  if ( !_Thread_Is_executing_also_the_heir() &&
       _Thread_Executing->is_preemptible )
    _Context_Switch_necessary = TRUE;

  _ISR_Enable( level );
}

/*PAGE
 *
 * _Thread_Set_priority
 *
 * This routine sets the current priority of the thread and updates its
 * ready chain pointer and priority map information accordingly.
 *
 *  Input parameters:
 *    the_thread   - pointer to thread control block
 *    new_priority - new priority
 *
 *  Output: NONE
 */

void _Thread_Set_priority(
  Thread_Control   *the_thread,
  Priority_Control  new_priority
)
{
  the_thread->current_priority = new_priority;
  the_thread->ready            = &_Thread_Ready_chain[ new_priority ];

  _Priority_Initialize_information( &the_thread->Priority_map, new_priority );
}
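
/*
 *  A minimal sketch of a temporary priority boost built on
 *  _Thread_Change_priority, in the spirit of what a priority-inheritance
 *  mutex does when a higher priority thread blocks on a resource the holder
 *  owns.  The function name is hypothetical, and real resource code also
 *  tracks resource_count and restores the holder's real_priority on release.
 */

void Example_boost_holder_priority(
  Thread_Control *holder,
  Thread_Control *blocker
)
{
  /* numerically lower priority values are more important */
  if ( blocker->current_priority < holder->current_priority )
    _Thread_Change_priority( holder, blocker->current_priority );
}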

/*PAGE
 *
 *  _Thread_Evaluate_mode
 *
 *  This routine determines whether a dispatch is needed as the result of
 *  a mode change by the executing thread.  It returns TRUE and marks a
 *  context switch as necessary if the executing thread is no longer ready
 *  or should now yield to the heir.
 */

boolean _Thread_Evaluate_mode( void )
{
  Thread_Control     *executing;

  executing = _Thread_Executing;

  if ( !_States_Is_ready( executing->current_state ) ||
       ( !_Thread_Is_heir( executing ) && executing->is_preemptible ) ) {
    _Context_Switch_necessary = TRUE;
    return TRUE;
  }

  return FALSE;
}

/*PAGE
 *
 *  _Thread_Get
 *
 *  NOTE:  If we are not using static inlines, this must be a real
 *         subroutine call.
 *
 *  NOTE:  XXX... This routine may be able to be optimized.
 */

#ifndef USE_INLINES

Thread_Control *_Thread_Get (
  Objects_Id           id,
  Objects_Locations   *location
)
{
  Objects_Classes      the_class;
  Objects_Information *information;

  if ( _Objects_Are_ids_equal( id, OBJECTS_ID_OF_SELF ) ) {
    _Thread_Disable_dispatch();
    *location = OBJECTS_LOCAL;
    return( _Thread_Executing );
  }

  the_class = _Objects_Get_class( id );

  if ( the_class > OBJECTS_CLASSES_LAST ) {
    *location = OBJECTS_ERROR;
    return (Thread_Control *) 0;
  }

  information = _Objects_Information_table[ the_class ];

  if ( !information || !information->is_thread ) {
    *location = OBJECTS_ERROR;
    return (Thread_Control *) 0;
  }

  return (Thread_Control *) _Objects_Get( information, id, location );
}

#endif
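
/*
 *  A minimal sketch of the lookup pattern directives build on _Thread_Get:
 *  the returned thread is used while dispatching is disabled and the disable
 *  level is dropped before returning, much as _Thread_Delay_ended does above.
 *  The function name and the "suspend" operation are hypothetical placeholders
 *  for whatever a directive actually does with the thread.
 */

boolean Example_suspend_by_id(
  Objects_Id id
)
{
  Thread_Control    *the_thread;
  Objects_Locations  location;

  the_thread = _Thread_Get( id, &location );
  switch ( location ) {
    case OBJECTS_ERROR:
    case OBJECTS_REMOTE:          /* remote threads not handled in this sketch */
      return FALSE;
    case OBJECTS_LOCAL:
      _Thread_Set_state( the_thread, STATES_SUSPENDED );
      _Thread_Enable_dispatch();  /* undo the disable done inside _Thread_Get */
      return TRUE;
  }

  return FALSE;
}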

/*PAGE
 *
 *  _Thread_Idle_body
 *
 *  This kernel routine is the idle thread.  The idle thread runs any time
 *  no other thread is ready to run.  This thread loops forever with
 *  interrupts enabled.
 *
 *  Input parameters:
 *    ignored - this parameter is ignored
 *
 *  Output parameters:  NONE
 */

#if (CPU_PROVIDES_IDLE_THREAD_BODY == FALSE)
Thread _Thread_Idle_body(
  unsigned32 ignored
)
{
  for( ; ; ) ;
}
#endif