source: rtems/cpukit/score/src/thread.c @ 98e4ebf5

Last change on this file since 98e4ebf5 was 98e4ebf5, checked in by Joel Sherrill <joel.sherrill@…>, on 10/08/97 at 15:45:54

Fixed typo in the pointer to the license terms.

  • Property mode set to 100644
File size: 33.1 KB
1/*
2 *  Thread Handler
3 *
4 *
5 *  COPYRIGHT (c) 1989-1997.
6 *  On-Line Applications Research Corporation (OAR).
7 *  Copyright assigned to U.S. Government, 1994.
8 *
9 *  The license and distribution terms for this file may be
10 *  found in the file LICENSE in this distribution or at
11 *  http://www.OARcorp.com/rtems/license.html.
12 *
13 *  $Id$
14 */
15
16#include <rtems/system.h>
17#include <rtems/score/apiext.h>
18#include <rtems/score/context.h>
19#include <rtems/score/interr.h>
20#include <rtems/score/isr.h>
21#include <rtems/score/object.h>
22#include <rtems/score/priority.h>
23#include <rtems/score/states.h>
24#include <rtems/score/sysstate.h>
25#include <rtems/score/thread.h>
26#include <rtems/score/threadq.h>
27#include <rtems/score/userext.h>
28#include <rtems/score/wkspace.h>
29
30/*PAGE
31 *
32 *  _Thread_Handler_initialization
33 *
34 *  This routine initializes all thread manager related data structures.
35 *
36 *  Input parameters:
37 *    ticks_per_timeslice - clock ticks per quantum
38 *    maximum_extensions  - maximum number of user extensions
39 *    maximum_proxies     - number of proxies to initialize
40 *  Output parameters:  NONE
41 */
42
43char *_Thread_Idle_name = "IDLE";
44
45void _Thread_Handler_initialization(
46  unsigned32   ticks_per_timeslice,
47  unsigned32   maximum_extensions,
48  unsigned32   maximum_proxies
49)
50{
51  unsigned32      index;
52
53  /*
54   * BOTH stacks hooks must be set or both must be NULL.
55   * Do not allow mixture.
56   */
57
58  if ( !( ( _CPU_Table.stack_allocate_hook == 0 )
59       == ( _CPU_Table.stack_free_hook == 0 ) ) )
60    _Internal_error_Occurred(
61      INTERNAL_ERROR_CORE,
62      TRUE,
63      INTERNAL_ERROR_BAD_STACK_HOOK
64    );
65
66  _Context_Switch_necessary = FALSE;
67  _Thread_Executing         = NULL;
68  _Thread_Heir              = NULL;
69  _Thread_Allocated_fp      = NULL;
70
71  _Thread_Do_post_task_switch_extension = 0;
72
73  _Thread_Maximum_extensions = maximum_extensions;
74
75  _Thread_Ticks_per_timeslice  = ticks_per_timeslice;
76
77  _Thread_Ready_chain = (Chain_Control *) _Workspace_Allocate_or_fatal_error(
78    (PRIORITY_MAXIMUM + 1) * sizeof(Chain_Control)
79  );
80
81  for ( index=0; index <= PRIORITY_MAXIMUM ; index++ )
82    _Chain_Initialize_empty( &_Thread_Ready_chain[ index ] );
83
84  _Thread_MP_Handler_initialization( maximum_proxies );
85
86  /*
87   *  Initialize this class of objects.
88   */
89 
90  _Objects_Initialize_information(
91    &_Thread_Internal_information,
92    OBJECTS_INTERNAL_THREADS,
93    FALSE,
94    ( _System_state_Is_multiprocessing ) ?  2 : 1,
95    sizeof( Thread_Control ),
96    TRUE,
97    8,
98    TRUE
99  );
100
101}
102
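/*
 *  Illustrative sketch:  the check at the top of the routine above requires
 *  that a port or BSP which overrides stack management install BOTH hooks
 *  together.  The signatures below are inferred from the way
 *  _Thread_Stack_Allocate() and _Thread_Stack_Free() invoke the hooks later
 *  in this file; the function names are hypothetical and malloc()/free()
 *  (from <stdlib.h>) stand in for any allocator outside the workspace.
 */

static void *my_stack_allocate_hook(
  unsigned32 stack_size
)
{
  return malloc( stack_size );    /* e.g. stacks from the C heap            */
}

static void my_stack_free_hook(
  void *stack_area
)
{
  free( stack_area );
}

void my_bsp_install_stack_hooks( void )
{
  /*  Setting only one of these would trip INTERNAL_ERROR_BAD_STACK_HOOK.  */

  _CPU_Table.stack_allocate_hook = my_stack_allocate_hook;
  _CPU_Table.stack_free_hook     = my_stack_free_hook;
}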
103/*PAGE
104 *
105 *  _Thread_Create_idle
106 */
107
108void _Thread_Create_idle( void )
109{
110  void *idle;
111
112  /*
113   *  The entire workspace is zeroed during its initialization.  Thus, all
114   *  fields not explicitly assigned here were already zeroed by
115   *  _Workspace_Initialization.
116   */
117 
118  _Thread_Idle = _Thread_Internal_allocate();
119 
120  /*
121   *  Initialize the IDLE task.
122   */
123 
124#if (CPU_PROVIDES_IDLE_THREAD_BODY == TRUE)
125  idle = (void *) _CPU_Thread_Idle_body;
126#else
127  idle = (void *) _Thread_Idle_body;
128#endif
129 
130  if ( _CPU_Table.idle_task )
131    idle = _CPU_Table.idle_task;
132 
133  _Thread_Initialize(
134    &_Thread_Internal_information,
135    _Thread_Idle,
136    NULL,        /* allocate the stack */
137    THREAD_IDLE_STACK_SIZE,
138    CPU_IDLE_TASK_IS_FP,
139    PRIORITY_MAXIMUM,
140    TRUE,        /* preemptable */
141    THREAD_CPU_BUDGET_ALGORITHM_NONE,
142    NULL,        /* no budget algorithm callout */
143    0,           /* all interrupts enabled */
144    _Thread_Idle_name
145  );
146 
147  /*
148   *  WARNING!!! This is necessary to "kick" start the system and
149   *             MUST be done before _Thread_Start is invoked.
150   */
151 
152  _Thread_Heir      =
153  _Thread_Executing = _Thread_Idle;
154 
155  _Thread_Start(
156    _Thread_Idle,
157    THREAD_START_NUMERIC,
158    idle,
159    NULL,
160    0
161  );
162 
163}
164
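/*
 *  Illustrative sketch:  a BSP may substitute its own idle body through the
 *  CPU table, which _Thread_Create_idle() checks above after selecting the
 *  default.  The names below are hypothetical; the signature mirrors
 *  _Thread_Idle_body() at the end of this file.
 */

Thread my_low_power_idle_body(
  unsigned32 ignored
)
{
  for ( ; ; )
    ;       /* e.g. execute a sleep or low power instruction here */
}

void my_bsp_select_idle_body( void )
{
  _CPU_Table.idle_task = (void *) my_low_power_idle_body;
}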
165/*PAGE
166 *
167 *  _Thread_Start_multitasking
168 *
169 *  This kernel routine initiates multitasking.  It marks the system
170 *  as up, makes the heir thread the executing thread, restores the
171 *  heir's floating point context if necessary, and performs the
172 *  initial context switch out of the system initialization context.
173 *
174 *  Input parameters:   NONE
175 *
176 *  Output parameters:  NONE
177 *
178 *  NOTE:  This routine is invoked once, at the end of system
179 *         initialization, before any application threads execute.
180 *
181 *  NOTE:  The heir's floating point context is restored "by hand"
182 *         here because _Thread_Dispatch is not used for this first
183 *         context switch.
184 */
185
186void _Thread_Start_multitasking( void )
187{
188  /*
189   *  The system is now multitasking and completely initialized. 
190   *  This system thread now either "goes away" in a single processor
191   *  system or "turns into" the server thread in an MP system.
192   */
193
194  _System_state_Set( SYSTEM_STATE_UP );
195
196  _Context_Switch_necessary = FALSE;
197
198  _Thread_Executing = _Thread_Heir;
199
200   /*
201    * Get the init task(s) running.
202    *
203    * Note: Thread_Dispatch() is normally used to dispatch threads.  As
204    *       part of its work, Thread_Dispatch() restores floating point
205    *       state for the heir task.
206    *
207    *       This code avoids Thread_Dispatch(), and so we have to restore
208    *       (actually initialize) the floating point state "by hand".
209    *
210    *       Ignore the CPU_USE_DEFERRED_FP_SWITCH because we must always
211    *       switch in the first thread if it is FP.
212    */
213 
214
215#if ( CPU_HARDWARE_FP == TRUE ) || ( CPU_SOFTWARE_FP == TRUE )
216   /*
217    *  don't need to worry about saving BSP's floating point state
218    */
219
220   if ( _Thread_Heir->fp_context != NULL )
221     _Context_Restore_fp( &_Thread_Heir->fp_context );
222#endif
223
224  _Context_Switch( &_Thread_BSP_context, &_Thread_Heir->Registers );
225}
226
227/*PAGE
228 *
229 *  _Thread_Dispatch
230 *
231 *  This kernel routine determines if a dispatch is needed, and if so
232 *  dispatches to the heir thread.  Once the heir is running an attempt
233 *  is made to dispatch any ASRs.
234 *
235 *  ALTERNATE ENTRY POINTS:
236 *    void _Thread_Enable_dispatch();
237 *
238 *  Input parameters:  NONE
239 *
240 *  Output parameters:  NONE
241 *
242 *  INTERRUPT LATENCY:
243 *    dispatch thread
244 *    no dispatch thread
245 */
246
247#if ( CPU_INLINE_ENABLE_DISPATCH == FALSE )
248void _Thread_Enable_dispatch( void )
249{
250  if ( --_Thread_Dispatch_disable_level )
251    return;
252  _Thread_Dispatch();
253}
254#endif
255
256void _Thread_Dispatch( void )
257{
258  Thread_Control   *executing;
259  Thread_Control   *heir;
260  ISR_Level         level;
261
262  executing   = _Thread_Executing;
263  _ISR_Disable( level );
264  while ( _Context_Switch_necessary == TRUE ) {
265    heir = _Thread_Heir;
266    _Thread_Dispatch_disable_level = 1;
267    _Context_Switch_necessary = FALSE;
268    _Thread_Executing = heir;
269    _ISR_Enable( level );
270
271    heir->ticks_executed++;
272
273    _User_extensions_Thread_switch( executing, heir );
274
275    if ( heir->budget_algorithm == THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE )
276      heir->cpu_time_budget = _Thread_Ticks_per_timeslice;
277
278    /*
279     *  If the CPU has hardware floating point, then we must address saving
280     *  and restoring it as part of the context switch.
281     *
282     *  The second conditional compilation section selects the algorithm used
283     *  to context switch between floating point tasks.  The deferred algorithm
284     *  can be significantly better in a system with few floating point tasks
285     *  because it reduces the total number of save and restore FP context
286     *  operations.  However, this algorithm can not be used on all CPUs due
287     *  to unpredictable use of FP registers by some compilers for integer
288     *  operations.
289     */
290
291#if ( CPU_HARDWARE_FP == TRUE )
292#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
293    if ( (heir->fp_context != NULL) && !_Thread_Is_allocated_fp( heir ) ) {
294      if ( _Thread_Allocated_fp != NULL )
295        _Context_Save_fp( &_Thread_Allocated_fp->fp_context );
296      _Context_Restore_fp( &heir->fp_context );
297      _Thread_Allocated_fp = heir;
298    }
299#else
300    if ( executing->fp_context != NULL )
301      _Context_Save_fp( &executing->fp_context );
302
303    if ( heir->fp_context != NULL )
304      _Context_Restore_fp( &heir->fp_context );
305#endif
306#endif
307
308    _Context_Switch( &executing->Registers, &heir->Registers );
309
310    executing = _Thread_Executing;
311
312    _ISR_Disable( level );
313  }
314
315  _Thread_Dispatch_disable_level = 0;
316
317  _ISR_Enable( level );
318
319  if ( _Thread_Do_post_task_switch_extension ||
320       executing->do_post_task_switch_extension ) {
321    executing->do_post_task_switch_extension = FALSE;
322    _API_extensions_Run_postswitch();
323  }
324 
325}
326
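/*
 *  Illustrative sketch of the dispatch protocol seen by callers of the
 *  routine above:  a directive brackets its critical section with the
 *  disable/enable pair so that _Thread_Dispatch() runs only when the
 *  disable level returns to zero.  _Thread_Disable_dispatch() is assumed
 *  to be the usual inline that increments _Thread_Dispatch_disable_level;
 *  the directive name is hypothetical.
 */

void my_directive(
  Thread_Control *the_thread
)
{
  _Thread_Disable_dispatch();

  _Thread_Ready( the_thread );    /* may select a new heir                */

  _Thread_Enable_dispatch();      /* level hits zero: dispatch the heir   */
}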
327/*PAGE
328 *
329 *  _Thread_Stack_Allocate
330 *
331 *  Allocate the requested stack space for the thread.
332 *  Return the actual size allocated after any adjustment,
333 *  or return zero if the allocation failed.
334 *  Set the Start.stack field to the address of the stack.
335 */
336
337static unsigned32 _Thread_Stack_Allocate(
338  Thread_Control *the_thread,
339  unsigned32 stack_size)
340{
341  void *stack_addr = 0;
342 
343  if ( !_Stack_Is_enough( stack_size ) )
344    stack_size = STACK_MINIMUM_SIZE;
345 
346  /*
347   * Call ONLY the CPU table stack allocate hook, _or_ the
348   * RTEMS workspace allocate.  This is so the stack free
349   * routine can call the correct deallocation routine.
350   */
351
352  if ( _CPU_Table.stack_allocate_hook )
353  {
354    stack_addr = (*_CPU_Table.stack_allocate_hook)( stack_size );
355  } else {
356
357    /*
358     *  First pad the requested size so we allocate enough memory for
359     *  the context initialization to align it properly.  The address
360     *  returned by the workspace allocate must be stored directly in the
361     *  stack control block because it is later used in the free sequence.
362     *
363     *  Thus it is the responsibility of the CPU dependent code to
364     *  get and keep the stack adjust factor, the stack alignment, and
365     *  the context initialization sequence in sync.
366     */
367
368    stack_size = _Stack_Adjust_size( stack_size );
369    stack_addr = _Workspace_Allocate( stack_size );
370  }
371 
372  if ( !stack_addr )
373      stack_size = 0;
374 
375  the_thread->Start.stack = stack_addr;
376 
377  return stack_size;
378}
379
380/*
381 *  _Thread_Stack_Free
382 *
383 *  Deallocate the Thread's stack.
384 */
385
386static void _Thread_Stack_Free(
387  Thread_Control *the_thread
388)
389{
390    /*
391     *  If the API provided the stack space, then don't free it.
392     */
393
394    if ( !the_thread->Start.core_allocated_stack )
395      return;
396
397    /*
398     * Call ONLY the CPU table stack free hook, or the
399     * RTEMS workspace free.  This is so the free
400     * routine properly matches the allocation of the stack.
401     */
402
403    if ( _CPU_Table.stack_free_hook )
404        (*_CPU_Table.stack_free_hook)( the_thread->Start.Initial_stack.area );
405    else
406        _Workspace_Free( the_thread->Start.Initial_stack.area );
407}
408
409/*PAGE
410 *
411 *  _Thread_Initialize
412 *
413 *  This routine initializes the specified thread control block and allocates its stack, floating point, and extension areas.
414 */
415
416boolean _Thread_Initialize(
417  Objects_Information                  *information,
418  Thread_Control                       *the_thread,
419  void                                 *stack_area,
420  unsigned32                            stack_size,
421  boolean                               is_fp,
422  Priority_Control                      priority,
423  boolean                               is_preemptible,
424  Thread_CPU_budget_algorithms          budget_algorithm,
425  Thread_CPU_budget_algorithm_callout   budget_callout,
426  unsigned32                            isr_level,
427  Objects_Name                          name
428)
429{
430  unsigned32           actual_stack_size = 0;
431  void                *stack = NULL;
432  void                *fp_area;
433  void                *extensions_area;
434
435  /*
436   *  Allocate and Initialize the stack for this thread.
437   */
438
439
440  if ( !stack_area ) {
441
442    /*
443     *  No stack area was provided by the API, so allocate one here.
444     *  _Thread_Stack_Allocate enforces the minimum stack size, pads the
445     *  request when allocating from the workspace, and records the
446     *  allocated address in the_thread->Start.stack.
447     */
448
449    actual_stack_size = _Thread_Stack_Allocate( the_thread, stack_size );
450 
451    if ( !actual_stack_size )
452      return FALSE;                     /* stack allocation failed */
453
454    stack = the_thread->Start.stack;
455
456    the_thread->Start.core_allocated_stack = TRUE;
457  } else {
458    stack = stack_area;
459    actual_stack_size = stack_size;
460    the_thread->Start.core_allocated_stack = FALSE;
461  }
462
463  _Stack_Initialize(
464     &the_thread->Start.Initial_stack,
465     stack,
466     actual_stack_size
467  );
468
469  /*
470   *  Allocate the floating point area for this thread
471   */
472 
473  if ( is_fp ) {
474
475    fp_area = _Workspace_Allocate( CONTEXT_FP_SIZE );
476    if ( !fp_area ) {
477      _Thread_Stack_Free( the_thread );
478      return FALSE;
479    }
480    fp_area = _Context_Fp_start( fp_area, 0 );
481
482  } else
483    fp_area = NULL;
484
485  the_thread->fp_context       = fp_area;
486  the_thread->Start.fp_context = fp_area;
487
488  /*
489   *  Allocate the extensions area for this thread
490   */
491
492  if ( _Thread_Maximum_extensions ) {
493    extensions_area = _Workspace_Allocate(
494      (_Thread_Maximum_extensions + 1) * sizeof( void * )
495    );
496
497    if ( !extensions_area ) {
498      if ( fp_area )
499        (void) _Workspace_Free( fp_area );
500
501      _Thread_Stack_Free( the_thread );
502
503      return FALSE;
504    }
505  } else
506    extensions_area = NULL;
507 
508  the_thread->extensions = (void **) extensions_area;
509
510  /*
511   *  General initialization
512   */
513
514  the_thread->Start.is_preemptible   = is_preemptible;
515  the_thread->Start.budget_algorithm = budget_algorithm;
516  the_thread->Start.budget_callout   = budget_callout;
517  the_thread->Start.isr_level        = isr_level;
518
519  the_thread->current_state          = STATES_DORMANT;
520  the_thread->resource_count         = 0;
521  the_thread->real_priority          = priority;
522  the_thread->Start.initial_priority = priority;
523  the_thread->ticks_executed         = 0;
524 
525  _Thread_Set_priority( the_thread, priority );
526
527  /*
528   *  Open the object
529   */
530
531  _Objects_Open( information, &the_thread->Object, name );
532
533  /*
534   *  Invoke create extensions
535   */
536
537  if ( !_User_extensions_Thread_create( the_thread ) ) {
538
539    if ( extensions_area )
540      (void) _Workspace_Free( extensions_area );
541
542    if ( fp_area )
543      (void) _Workspace_Free( fp_area );
544
545    _Thread_Stack_Free( the_thread );
546
547    return FALSE;
548  }
549
550  return TRUE;
551   
552}
553
554/*
555 *  _Thread_Start
556 *
557 *  DESCRIPTION:
558 *
559 *  This routine loads the start environment of the specified dormant thread and readies it.
560 */
561 
562boolean _Thread_Start(
563  Thread_Control       *the_thread,
564  Thread_Start_types    the_prototype,
565  void                 *entry_point,
566  void                 *pointer_argument,
567  unsigned32            numeric_argument
568)
569{
570  if ( _States_Is_dormant( the_thread->current_state ) ) {
571 
572    the_thread->Start.entry_point      = (Thread_Entry) entry_point;
573   
574    the_thread->Start.prototype        = the_prototype;
575    the_thread->Start.pointer_argument = pointer_argument;
576    the_thread->Start.numeric_argument = numeric_argument;
577 
578    _Thread_Load_environment( the_thread );
579 
580    _Thread_Ready( the_thread );
581 
582    _User_extensions_Thread_start( the_thread );
583 
584    return TRUE;
585  }
586 
587  return FALSE;
588 
589}
590
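/*
 *  Illustrative sketch:  an API layer typically pairs _Thread_Initialize()
 *  and _Thread_Start() much as _Thread_Create_idle() does earlier in this
 *  file.  The information table, priority, stack size, name, and entry
 *  point below are placeholder values, not values mandated by the core;
 *  the routine name is hypothetical.
 */

boolean my_api_create_and_start(
  Thread_Control *the_thread,
  void           *entry_point          /* a THREAD_START_NUMERIC style entry */
)
{
  boolean status;

  status = _Thread_Initialize(
    &_Thread_Internal_information,     /* object information table          */
    the_thread,                        /* TCB allocated by the caller       */
    NULL,                              /* let the core allocate the stack   */
    STACK_MINIMUM_SIZE,                /* requested stack size              */
    FALSE,                             /* not a floating point thread       */
    100,                               /* initial priority (placeholder)    */
    TRUE,                              /* preemptible                       */
    THREAD_CPU_BUDGET_ALGORITHM_NONE,
    NULL,                              /* no budget algorithm callout       */
    0,                                 /* all interrupts enabled            */
    "DEMO"                             /* name (placeholder)                */
  );

  if ( !status )
    return FALSE;

  return _Thread_Start(
    the_thread,
    THREAD_START_NUMERIC,
    entry_point,
    NULL,                              /* no pointer argument               */
    0                                  /* numeric argument                  */
  );
}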
591/*
592 *  _Thread_Restart
593 *
594 *  DESCRIPTION:
595 *
596 *  This routine resets the specified thread to its initial attributes and restarts it with the given arguments.
597 */
598 
599boolean _Thread_Restart(
600  Thread_Control      *the_thread,
601  void                *pointer_argument,
602  unsigned32           numeric_argument
603)
604{
605  if ( !_States_Is_dormant( the_thread->current_state ) ) {
606 
607    _Thread_Set_transient( the_thread );
608    the_thread->resource_count = 0;
609    the_thread->is_preemptible   = the_thread->Start.is_preemptible;
610    the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
611    the_thread->budget_callout   = the_thread->Start.budget_callout;
612
613    the_thread->Start.pointer_argument = pointer_argument;
614    the_thread->Start.numeric_argument = numeric_argument;
615 
616    if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {
617 
618      if ( _Watchdog_Is_active( &the_thread->Timer ) )
619        (void) _Watchdog_Remove( &the_thread->Timer );
620    }
621
622    if ( the_thread->current_priority != the_thread->Start.initial_priority ) {
623      the_thread->real_priority = the_thread->Start.initial_priority;
624      _Thread_Set_priority( the_thread, the_thread->Start.initial_priority );
625    }
626 
627    _Thread_Load_environment( the_thread );
628 
629    _Thread_Ready( the_thread );
630 
631    _User_extensions_Thread_restart( the_thread );
632 
633    if ( _Thread_Is_executing ( the_thread ) )
634      _Thread_Restart_self();
635 
636    return TRUE;
637  }
638 
639  return FALSE;
640}
641
642/*
643 *  _Thread_Close
644 *
645 *  DESCRIPTION:
646 *
647 *  This routine closes the specified thread object and frees all resources allocated to the thread.
648 */
649 
650void _Thread_Close(
651  Objects_Information  *information,
652  Thread_Control       *the_thread
653)
654{
655  _Objects_Close( information, &the_thread->Object );
656 
657  _Thread_Set_state( the_thread, STATES_TRANSIENT );
658 
659  if ( !_Thread_queue_Extract_with_proxy( the_thread ) ) {
660 
661    if ( _Watchdog_Is_active( &the_thread->Timer ) )
662      (void) _Watchdog_Remove( &the_thread->Timer );
663  }
664
665  _User_extensions_Thread_delete( the_thread );
666 
667#if ( CPU_USE_DEFERRED_FP_SWITCH == TRUE )
668  if ( _Thread_Is_allocated_fp( the_thread ) )
669    _Thread_Deallocate_fp();
670#endif
671  the_thread->fp_context = NULL;
672
673  if ( the_thread->Start.fp_context )
674    (void) _Workspace_Free( the_thread->Start.fp_context );
675
676  _Thread_Stack_Free( the_thread );
677
678  if ( the_thread->extensions )
679    (void) _Workspace_Free( the_thread->extensions );
680
681  the_thread->Start.stack = NULL;
682  the_thread->extensions = NULL;
683}
684
685/*PAGE
686 *
687 *  _Thread_Ready
688 *
689 *  This kernel routine readies the requested thread; the ready chain
690 *  is adjusted.  A new heir thread may be selected.
691 *
692 *  Input parameters:
693 *    the_thread - pointer to thread control block
694 *
695 *  Output parameters:  NONE
696 *
697 *  NOTE:  This routine uses the "blocking" heir selection mechanism.
698 *         This ensures the correct heir after a thread restart.
699 *
700 *  INTERRUPT LATENCY:
701 *    ready chain
702 *    select heir
703 */
704
705void _Thread_Ready(
706  Thread_Control *the_thread
707)
708{
709  ISR_Level              level;
710  Thread_Control *heir;
711
712  _ISR_Disable( level );
713
714  the_thread->current_state = STATES_READY;
715
716  _Priority_Add_to_bit_map( &the_thread->Priority_map );
717
718  _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );
719
720  _ISR_Flash( level );
721
722  _Thread_Calculate_heir();
723
724  heir = _Thread_Heir;
725
726  if ( !_Thread_Is_executing( heir ) && _Thread_Executing->is_preemptible )
727    _Context_Switch_necessary = TRUE;
728
729  _ISR_Enable( level );
730}
731
732/*PAGE
733 *
734 *  _Thread_Clear_state
735 *
736 *  This kernel routine clears the appropriate states in the
737 *  requested thread.  The thread ready chain is adjusted if
738 *  necessary and the Heir thread is set accordingly.
739 *
740 *  Input parameters:
741 *    the_thread - pointer to thread control block
742 *    state      - state set to clear
743 *
744 *  Output parameters:  NONE
745 *
746 *  INTERRUPT LATENCY:
747 *    priority map
748 *    select heir
749 */
750
751
752void _Thread_Clear_state(
753  Thread_Control *the_thread,
754  States_Control  state
755)
756{
757  ISR_Level       level;
758  States_Control  current_state;
759
760  _ISR_Disable( level );
761    current_state = the_thread->current_state;
762   
763    if ( current_state & state ) {
764      current_state =
765      the_thread->current_state = _States_Clear( state, current_state );
766
767      if ( _States_Is_ready( current_state ) ) {
768
769        _Priority_Add_to_bit_map( &the_thread->Priority_map );
770
771        _Chain_Append_unprotected(the_thread->ready, &the_thread->Object.Node);
772
773        _ISR_Flash( level );
774
775        if ( the_thread->current_priority < _Thread_Heir->current_priority ) {
776          _Thread_Heir = the_thread;
777          if ( _Thread_Executing->is_preemptible ||
778               the_thread->current_priority == 0 )
779            _Context_Switch_necessary = TRUE;
780        }
781      }
782  }
783  _ISR_Enable( level );
784}
785
786/*PAGE
787 *
788 * _Thread_Set_state
789 *
790 * This kernel routine sets the requested state in the THREAD.  The
791 * THREAD chain is adjusted if necessary.
792 *
793 * Input parameters:
794 *   the_thread   - pointer to thread control block
795 *   state - state to be set
796 *
797 * Output parameters:  NONE
798 *
799 *  INTERRUPT LATENCY:
800 *    ready chain
801 *    select map
802 */
803
804void _Thread_Set_state(
805  Thread_Control *the_thread,
806  States_Control         state
807)
808{
809  ISR_Level             level;
810  Chain_Control *ready;
811
812  ready = the_thread->ready;
813  _ISR_Disable( level );
814  if ( !_States_Is_ready( the_thread->current_state ) ) {
815    the_thread->current_state =
816       _States_Set( state, the_thread->current_state );
817    _ISR_Enable( level );
818    return;
819  }
820
821  the_thread->current_state = state;
822
823  if ( _Chain_Has_only_one_node( ready ) ) {
824
825    _Chain_Initialize_empty( ready );
826    _Priority_Remove_from_bit_map( &the_thread->Priority_map );
827
828  } else
829    _Chain_Extract_unprotected( &the_thread->Object.Node );
830
831  _ISR_Flash( level );
832
833  if ( _Thread_Is_heir( the_thread ) )
834     _Thread_Calculate_heir();
835
836  if ( _Thread_Is_executing( the_thread ) )
837    _Context_Switch_necessary = TRUE;
838
839  _ISR_Enable( level );
840}
841
842/*PAGE
843 *
844 *  _Thread_Set_transient
845 *
846 *  This kernel routine places the requested thread in the transient state
847 *  which will remove it from the ready queue, if necessary.  No
848 *  rescheduling is necessary because it is assumed that the transient
849 *  state will be cleared before dispatching is enabled.
850 *
851 *  Input parameters:
852 *    the_thread - pointer to thread control block
853 *
854 *  Output parameters:  NONE
855 *
856 *  INTERRUPT LATENCY:
857 *    only case
858 */
859
860void _Thread_Set_transient(
861  Thread_Control *the_thread
862)
863{
864  ISR_Level             level;
865  unsigned32            old_state;
866  Chain_Control *ready;
867
868  ready = the_thread->ready;
869  _ISR_Disable( level );
870
871  old_state = the_thread->current_state;
872  the_thread->current_state = _States_Set( STATES_TRANSIENT, old_state );
873
874  if ( _States_Is_ready( old_state ) ) {
875    if ( _Chain_Has_only_one_node( ready ) ) {
876
877      _Chain_Initialize_empty( ready );
878      _Priority_Remove_from_bit_map( &the_thread->Priority_map );
879
880    } else
881      _Chain_Extract_unprotected( &the_thread->Object.Node );
882  }
883
884  _ISR_Enable( level );
885
886}
887
888/*PAGE
889 *
890 *  _Thread_Reset_timeslice
891 *
892 *  This routine removes the running thread from the ready chain and
893 *  places it immediately at the rear of this chain so that the
894 *  timeslice counter can be reset.  The heir THREAD will be updated if
895 *  the running thread is also currently the heir.
896 *
897 *  Input parameters:   NONE
898 *
899 *  Output parameters:  NONE
900 *
901 *  INTERRUPT LATENCY:
902 *    ready chain
903 *    select heir
904 */
905
906void _Thread_Reset_timeslice( void )
907{
908  ISR_Level       level;
909  Thread_Control *executing;
910  Chain_Control  *ready;
911
912  executing = _Thread_Executing;
913  ready     = executing->ready;
914  _ISR_Disable( level );
915    if ( _Chain_Has_only_one_node( ready ) ) {
916      _ISR_Enable( level );
917      return;
918    }
919    _Chain_Extract_unprotected( &executing->Object.Node );
920    _Chain_Append_unprotected( ready, &executing->Object.Node );
921
922  _ISR_Flash( level );
923
924    if ( _Thread_Is_heir( executing ) )
925      _Thread_Heir = (Thread_Control *) ready->first;
926
927    _Context_Switch_necessary = TRUE;
928
929  _ISR_Enable( level );
930}
931
932/*PAGE
933 *
934 *  _Thread_Tickle_timeslice
935 *
936 *  This scheduler routine determines if timeslicing is enabled
937 *  for the currently executing thread and, if so, updates the
938 *  timeslice count and checks for timeslice expiration.
939 *
940 *  Input parameters:   NONE
941 *
942 *  Output parameters:  NONE
943 */
944
945void _Thread_Tickle_timeslice( void )
946{
947  Thread_Control *executing;
948
949  executing = _Thread_Executing;
950
951  /*
952   *  Increment the number of ticks this thread has been executing
953   */
954
955  executing->ticks_executed++;
956
957  /*
958   *  If the thread is not preemptible or is not ready, then
959   *  just return.
960   */
961
962  if ( !executing->is_preemptible )
963    return;
964
965  if ( !_States_Is_ready( executing->current_state ) )
966    return;
967
968  /*
969   *  The cpu budget algorithm determines what happens next.
970   */
971
972  switch ( executing->budget_algorithm ) {
973    case THREAD_CPU_BUDGET_ALGORITHM_NONE:
974      break;
975
976    case THREAD_CPU_BUDGET_ALGORITHM_RESET_TIMESLICE:
977    case THREAD_CPU_BUDGET_ALGORITHM_EXHAUST_TIMESLICE:
978      if ( --executing->cpu_time_budget == 0 ) {
979        _Thread_Reset_timeslice();
980        executing->cpu_time_budget = _Thread_Ticks_per_timeslice;
981      }
982      break;
983
984    case THREAD_CPU_BUDGET_ALGORITHM_CALLOUT:
985      if ( --executing->cpu_time_budget == 0 )
986        (*executing->budget_callout)( executing );
987      break;
988  }
989}
990
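/*
 *  Illustrative sketch of a budget callout for the
 *  THREAD_CPU_BUDGET_ALGORITHM_CALLOUT case above.  The callout is invoked
 *  from the clock tick when the thread's budget reaches zero; it is
 *  installed through the budget_callout argument of _Thread_Initialize().
 *  This hypothetical example simply grants the thread a fresh timeslice;
 *  an API could instead lower its priority, block it, etc.
 */

void my_budget_callout(
  Thread_Control *the_thread
)
{
  the_thread->cpu_time_budget = _Thread_Ticks_per_timeslice;
}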
991/*PAGE
992 *
993 *  _Thread_Yield_processor
994 *
995 *  This kernel routine removes the running THREAD from the ready chain
996 *  and places it immediately at the rear of this chain.  Reset timeslice
997 *  and yield the processor functions both use this routine, therefore if
998 *  reset is TRUE and this is the only thread on the chain then the
999 *  timeslice counter is reset.  The heir THREAD will be updated if the
1000 *  running thread is also currently the heir.
1001 *
1002 *  Input parameters:   NONE
1003 *
1004 *  Output parameters:  NONE
1005 *
1006 *  INTERRUPT LATENCY:
1007 *    ready chain
1008 *    select heir
1009 */
1010
1011void _Thread_Yield_processor( void )
1012{
1013  ISR_Level       level;
1014  Thread_Control *executing;
1015  Chain_Control  *ready;
1016
1017  executing = _Thread_Executing;
1018  ready     = executing->ready;
1019  _ISR_Disable( level );
1020    if ( !_Chain_Has_only_one_node( ready ) ) {
1021      _Chain_Extract_unprotected( &executing->Object.Node );
1022      _Chain_Append_unprotected( ready, &executing->Object.Node );
1023
1024      _ISR_Flash( level );
1025
1026      if ( _Thread_Is_heir( executing ) )
1027        _Thread_Heir = (Thread_Control *) ready->first;
1028      _Context_Switch_necessary = TRUE;
1029    }
1030    else if ( !_Thread_Is_heir( executing ) )
1031      _Context_Switch_necessary = TRUE;
1032
1033  _ISR_Enable( level );
1034}
1035
1036/*PAGE
1037 *
1038 *  _Thread_Load_environment
1039 *
1040 *  Load starting environment for another thread from its start area in the
1041 *  thread.  Only called from t_restart and t_start.
1042 *
1043 *  Input parameters:
1044 *    the_thread - thread control block pointer
1045 *
1046 *  Output parameters:  NONE
1047 */
1048
1049void _Thread_Load_environment(
1050  Thread_Control *the_thread
1051)
1052{
1053  boolean is_fp = FALSE;
1054
1055  if ( the_thread->Start.fp_context ) {
1056    the_thread->fp_context = the_thread->Start.fp_context;
1057    _Context_Initialize_fp( &the_thread->fp_context );
1058    is_fp = TRUE;
1059  }
1060
1061  the_thread->do_post_task_switch_extension = FALSE;
1062  the_thread->is_preemptible   = the_thread->Start.is_preemptible;
1063  the_thread->budget_algorithm = the_thread->Start.budget_algorithm;
1064  the_thread->budget_callout   = the_thread->Start.budget_callout;
1065
1066  _Context_Initialize(
1067    &the_thread->Registers,
1068    the_thread->Start.Initial_stack.area,
1069    the_thread->Start.Initial_stack.size,
1070    the_thread->Start.isr_level,
1071    _Thread_Handler,
1072    is_fp
1073  );
1074
1075}
1076
1077/*PAGE
1078 *
1079 *  _Thread_Handler
1080 *
1081 *  This routine is the "primal" entry point for all threads.
1082 *  _Context_Initialize() dummies up the thread's initial context
1083 *  to cause the first Context_Switch() to jump to _Thread_Handler().
1084 *
1085 *  This routine is the default thread exit error handler.  It is
1086 *  returned to when a thread exits.  The configured fatal error handler
1087 *  is invoked to process the exit.
1088 *
1089 *  NOTE:
1090 *
1091 *  On entry, it is assumed all interrupts are blocked and that this
1092 *  routine needs to set the initial isr level.  This may or may not
1093 *  actually be needed by the context switch routine and as a result
1094 *  interrupts may already be at their proper level.  Either way,
1095 *  setting the initial isr level properly here is safe.
1096 * 
1097 *  Currently this is only really needed for the posix port,
1098 *  ref: _Context_Switch in unix/cpu.c
1099 *
1100 *  Input parameters:   NONE
1101 *
1102 *  Output parameters:  NONE
1103 */
1104
1105void _Thread_Handler( void )
1106{
1107  ISR_Level  level;
1108  Thread_Control *executing;
1109 
1110  executing = _Thread_Executing;
1111 
1112  /*
1113   * Have to put level into a register for those CPUs that use
1114   * inline asm here.
1115   */
1116 
1117  level = executing->Start.isr_level;
1118  _ISR_Set_level(level);
1119
1120  /*
1121   * Take care that 'begin' extensions get to complete before
1122   * 'switch' extensions can run.  This means we must keep dispatch
1123   * disabled until all 'begin' extensions complete.
1124   */
1125 
1126  _User_extensions_Thread_begin( executing );
1127 
1128  /*
1129   *  At this point, the dispatch disable level BETTER be 1.
1130   */
1131
1132  _Thread_Enable_dispatch();
1133 
1134  switch ( executing->Start.prototype ) {
1135    case THREAD_START_NUMERIC:
1136      (*(Thread_Entry_numeric) executing->Start.entry_point)(
1137        executing->Start.numeric_argument
1138      );
1139      break;
1140    case THREAD_START_POINTER:
1141      (*(Thread_Entry_pointer) executing->Start.entry_point)(
1142        executing->Start.pointer_argument
1143      );
1144      break;
1145    case THREAD_START_BOTH_POINTER_FIRST:
1146      (*(Thread_Entry_both_pointer_first) executing->Start.entry_point)(
1147        executing->Start.pointer_argument,
1148        executing->Start.numeric_argument
1149      );
1150      break;
1151    case THREAD_START_BOTH_NUMERIC_FIRST:
1152      (*(Thread_Entry_both_numeric_first) executing->Start.entry_point)(
1153        executing->Start.numeric_argument,
1154        executing->Start.pointer_argument
1155      );
1156      break;
1157  }
1158
1159  _User_extensions_Thread_exitted( executing );
1160
1161  _Internal_error_Occurred(
1162    INTERNAL_ERROR_CORE,
1163    TRUE,
1164    INTERNAL_ERROR_THREAD_EXITTED
1165  );
1166}
1167
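/*
 *  Illustrative sketch of entry points matching the start prototypes
 *  dispatched in the switch above.  The Thread_Entry_* typedefs in the
 *  thread header define the exact return type; these hypothetical
 *  functions simply show the argument lists the casts expect.
 */

void my_numeric_entry(
  unsigned32 argument                   /* THREAD_START_NUMERIC             */
)
{
}

void my_pointer_entry(
  void *argument                        /* THREAD_START_POINTER             */
)
{
}

void my_both_pointer_first_entry(
  void       *pointer_argument,         /* THREAD_START_BOTH_POINTER_FIRST  */
  unsigned32  numeric_argument
)
{
}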
1168/*PAGE
1169 *
1170 *  _Thread_Delay_ended
1171 *
1172 *  This routine processes a thread whose delay period has ended.
1173 *  It is called by the watchdog handler.
1174 *
1175 *  Input parameters:
1176 *    id - thread id
1177 *
1178 *  Output parameters: NONE
1179 */
1180
1181void _Thread_Delay_ended(
1182  Objects_Id  id,
1183  void       *ignored
1184)
1185{
1186  Thread_Control    *the_thread;
1187  Objects_Locations  location;
1188
1189  the_thread = _Thread_Get( id, &location );
1190  switch ( location ) {
1191    case OBJECTS_ERROR:
1192    case OBJECTS_REMOTE:  /* impossible */
1193      break;
1194    case OBJECTS_LOCAL:
1195      _Thread_Unblock( the_thread );
1196      _Thread_Unnest_dispatch();
1197      break;
1198  }
1199}
1200
1201/*PAGE
1202 *
1203 *  _Thread_Change_priority
1204 *
1205 *  This kernel routine changes the priority of the thread.  The
1206 *  thread chain is adjusted if necessary.
1207 *
1208 *  Input parameters:
1209 *    the_thread   - pointer to thread control block
1210 *    new_priority - ultimate priority
1211 *    prepend_it   - TRUE if the thread should be prepended to the chain
1212 *
1213 *  Output parameters:  NONE
1214 *
1215 *  INTERRUPT LATENCY:
1216 *    ready chain
1217 *    select heir
1218 */
1219
1220void _Thread_Change_priority(
1221  Thread_Control   *the_thread,
1222  Priority_Control  new_priority,
1223  boolean           prepend_it
1224)
1225{
1226  ISR_Level level;
1227  /* boolean   do_prepend = FALSE; */
1228
1229  /*
1230   *  If this is a case where prepending the task to its priority is
1231   *  potentially desired, then we need to consider whether to do it.
1232   *  This usually occurs when a task lowers its priority implicitly as
1233   *  the result of losing inherited priority.  Normal explicit priority
1234   *  change calls (e.g. rtems_task_set_priority) should always do an
1235   *  append not a prepend.
1236   */
1237 
1238  /*
1239   *  Technically, the prepend should be conditional on the thread lowering
1240   *  its priority, but leaving it unconditional allows cxd2004 of the
1241   *  acvc 2.0.1 to pass with rtems 4.0.0.  This should change when gnat
1242   *  redoes its priority scheme.
1243   */
1244/*
1245  if ( prepend_it &&
1246       _Thread_Is_executing( the_thread ) &&
1247       new_priority >= the_thread->current_priority )
1248    prepend_it = TRUE;
1249*/
1250                 
1251  _Thread_Set_transient( the_thread );
1252
1253  if ( the_thread->current_priority != new_priority )
1254    _Thread_Set_priority( the_thread, new_priority );
1255
1256  _ISR_Disable( level );
1257
1258  the_thread->current_state =
1259    _States_Clear( STATES_TRANSIENT, the_thread->current_state );
1260
1261  if ( ! _States_Is_ready( the_thread->current_state ) ) {
1262    _ISR_Enable( level );
1263    return;
1264  }
1265
1266  _Priority_Add_to_bit_map( &the_thread->Priority_map );
1267  if ( prepend_it )
1268    _Chain_Prepend_unprotected( the_thread->ready, &the_thread->Object.Node );
1269  else
1270    _Chain_Append_unprotected( the_thread->ready, &the_thread->Object.Node );
1271
1272  _ISR_Flash( level );
1273
1274  _Thread_Calculate_heir();
1275
1276  if ( !_Thread_Is_executing_also_the_heir() &&
1277       _Thread_Executing->is_preemptible )
1278    _Context_Switch_necessary = TRUE;
1279  _ISR_Enable( level );
1280}
1281
1282/*PAGE
1283 *
1284 * _Thread_Set_priority
1285 *
1286 * This kernel routine sets the current priority of the thread and
1287 * updates its ready chain pointer and priority map information.
1288 *
1289 *  Input parameters:
1290 *    the_thread   - pointer to thread control block
1291 *    new_priority - new priority
1292 *
1293 *  Output: NONE
1294 */
1295
1296void _Thread_Set_priority(
1297  Thread_Control   *the_thread,
1298  Priority_Control  new_priority
1299)
1300{
1301  the_thread->current_priority = new_priority;
1302  the_thread->ready            = &_Thread_Ready_chain[ new_priority ];
1303
1304  _Priority_Initialize_information( &the_thread->Priority_map, new_priority );
1305}
1306
1307/*PAGE
1308 *
1309 *  _Thread_Evaluate_mode
1310 *
1311 *  This routine determines whether a context switch is now necessary for the executing thread and returns TRUE if so.
1312 */
1313
1314boolean _Thread_Evaluate_mode( void )
1315{
1316  Thread_Control     *executing;
1317
1318  executing = _Thread_Executing;
1319
1320  if ( !_States_Is_ready( executing->current_state ) ||
1321       ( !_Thread_Is_heir( executing ) && executing->is_preemptible ) ) {
1322    _Context_Switch_necessary = TRUE;
1323    return TRUE;
1324  }
1325
1326  return FALSE;
1327}
1328
1329/*PAGE
1330 *
1331 *  _Thread_Get
1332 *
1333 *  NOTE:  If we are not using static inlines, this must be a real
1334 *         subroutine call.
1335 *
1336 *  NOTE:  XXX... This routine may be able to be optimized.
1337 */
1338
1339#ifndef USE_INLINES
1340
1341Thread_Control *_Thread_Get (
1342  Objects_Id           id,
1343  Objects_Locations   *location
1344)
1345{
1346  Objects_Classes      the_class;
1347  Objects_Information *information;
1348 
1349  if ( _Objects_Are_ids_equal( id, OBJECTS_ID_OF_SELF ) ) {
1350    _Thread_Disable_dispatch();
1351    *location = OBJECTS_LOCAL;
1352    return( _Thread_Executing );
1353  }
1354 
1355  the_class = _Objects_Get_class( id );
1356 
1357  if ( the_class > OBJECTS_CLASSES_LAST ) {
1358    *location = OBJECTS_ERROR;
1359    return (Thread_Control *) 0;
1360  }
1361 
1362  information = _Objects_Information_table[ the_class ];
1363 
1364  if ( !information || !information->is_thread ) {
1365    *location = OBJECTS_ERROR;
1366    return (Thread_Control *) 0;
1367  }
1368 
1369  return (Thread_Control *) _Objects_Get( information, id, location );
1370}
1371
1372#endif
1373
1374/*PAGE
1375 *
1376 *  _Thread_Idle_body
1377 *
1378 *  This kernel routine is the idle thread.  The idle thread runs any time
1379 *  no other thread is ready to run.  This thread loops forever with
1380 *  interrupts enabled.
1381 *
1382 *  Input parameters:
1383 *    ignored - this parameter is ignored
1384 *
1385 *  Output parameters:  NONE
1386 */
1387 
1388#if (CPU_PROVIDES_IDLE_THREAD_BODY == FALSE)
1389Thread _Thread_Idle_body(
1390  unsigned32 ignored
1391)
1392{
1393  for( ; ; ) ;
1394}
1395#endif