Changeset 93b4e6ef in rtems


Timestamp:
Nov 2, 1999, 9:05:17 PM
Author:
Joel Sherrill <joel.sherrill@…>
Branches:
4.10, 4.11, 4.8, 4.9, master
Children:
82cb78d8
Parents:
dfbfa2b0
Message:

Split Heap and Time of Day Handlers.

Files:
20 added
7 edited

  • c/src/exec/score/src/Makefile.in

    rdfbfa2b0 r93b4e6ef  
    2323MP_C_PIECES = $(MP_C_PIECES_$(HAS_MP)_V)
    2424
     25HEAP_C_PIECES = heap heapallocate heapextend heapfree \
     26    heapsizeofuserarea heapwalk
     27
    2528OBJECT_C_PIECES = object objectallocate objectallocatebyindex \
    2629    objectclearname objectcomparenameraw objectcomparenamestring \
     
    4346    threadqfirstfifo threadqfirstpriority threadqflush threadqtimeout
    4447
     48TOD_C_PIECES= coretod coretodset coretodtickle coretodtoseconds \
     49    coretodvalidate
     50
    4551# C and C++ source names, if any, go here -- minus the .c or .cc
    46 C_PIECES = apiext chain coremsg coremutex coresem coretod heap interr isr \
    47     $(OBJECT_C_PIECES) $(THREAD_C_PIECES) $(THREADQ_C_PIECES) userext \
     52C_PIECES = apiext chain coremsg coremutex coresem $(HEAP_C_PIECES) interr isr \
     53    $(OBJECT_C_PIECES) $(THREAD_C_PIECES) $(THREADQ_C_PIECES) \
     54    $(TOD_C_PIECES) userext \
    4855    watchdog wkspace $(MP_C_PIECES)
    4956C_FILES = $(C_PIECES:%=%.c)
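Each name added to HEAP_C_PIECES and TOD_C_PIECES above corresponds to one of the 20 newly added files: the formerly monolithic heap.c and coretod.c are being split so that each routine lives in its own source file, presumably so the linker only pulls in the routines an application actually references. As a rough sketch of what one of the added pieces contains (the include list is an assumption about the usual score header layout, not something shown in this changeset), heapsizeofuserarea.c would be little more than the unchanged routine from the heap.c diff below plus its own boilerplate:

    /*
     *  heapsizeofuserarea.c -- illustrative sketch of one split-out piece.
     *  The body is _Heap_Size_of_user_area exactly as it appears in the
     *  heap.c diff below; only the per-file includes are assumed here.
     */

    #include <rtems/system.h>
    #include <rtems/score/heap.h>

    boolean _Heap_Size_of_user_area(
      Heap_Control *the_heap,
      void         *starting_address,
      unsigned32   *size
    )
    {
      Heap_Block *the_block;
      Heap_Block *next_block;
      unsigned32  the_size;

      /* step back from the user pointer to the enclosing block header */
      the_block = _Heap_User_block_at( starting_address );

      if ( !_Heap_Is_block_in( the_heap, the_block ) ||
            _Heap_Is_block_free( the_block ) )
        return( FALSE );

      the_size   = _Heap_Block_size( the_block );
      next_block = _Heap_Block_at( the_block, the_size );

      /* a consistent used block has matching front and back boundary tags */
      if ( !_Heap_Is_block_in( the_heap, next_block ) ||
           (the_block->front_flag != next_block->back_flag) )
        return( FALSE );

      *size = the_size;
      return( TRUE );
    }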
  • c/src/exec/score/src/coretod.c

    rdfbfa2b0 r93b4e6ef  
    5959  _TOD_Activate( _TOD_Ticks_per_second );
    6060}
    61 
    62 /*PAGE
    63  *
    64  *  _TOD_Set
    65  *
    66  *  This routine sets the current date and time from the specified
    67  *  new date and time structure.
    68  *
    69  *  Input parameters:
    70  *    the_tod             - pointer to the time and date structure
    71  *    seconds_since_epoch - seconds since system epoch
    72  *
    73  *  Output parameters: NONE
    74  */
    75 
    76 void _TOD_Set(
    77   TOD_Control *the_tod,
    78   Watchdog_Interval  seconds_since_epoch
    79 )
    80 {
    81   Watchdog_Interval ticks_until_next_second;
    82 
    83   _Thread_Disable_dispatch();
    84   _TOD_Deactivate();
    85 
    86   if ( seconds_since_epoch < _TOD_Seconds_since_epoch )
    87     _Watchdog_Adjust_seconds( WATCHDOG_BACKWARD,
    88        _TOD_Seconds_since_epoch - seconds_since_epoch );
    89   else
    90     _Watchdog_Adjust_seconds( WATCHDOG_FORWARD,
    91        seconds_since_epoch - _TOD_Seconds_since_epoch );
    92 
    93   ticks_until_next_second = _TOD_Ticks_per_second;
    94   if ( ticks_until_next_second > _TOD_Current.ticks )
    95     ticks_until_next_second -= _TOD_Current.ticks;
    96 
    97   _TOD_Current             = *the_tod;
    98   _TOD_Seconds_since_epoch = seconds_since_epoch;
    99   _TOD_Is_set              = TRUE;
    100   _TOD_Activate( ticks_until_next_second );
    101 
    102   _Thread_Enable_dispatch();
    103 }
    104 
    105 /*PAGE
    106  *
    107  *  _TOD_Validate
    108  *
    109  *  This kernel routine checks the validity of a date and time structure.
    110  *
    111  *  Input parameters:
    112  *    the_tod - pointer to a time and date structure
    113  *
    114  *  Output parameters:
    115  *    TRUE  - if the date, time, and tick are valid
    116  *    FALSE - if the_tod is invalid
    117  *
    118  *  NOTE: This routine only works for leap-years through 2099.
    119  */
    120 
    121 boolean _TOD_Validate(
    122   TOD_Control *the_tod
    123 )
    124 {
    125   unsigned32 days_in_month;
    126 
    127   if ((the_tod->ticks  >= _TOD_Ticks_per_second)  ||
    128       (the_tod->second >= TOD_SECONDS_PER_MINUTE) ||
    129       (the_tod->minute >= TOD_MINUTES_PER_HOUR)   ||
    130       (the_tod->hour   >= TOD_HOURS_PER_DAY)      ||
    131       (the_tod->month  == 0)                      ||
    132       (the_tod->month  >  TOD_MONTHS_PER_YEAR)    ||
    133       (the_tod->year   <  TOD_BASE_YEAR)          ||
    134       (the_tod->day    == 0) )
    135      return FALSE;
    136 
    137   if ( (the_tod->year % 4) == 0 )
    138     days_in_month = _TOD_Days_per_month[ 1 ][ the_tod->month ];
    139   else
    140     days_in_month = _TOD_Days_per_month[ 0 ][ the_tod->month ];
    141 
    142   if ( the_tod->day > days_in_month )
    143     return FALSE;
    144 
    145   return TRUE;
    146 }
    147 
    148 /*PAGE
    149  *
    150  *  _TOD_To_seconds
    151  *
    152  *  This routine returns the number of seconds from the epoch until
    153  *  the given date and time.
    154  *
    155  *  Input parameters:
    156  *    the_tod - pointer to the time and date structure
    157  *
    158  *  Output parameters:
    159  *    returns    - seconds since epoch until the_tod
    160  */
    161 
    162 unsigned32 _TOD_To_seconds(
    163   TOD_Control *the_tod
    164 )
    165 {
    166   unsigned32 time;
    167   unsigned32 year_mod_4;
    168 
    169   time = the_tod->day - 1;
    170   year_mod_4 = the_tod->year & 3;
    171 
    172   if ( year_mod_4 == 0 )
    173     time += _TOD_Days_to_date[ 1 ][ the_tod->month ];
    174   else
    175     time += _TOD_Days_to_date[ 0 ][ the_tod->month ];
    176 
    177   time += ( (the_tod->year - TOD_BASE_YEAR) / 4 ) *
    178             ( (TOD_DAYS_PER_YEAR * 4) + 1);
    179 
    180   time += _TOD_Days_since_last_leap_year[ year_mod_4 ];
    181 
    182   time *= TOD_SECONDS_PER_DAY;
    183 
    184   time += ((the_tod->hour * TOD_MINUTES_PER_HOUR) + the_tod->minute)
    185              * TOD_SECONDS_PER_MINUTE;
    186 
    187   time += the_tod->second;
    188 
    189   return( time );
    190 }
    191 
    192 /*PAGE
    193  *
    194  *  _TOD_Tickle
    195  *
    196  *  This routine updates the calendar time and tickles the
    197  *  per second watchdog timer chain.
    198  *
    199  *  Input parameters:
    200  *    ignored - this parameter is ignored
    201  *
    202  *  Output parameters: NONE
    203  *
    204  *  NOTE: This routine only works for leap-years through 2099.
    205  */
    206 
    207 void _TOD_Tickle(
    208   Objects_Id  id,
    209   void       *ignored
    210 )
    211 {
    212   unsigned32 leap;
    213 
    214   _TOD_Current.ticks = 0;
    215   ++_TOD_Seconds_since_epoch;
    216   if ( ++_TOD_Current.second >= TOD_SECONDS_PER_MINUTE ) {
    217     _TOD_Current.second = 0;
    218     if ( ++_TOD_Current.minute >= TOD_MINUTES_PER_HOUR ) {
    219       _TOD_Current.minute = 0;
    220       if ( ++_TOD_Current.hour >= TOD_HOURS_PER_DAY ) {
    221         _TOD_Current.hour = 0;
    222         if ( _TOD_Current.year & 0x3 ) leap = 0;
    223         else                           leap = 1;
    224         if ( ++_TOD_Current.day >
    225                _TOD_Days_per_month[ leap ][ _TOD_Current.month ]) {
    226           _TOD_Current.day = 1;
    227           if ( ++_TOD_Current.month > TOD_MONTHS_PER_YEAR ) {
    228             _TOD_Current.month = 1;
    229             _TOD_Current.year++;
    230           }
    231         }
    232       }
    233     }
    234   }
    235 
    236   _Watchdog_Tickle_seconds();
    237   _Watchdog_Insert_ticks( &_TOD_Seconds_watchdog, _TOD_Ticks_per_second );
    238 }
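The NOTE on _TOD_Validate and _TOD_Tickle about handling leap years only through 2099 comes from the plain year % 4 test used above: under the full Gregorian rules a century year is a leap year only when divisible by 400, so 2100 is the first year where the shortcut misclassifies. A small standalone sketch (the helper names are illustrative and not part of this changeset) shows where the two rules part ways:

    #include <stdio.h>

    /* the divisible-by-four shortcut used by _TOD_Validate and _TOD_Tickle */
    static int is_leap_mod4( int year )
    {
      return ( year % 4 ) == 0;
    }

    /* full Gregorian rule: every 4th year, except centuries not divisible by 400 */
    static int is_leap_gregorian( int year )
    {
      return ( year % 4 == 0 ) && ( year % 100 != 0 || year % 400 == 0 );
    }

    int main( void )
    {
      int year;

      for ( year = 1988 ; year <= 2200 ; year++ )
        if ( is_leap_mod4( year ) != is_leap_gregorian( year ) )
          printf( "rules diverge at %d\n", year );

      return 0;
    }

Run over 1988 through 2200, the only years reported are 2100 and 2200, which is why the shortcut is adequate for the handler's intended range.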
  • c/src/exec/score/src/heap.c

    rdfbfa2b0 r93b4e6ef  
    9393}
    9494
    95 /*PAGE
    96  *
    97  *  _Heap_Extend
    98  *
    99  *  This routine grows the_heap memory area using the size bytes which
    100  *  begin at starting_address.
    101  *
    102  *  Input parameters:
    103  *    the_heap          - pointer to heap header.
    104  *    starting_address  - pointer to the memory area.
    105  *    size              - size in bytes of the memory area being added.
    106  *
    107  *  Output parameters:
    108  *    *amount_extended  - amount of memory added to the_heap
    109  */
    110 
    111 Heap_Extend_status _Heap_Extend(
    112   Heap_Control        *the_heap,
    113   void                *starting_address,
    114   unsigned32           size,
    115   unsigned32          *amount_extended
    116 )
    117 {
    118   Heap_Block        *the_block;
    119   unsigned32        *p;
    120  
    121   /*
    122    *  The overhead was taken from the original heap memory.
    123    */
    124 
    125   Heap_Block  *old_final;
    126   Heap_Block  *new_final;
    127 
    128   /*
    129    *  There are five possibilities for the location of starting
    130    *  address:
    131    *
    132    *    1. non-contiguous lower address     (NOT SUPPORTED)
    133    *    2. contiguous lower address         (NOT SUPPORTED)
    134    *    3. in the heap                      (ERROR)
    135    *    4. contiguous higher address        (SUPPORTED)
    136    *    5. non-contiguous higher address    (NOT SUPPORTED)
    137    *
    138    *  As noted, this code only supports (4).
    139    */
    140 
    141   if ( starting_address >= (void *) the_heap->start &&        /* case 3 */
    142        starting_address <= (void *) the_heap->final
    143      )
    144     return HEAP_EXTEND_ERROR;
    145 
    146   if ( starting_address < (void *) the_heap->start ) {  /* cases 1 and 2 */
    147 
    148       return HEAP_EXTEND_NOT_IMPLEMENTED;               /* cases 1 and 2 */
    149 
    150   } else {                                              /* cases 4 and 5 */
    151 
    152     the_block = (Heap_Block *)
    153        _Addresses_Subtract_offset( starting_address, HEAP_OVERHEAD );
    154     if ( the_block != the_heap->final )
    155       return HEAP_EXTEND_NOT_IMPLEMENTED;                   /* case 5 */
    156   }
    157 
    158   /*
    159    *  Currently only case 4 should make it to this point.
    160    *  The basic trick is to make the extend area look like a used
    161    *  block and free it.
    162    */
    163 
    164   *amount_extended = size;
    165 
    166   old_final = the_heap->final;
    167   new_final = _Addresses_Add_offset( old_final, size );
    168   /* SAME AS: _Addresses_Add_offset( starting_address, size-HEAP_OVERHEAD ); */
    169 
    170   the_heap->final = new_final;
    171 
    172   old_final->front_flag =
    173   new_final->back_flag  = _Heap_Build_flag( size, HEAP_BLOCK_USED );
    174   new_final->front_flag = HEAP_DUMMY_FLAG;
    175 
    176   /*
    177    *  Must pass in address of "user" area
    178    *  So add in the offset field.
    179    */
    180 
    181   p = (unsigned32 *) &old_final->next;
    182   *p = sizeof(unsigned32);
    183   p++;
    184   _Heap_Free( the_heap, p );
    185  
    186   return HEAP_EXTEND_SUCCESSFUL;
    187 }
    188 
    189 /*PAGE
    190  *
    191  *  _Heap_Allocate
    192  *
    193  *  This kernel routine allocates the requested size of memory
    194  *  from the specified heap.
    195  *
    196  *  Input parameters:
    197  *    the_heap  - pointer to heap header.
    198  *    size      - size in bytes of the memory block to allocate.
    199  *
    200  *  Output parameters:
    201  *    returns - starting address of memory block allocated
    202  */
    203 
    204 void *_Heap_Allocate(
    205   Heap_Control        *the_heap,
    206   unsigned32           size
    207 )
    208 {
    209   unsigned32  excess;
    210   unsigned32  the_size;
    211   Heap_Block *the_block;
    212   Heap_Block *next_block;
    213   Heap_Block *temporary_block;
    214   void       *ptr;
    215   unsigned32  offset;
    216  
    217   excess   = size % the_heap->page_size;
    218   the_size = size + the_heap->page_size + HEAP_BLOCK_USED_OVERHEAD;
    219  
    220   if ( excess )
    221     the_size += the_heap->page_size - excess;
    222 
    223   if ( the_size < sizeof( Heap_Block ) )
    224     the_size = sizeof( Heap_Block );
    225 
    226   for ( the_block = the_heap->first;
    227         ;
    228         the_block = the_block->next ) {
    229     if ( the_block == _Heap_Tail( the_heap ) )
    230       return( NULL );
    231     if ( the_block->front_flag >= the_size )
    232       break;
    233   }
    234 
    235   if ( (the_block->front_flag - the_size) >
    236        (the_heap->page_size + HEAP_BLOCK_USED_OVERHEAD) ) {
    237     the_block->front_flag -= the_size;
    238     next_block             = _Heap_Next_block( the_block );
    239     next_block->back_flag  = the_block->front_flag;
    240 
    241     temporary_block            = _Heap_Block_at( next_block, the_size );
    242     temporary_block->back_flag =
    243     next_block->front_flag     = _Heap_Build_flag( the_size,
    244                                     HEAP_BLOCK_USED );
    245     ptr = _Heap_Start_of_user_area( next_block );
    246   } else {
    247     next_block                = _Heap_Next_block( the_block );
    248     next_block->back_flag     = _Heap_Build_flag( the_block->front_flag,
    249                                    HEAP_BLOCK_USED );
    250     the_block->front_flag     = next_block->back_flag;
    251     the_block->next->previous = the_block->previous;
    252     the_block->previous->next = the_block->next;
    253     ptr = _Heap_Start_of_user_area( the_block );
    254   }
    255  
    256   /*
    257    * round ptr up to a multiple of page size
    258    * Have to save the bump amount in the buffer so that free can figure it out
    259    */
    260  
    261   offset = the_heap->page_size - (((unsigned32) ptr) & (the_heap->page_size - 1));
    262   ptr = _Addresses_Add_offset( ptr, offset );
    263   *(((unsigned32 *) ptr) - 1) = offset;
    264 
    265 #ifdef RTEMS_DEBUG
    266   {
    267       unsigned32 ptr_u32;
    268       ptr_u32 = (unsigned32) ptr;
    269       if (ptr_u32 & (the_heap->page_size - 1))
    270           abort();
    271   }
    272 #endif
    273 
    274   return ptr;
    275 }
    276 
    277 /*PAGE
    278  *
    279  *  _Heap_Size_of_user_area
    280  *
    281  *  This kernel routine returns the size of the user memory area of the
    282  *  given heap block.
    283  *
    284  *  Input parameters:
    285  *    the_heap         - pointer to heap header
    286  *    starting_address - starting address of the memory block.
    287  *    size             - pointer to size of area
    288  *
    289  *  Output parameters:
    290  *    size  - size of area filled in
    291  *    TRUE  - if starting_address is valid heap address
    292  *    FALSE - if starting_address is invalid heap address
    293  */
    294 
    295 boolean _Heap_Size_of_user_area(
    296   Heap_Control        *the_heap,
    297   void                *starting_address,
    298   unsigned32          *size
    299 )
    300 {
    301   Heap_Block        *the_block;
    302   Heap_Block        *next_block;
    303   unsigned32         the_size;
    304 
    305   the_block = _Heap_User_block_at( starting_address );
    306  
    307   if ( !_Heap_Is_block_in( the_heap, the_block ) ||
    308         _Heap_Is_block_free( the_block ) )
    309     return( FALSE );
    310 
    311   the_size   = _Heap_Block_size( the_block );
    312   next_block = _Heap_Block_at( the_block, the_size );
    313 
    314   if ( !_Heap_Is_block_in( the_heap, next_block ) ||
    315        (the_block->front_flag != next_block->back_flag) )
    316     return( FALSE );
    317 
    318   *size = the_size;
    319   return( TRUE );
    320 }
    321 
    322 /*PAGE
    323  *
    324  *  _Heap_Free
    325  *
    326  *  This kernel routine returns the memory designated by the
    327  *  given heap and given starting address to the memory pool.
    328  *
    329  *  Input parameters:
    330  *    the_heap         - pointer to heap header
    331  *    starting_address - starting address of the memory block to free.
    332  *
    333  *  Output parameters:
    334  *    TRUE  - if starting_address is valid heap address
    335  *    FALSE - if starting_address is invalid heap address
    336  */
    337 
    338 boolean _Heap_Free(
    339   Heap_Control        *the_heap,
    340   void                *starting_address
    341 )
    342 {
    343   Heap_Block        *the_block;
    344   Heap_Block        *next_block;
    345   Heap_Block        *new_next_block;
    346   Heap_Block        *previous_block;
    347   Heap_Block        *temporary_block;
    348   unsigned32         the_size;
    349 
    350   the_block = _Heap_User_block_at( starting_address );
    351 
    352   if ( !_Heap_Is_block_in( the_heap, the_block ) ||
    353         _Heap_Is_block_free( the_block ) ) {
    354       return( FALSE );
    355   }
    356 
    357   the_size   = _Heap_Block_size( the_block );
    358   next_block = _Heap_Block_at( the_block, the_size );
    359 
    360   if ( !_Heap_Is_block_in( the_heap, next_block ) ||
    361        (the_block->front_flag != next_block->back_flag) ) {
    362       return( FALSE );
    363   }
    364 
    365   if ( _Heap_Is_previous_block_free( the_block ) ) {
    366     previous_block = _Heap_Previous_block( the_block );
    367 
    368     if ( !_Heap_Is_block_in( the_heap, previous_block ) ) {
    369         return( FALSE );
    370     }
    371 
    372     if ( _Heap_Is_block_free( next_block ) ) {      /* coalesce both */
    373       previous_block->front_flag += next_block->front_flag + the_size;
    374       temporary_block             = _Heap_Next_block( previous_block );
    375       temporary_block->back_flag  = previous_block->front_flag;
    376       next_block->next->previous  = next_block->previous;
    377       next_block->previous->next  = next_block->next;
    378     }
    379     else {                     /* coalesce prev */
    380       previous_block->front_flag =
    381       next_block->back_flag      = previous_block->front_flag + the_size;
    382     }
    383   }
    384   else if ( _Heap_Is_block_free( next_block ) ) { /* coalesce next */
    385     the_block->front_flag     = the_size + next_block->front_flag;
    386     new_next_block            = _Heap_Next_block( the_block );
    387     new_next_block->back_flag = the_block->front_flag;
    388     the_block->next           = next_block->next;
    389     the_block->previous       = next_block->previous;
    390     next_block->previous->next = the_block;
    391     next_block->next->previous = the_block;
    392 
    393     if (the_heap->first == next_block)
    394         the_heap->first = the_block;
    395   }
    396   else {                          /* no coalesce */
    397     next_block->back_flag     =
    398     the_block->front_flag     = the_size;
    399     the_block->previous       = _Heap_Head( the_heap );
    400     the_block->next           = the_heap->first;
    401     the_heap->first           = the_block;
    402     the_block->next->previous = the_block;
    403   }
    404 
    405   return( TRUE );
    406 }
    407 
    408 /*PAGE
    409  *
    410  *  _Heap_Walk
    411  *
    412  *  This kernel routine walks the heap and verifies its correctness.
    413  *
    414  *  Input parameters:
    415  *    the_heap  - pointer to heap header
    416  *    source    - a numeric indicator of the invoker of this routine
    417  *    do_dump   - when TRUE print the information
    418  *
    419  *  Output parameters: NONE
    420  */
    421 
    422 #ifndef RTEMS_DEBUG
    423 
    424 void _Heap_Walk(
    425   Heap_Control  *the_heap,
    426   int            source,
    427   boolean        do_dump
    428 )
    429 {
    430 }
    431 
    432 #else
    433 
    434 #include <stdio.h>
    435 #include <unistd.h>
    436 
    437 void _Heap_Walk(
    438   Heap_Control  *the_heap,
    439   int            source,
    440   boolean        do_dump
    441 )
    442 {
    443   Heap_Block *the_block  = 0;  /* avoid warnings */
    444   Heap_Block *next_block = 0;  /* avoid warnings */
    445   int         notdone = 1;
    446   int         error = 0;
    447   int         passes = 0;
    448 
    449   /*
    450    * We don't want to allow walking the heap until we have
    451    * transferred control to the user task so we watch the
    452    * system state.
    453    */
    454 
    455   if ( !_System_state_Is_up( _System_state_Get() ) )
    456     return;
    457 
    458   the_block = the_heap->start;
    459 
    460   if (do_dump == TRUE) {
    461     printf("\nPASS: %d  start @ 0x%p   final 0x%p,   first 0x%p  last 0x%p\n",
    462             source, the_heap->start, the_heap->final,
    463                   the_heap->first, the_heap->last
    464           );
    465   }
    466 
    467   /*
    468    * Handle the 1st block
    469    */
    470 
    471   if (the_block->back_flag != HEAP_DUMMY_FLAG) {
    472     printf("PASS: %d  Back flag of 1st block isn't HEAP_DUMMY_FLAG\n", source);
    473     error = 1;
    474   }
    475 
    476   while (notdone) {
    477     passes++;
    478     if (error && (passes > 10))
    479         abort();
    480    
    481     if (do_dump == TRUE) {
    482       printf("PASS: %d  Block @ 0x%p   Back %d,   Front %d",
    483               source, the_block,
    484               the_block->back_flag, the_block->front_flag);
    485       if ( _Heap_Is_block_free(the_block) ) {
    486         printf( "      Prev 0x%p,   Next 0x%p\n",
    487                           the_block->previous, the_block->next);
    488       } else {
    489         printf("\n");
    490       }
    491     }
    492 
    493     /*
    494      * Handle the last block
    495      */
    496 
    497     if ( the_block->front_flag != HEAP_DUMMY_FLAG ) {
    498       next_block = _Heap_Next_block(the_block);
    499       if ( the_block->front_flag != next_block->back_flag ) {
    500         error = 1;
    501         printf("PASS: %d  Front and back flags don't match\n", source);
    502         printf("         Current Block (%p):  Back - %d,  Front - %d",
    503                the_block, the_block->back_flag, the_block->front_flag);
    504         if (do_dump == TRUE) {
    505           if (_Heap_Is_block_free(the_block)) {
    506             printf("      Prev 0x%p,   Next 0x%p\n",
    507                    the_block->previous, the_block->next);
    508           } else {
    509             printf("\n");
    510           }
    511         } else {
    512           printf("\n");
    513         }
    514         printf("         Next Block (%p):     Back - %d,  Front - %d",
    515                next_block, next_block->back_flag, next_block->front_flag);
    516         if (do_dump == TRUE) {
    517           if (_Heap_Is_block_free(next_block)) {
    518             printf("      Prev 0x%p,   Next 0x%p\n",
    519                    the_block->previous, the_block->next);
    520           } else {
    521             printf("\n");
    522           }
    523         } else {
    524           printf("\n");
    525         }
    526       }
    527     }
    528 
    529     if (the_block->front_flag == HEAP_DUMMY_FLAG)
    530       notdone = 0;
    531     else
    532       the_block = next_block;
    533   }
    534 
    535   if (error)
    536       abort();
    537 }
    538 #endif
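The subtle step in _Heap_Allocate above is the final rounding: the candidate user pointer is bumped up to the next page_size boundary and the bump amount is stored in the word immediately below the returned pointer, which is how the free path (via _Heap_User_block_at) gets back to the real start of the block. A standalone sketch of just that arithmetic, with illustrative names and assuming page_size is a power of two (which the mask expression requires), looks like this:

    #include <stdio.h>
    #include <stdint.h>

    /*
     *  Round a raw pointer up to the next page_size boundary and record the
     *  bump in the word just below the returned pointer, mirroring the end
     *  of _Heap_Allocate.  Illustrative only; not the score heap code.
     */
    static void *round_up_and_record( void *raw, uint32_t page_size )
    {
      uintptr_t addr   = (uintptr_t) raw;
      uint32_t  offset = page_size - (uint32_t)( addr & ( page_size - 1 ) );

      addr += offset;

      /* stash the bump so a free routine could recover the block start */
      *( (uint32_t *) addr - 1 ) = offset;

      return (void *) addr;
    }

    int main( void )
    {
      static unsigned char buffer[ 4096 ];   /* stands in for a heap block */
      uint32_t page_size = 256;
      void *user = round_up_and_record( buffer + 8, page_size );

      printf( "raw %p -> aligned %p, stored offset %u\n",
              (void *) ( buffer + 8 ), user, *( (uint32_t *) user - 1 ) );
      return 0;
    }

Note that a pointer which is already aligned is still bumped by a full page_size, exactly as in the routine above, so there is always room for the stored offset word.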
  • c/src/exec/score/src/threadq.c

    rdfbfa2b0 r93b4e6ef  
    7171
    7272}
    73 #if 0
    74 
    75 /*PAGE
    76  *
    77  *  _Thread_queue_Enqueue
    78  *
    79  *  This routine blocks a thread, places it on a thread queue, and optionally
    80  *  starts a timeout timer.
    81  *
    82  *  Input parameters:
    83  *    the_thread_queue - pointer to threadq
    84  *    timeout          - interval to wait
    85  *
    86  *  Output parameters: NONE
    87  *
    88  *  INTERRUPT LATENCY:
    89  *    only case
    90  */
    91 
    92 void _Thread_queue_Enqueue(
    93   Thread_queue_Control *the_thread_queue,
    94   Watchdog_Interval     timeout
    95 )
    96 {
    97   Thread_Control *the_thread;
    98 
    99   the_thread = _Thread_Executing;
    100 
    101 #if defined(RTEMS_MULTIPROCESSING)
    102   if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet )
    103     the_thread = _Thread_MP_Allocate_proxy( the_thread_queue->state );
    104   else
    105 #endif
    106     _Thread_Set_state( the_thread, the_thread_queue->state );
    107 
    108   if ( timeout ) {
    109     _Watchdog_Initialize(
    110        &the_thread->Timer,
    111        _Thread_queue_Timeout,
    112        the_thread->Object.id,
    113        NULL
    114     );
    115 
    116     _Watchdog_Insert_ticks( &the_thread->Timer, timeout );
    117   }
    118 
    119   switch( the_thread_queue->discipline ) {
    120     case THREAD_QUEUE_DISCIPLINE_FIFO:
    121       _Thread_queue_Enqueue_fifo( the_thread_queue, the_thread, timeout );
    122       break;
    123     case THREAD_QUEUE_DISCIPLINE_PRIORITY:
    124       _Thread_queue_Enqueue_priority( the_thread_queue, the_thread, timeout );
    125       break;
    126   }
    127 }
    128 
    129 /*PAGE
    130  *
    131  *  _Thread_queue_Dequeue
    132  *
    133  *  This routine removes a thread from the specified threadq.  If the
    134  *  threadq discipline is FIFO, it unblocks a thread, and cancels its
    135  *  timeout timer.  Priority discipline is processed elsewhere.
    136  *
    137  *  Input parameters:
    138  *    the_thread_queue - pointer to threadq
    139  *
    140  *  Output parameters:
    141  *    returns - thread dequeued or NULL
    142  *
    143  *  INTERRUPT LATENCY:
    144  *    check sync
    145  */
    146 
    147 Thread_Control *_Thread_queue_Dequeue(
    148   Thread_queue_Control *the_thread_queue
    149 )
    150 {
    151   Thread_Control *the_thread;
    152 
    153   switch ( the_thread_queue->discipline ) {
    154     case THREAD_QUEUE_DISCIPLINE_FIFO:
    155       the_thread = _Thread_queue_Dequeue_fifo( the_thread_queue );
    156       break;
    157     case THREAD_QUEUE_DISCIPLINE_PRIORITY:
    158       the_thread = _Thread_queue_Dequeue_priority( the_thread_queue );
    159       break;
    160     default:              /* this is only to prevent warnings */
    161       the_thread = NULL;
    162       break;
    163   }
    164 
    165   return( the_thread );
    166 }
    167 
    168 /*PAGE
    169  *
    170  *  _Thread_queue_Extract_with_proxy
    171  *
    172  *  This routine extracts the_thread from the_thread_queue
    173  *  and ensures that if there is a proxy for this task on
    174  *  another node, it is also dealt with.
    175  *
    176  *  XXX
    177  */
    178  
    179 boolean _Thread_queue_Extract_with_proxy(
    180   Thread_Control       *the_thread
    181 )
    182 {
    183   States_Control                state;
    184   Objects_Classes               the_class;
    185   Thread_queue_Extract_callout  proxy_extract_callout;
    186 
    187   state = the_thread->current_state;
    188 
    189   if ( _States_Is_waiting_on_thread_queue( state ) ) {
    190     if ( _States_Is_waiting_for_rpc_reply( state ) &&
    191          _States_Is_locally_blocked( state ) ) {
    192 
    193       the_class = _Objects_Get_class( the_thread->Wait.id );
    194 
    195       proxy_extract_callout = _Thread_queue_Extract_table[ the_class ];
    196 
    197       if ( proxy_extract_callout )
    198         (*proxy_extract_callout)( the_thread );
    199     }
    200     _Thread_queue_Extract( the_thread->Wait.queue, the_thread );
    201 
    202     return TRUE;
    203   }
    204   return FALSE;
    205 }
    206 
    207 /*PAGE
    208  *
    209  *  _Thread_queue_Extract
    210  *
    211  *  This routine removes a specific thread from the specified threadq,
    212  *  deletes any timeout, and unblocks the thread.
    213  *
    214  *  Input parameters:
    215  *    the_thread_queue - pointer to a threadq header
    216  *    the_thread       - pointer to a thread control block
    217  *
    218  *  Output parameters: NONE
    219  *
    220  *  INTERRUPT LATENCY: NONE
    221  */
    222 
    223 void _Thread_queue_Extract(
    224   Thread_queue_Control *the_thread_queue,
    225   Thread_Control       *the_thread
    226 )
    227 {
    228   switch ( the_thread_queue->discipline ) {
    229     case THREAD_QUEUE_DISCIPLINE_FIFO:
    230       _Thread_queue_Extract_fifo( the_thread_queue, the_thread );
    231       break;
    232     case THREAD_QUEUE_DISCIPLINE_PRIORITY:
    233       _Thread_queue_Extract_priority( the_thread_queue, the_thread );
    234       break;
    235    }
    236 }
    237 
    238 /*PAGE
    239  *
    240  *  _Thread_queue_Flush
    241  *
    242  *  This kernel routine flushes the given thread queue.
    243  *
    244  *  Input parameters:
    245  *    the_thread_queue       - pointer to threadq to be flushed
    246  *    remote_extract_callout - pointer to routine which extracts a remote thread
    247  *    status                 - status to return to the thread
    248  *
    249  *  Output parameters:  NONE
    250  */
    251 
    252 void _Thread_queue_Flush(
    253   Thread_queue_Control       *the_thread_queue,
    254   Thread_queue_Flush_callout  remote_extract_callout,
    255   unsigned32                  status
    256 )
    257 {
    258   Thread_Control *the_thread;
    259 
    260   while ( (the_thread = _Thread_queue_Dequeue( the_thread_queue )) ) {
    261 #if defined(RTEMS_MULTIPROCESSING)
    262     if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    263       ( *remote_extract_callout )( the_thread );
    264     else
    265 #endif
    266       the_thread->Wait.return_code = status;
    267   }
    268 }
    269 
    270 /*PAGE
    271  *
    272  *  _Thread_queue_First
    273  *
    274  *  This routine returns a pointer to the first thread on the
    275  *  specified threadq.
    276  *
    277  *  Input parameters:
    278  *    the_thread_queue - pointer to thread queue
    279  *
    280  *  Output parameters:
    281  *    returns - first thread or NULL
    282  */
    283 
    284 Thread_Control *_Thread_queue_First(
    285   Thread_queue_Control *the_thread_queue
    286 )
    287 {
    288   Thread_Control *the_thread;
    289 
    290   switch ( the_thread_queue->discipline ) {
    291     case THREAD_QUEUE_DISCIPLINE_FIFO:
    292       the_thread = _Thread_queue_First_fifo( the_thread_queue );
    293       break;
    294     case THREAD_QUEUE_DISCIPLINE_PRIORITY:
    295       the_thread = _Thread_queue_First_priority( the_thread_queue );
    296       break;
    297     default:              /* this is only to prevent warnings */
    298       the_thread = NULL;
    299       break;
    300   }
    301 
    302   return the_thread;
    303 }
    304 
    305 /*PAGE
    306  *
    307  *  _Thread_queue_Timeout
    308  *
    309  *  This routine processes a thread which times out while waiting on
    310  *  a thread queue. It is called by the watchdog handler.
    311  *
    312  *  Input parameters:
    313  *    id - thread id
    314  *
    315  *  Output parameters: NONE
    316  */
    317 
    318 void _Thread_queue_Timeout(
    319   Objects_Id  id,
    320   void       *ignored
    321 )
    322 {
    323   Thread_Control       *the_thread;
    324   Thread_queue_Control *the_thread_queue;
    325   Objects_Locations     location;
    326 
    327   the_thread = _Thread_Get( id, &location );
    328   switch ( location ) {
    329     case OBJECTS_ERROR:
    330     case OBJECTS_REMOTE:  /* impossible */
    331       break;
    332     case OBJECTS_LOCAL:
    333       the_thread_queue = the_thread->Wait.queue;
    334 
    335       /*
    336        *  If the_thread_queue is not synchronized, then it is either
    337        *  "nothing happened", "timeout", or "satisfied".   If the_thread
    338        *  is the executing thread, then it is in the process of blocking
    339        *  and it is the thread which is responsible for the synchronization
    340        *  process.
    341        *
    342        *  If it is not satisfied, then it is "nothing happened" and
    343        *  this is the "timeout" transition.  After a request is satisfied,
    344        *  a timeout is not allowed to occur.
    345        */
    346 
    347       if ( the_thread_queue->sync_state != THREAD_QUEUE_SYNCHRONIZED &&
    348            _Thread_Is_executing( the_thread ) ) {
    349         if ( the_thread_queue->sync_state != THREAD_QUEUE_SATISFIED )
    350           the_thread_queue->sync_state = THREAD_QUEUE_TIMEOUT;
    351       } else {
    352         the_thread->Wait.return_code = the_thread->Wait.queue->timeout_status;
    353         _Thread_queue_Extract( the_thread->Wait.queue, the_thread );
    354       }
    355       _Thread_Unnest_dispatch();
    356       break;
    357   }
    358 }
    359 
    360 /*PAGE
    361  *
    362  *  _Thread_queue_Enqueue_fifo
    363  *
    364  *  This routine blocks a thread, places it on a thread queue, and optionally
    365  *  starts a timeout timer.
    366  *
    367  *  Input parameters:
    368  *    the_thread_queue - pointer to threadq
    369  *    the_thread       - pointer to the thread to block
    370  *    timeout          - interval to wait
    371  *
    372  *  Output parameters: NONE
    373  *
    374  *  INTERRUPT LATENCY:
    375  *    only case
    376  */
    377 
    378 void _Thread_queue_Enqueue_fifo (
    379   Thread_queue_Control *the_thread_queue,
    380   Thread_Control       *the_thread,
    381   Watchdog_Interval    timeout
    382 )
    383 {
    384   ISR_Level            level;
    385   Thread_queue_States  sync_state;
    386 
    387   _ISR_Disable( level );
    388 
    389   sync_state = the_thread_queue->sync_state;
    390   the_thread_queue->sync_state = THREAD_QUEUE_SYNCHRONIZED;
    391 
    392   switch ( sync_state ) {
    393     case THREAD_QUEUE_SYNCHRONIZED:
    394       /*
    395        *  This should never happen.  It indicates that someone did not
    396        *  enter a thread queue critical section.
    397        */
    398       break;
    399 
    400     case THREAD_QUEUE_NOTHING_HAPPENED:
    401       _Chain_Append_unprotected(
    402         &the_thread_queue->Queues.Fifo,
    403         &the_thread->Object.Node
    404       );
    405       _ISR_Enable( level );
    406       return;
    407 
    408     case THREAD_QUEUE_TIMEOUT:
    409       the_thread->Wait.return_code = the_thread->Wait.queue->timeout_status;
    410       _ISR_Enable( level );
    411       break;
    412 
    413     case THREAD_QUEUE_SATISFIED:
    414       if ( _Watchdog_Is_active( &the_thread->Timer ) ) {
    415         _Watchdog_Deactivate( &the_thread->Timer );
    416         _ISR_Enable( level );
    417         (void) _Watchdog_Remove( &the_thread->Timer );
    418       } else
    419         _ISR_Enable( level );
    420       break;
    421   }
    422 
    423   /*
    424  *  Global objects with thread queues should not be operated on from an
    425    *  ISR.  But the sync code still must allow short timeouts to be processed
    426    *  correctly.
    427    */
    428 
    429   _Thread_Unblock( the_thread );
    430 
    431 #if defined(RTEMS_MULTIPROCESSING)
    432   if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    433     _Thread_MP_Free_proxy( the_thread );
    434 #endif
    435 
    436 }
    437 
    438 /*PAGE
    439  *
    440  *  _Thread_queue_Dequeue_fifo
    441  *
    442  *  This routine removes a thread from the specified threadq.
    443  *
    444  *  Input parameters:
    445  *    the_thread_queue - pointer to threadq
    446  *
    447  *  Output parameters:
    448  *    returns - thread dequeued or NULL
    449  *
    450  *  INTERRUPT LATENCY:
    451  *    check sync
    452  *    FIFO
    453  */
    454 
    455 Thread_Control *_Thread_queue_Dequeue_fifo(
    456   Thread_queue_Control *the_thread_queue
    457 )
    458 {
    459   ISR_Level              level;
    460   Thread_Control *the_thread;
    461 
    462   _ISR_Disable( level );
    463   if ( !_Chain_Is_empty( &the_thread_queue->Queues.Fifo ) ) {
    464 
    465     the_thread = (Thread_Control *)
    466        _Chain_Get_first_unprotected( &the_thread_queue->Queues.Fifo );
    467 
    468     if ( !_Watchdog_Is_active( &the_thread->Timer ) ) {
    469       _ISR_Enable( level );
    470       _Thread_Unblock( the_thread );
    471     } else {
    472       _Watchdog_Deactivate( &the_thread->Timer );
    473       _ISR_Enable( level );
    474       (void) _Watchdog_Remove( &the_thread->Timer );
    475       _Thread_Unblock( the_thread );
    476     }
    477 
    478 #if defined(RTEMS_MULTIPROCESSING)
    479     if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    480       _Thread_MP_Free_proxy( the_thread );
    481 #endif
    482 
    483     return the_thread;
    484   }
    485 
    486   switch ( the_thread_queue->sync_state ) {
    487     case THREAD_QUEUE_SYNCHRONIZED:
    488     case THREAD_QUEUE_SATISFIED:
    489       _ISR_Enable( level );
    490       return NULL;
    491 
    492     case THREAD_QUEUE_NOTHING_HAPPENED:
    493     case THREAD_QUEUE_TIMEOUT:
    494       the_thread_queue->sync_state = THREAD_QUEUE_SATISFIED;
    495       _ISR_Enable( level );
    496       return _Thread_Executing;
    497   }
    498   return NULL;                /* this is only to prevent warnings */
    499 }
    500 
    501 /*PAGE
    502  *
    503  *  _Thread_queue_Extract_fifo
    504  *
    505  *  This routine removes a specific thread from the specified threadq,
    506  *  deletes any timeout, and unblocks the thread.
    507  *
    508  *  Input parameters:
    509  *    the_thread_queue - pointer to a threadq header
    510  *    the_thread       - pointer to a thread control block
    511  *
    512  *  Output parameters: NONE
    513  *
    514  *  INTERRUPT LATENCY:
    515  *    EXTRACT_FIFO
    516  */
    517 
    518 void _Thread_queue_Extract_fifo(
    519   Thread_queue_Control *the_thread_queue,
    520   Thread_Control       *the_thread
    521 )
    522 {
    523   ISR_Level level;
    524 
    525   _ISR_Disable( level );
    526 
    527   if ( !_States_Is_waiting_on_thread_queue( the_thread->current_state ) ) {
    528     _ISR_Enable( level );
    529     return;
    530   }
    531 
    532   _Chain_Extract_unprotected( &the_thread->Object.Node );
    533 
    534   if ( !_Watchdog_Is_active( &the_thread->Timer ) ) {
    535     _ISR_Enable( level );
    536   } else {
    537     _Watchdog_Deactivate( &the_thread->Timer );
    538     _ISR_Enable( level );
    539     (void) _Watchdog_Remove( &the_thread->Timer );
    540   }
    541 
    542   _Thread_Unblock( the_thread );
    543 
    544 #if defined(RTEMS_MULTIPROCESSING)
    545   if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    546     _Thread_MP_Free_proxy( the_thread );
    547 #endif
    548  
    549 }
    550 
    551 /*PAGE
    552  *
    553  *  _Thread_queue_First_fifo
    554  *
    555  *  This routine returns a pointer to the first thread on the
    556  *  specified threadq.
    557  *
    558  *  Input parameters:
    559  *    the_thread_queue - pointer to threadq
    560  *
    561  *  Output parameters:
    562  *    returns - first thread or NULL
    563  */
    564 
    565 Thread_Control *_Thread_queue_First_fifo(
    566   Thread_queue_Control *the_thread_queue
    567 )
    568 {
    569   if ( !_Chain_Is_empty( &the_thread_queue->Queues.Fifo ) )
    570     return (Thread_Control *) the_thread_queue->Queues.Fifo.first;
    571 
    572   return NULL;
    573 }
    574 
    575 /*PAGE
    576  *
    577  *  _Thread_queue_Enqueue_priority
    578  *
    579  *  This routine blocks a thread, places it on a thread queue, and optionally
    580  *  starts a timeout timer.
    581  *
    582  *  Input parameters:
    583  *    the_thread_queue - pointer to threadq
    584  *    thread           - thread to insert
    585  *    timeout          - timeout interval in ticks
    586  *
    587  *  Output parameters: NONE
    588  *
    589  *  INTERRUPT LATENCY:
    590  *    forward less than
    591  *    forward equal
    592  */
    593 
    594 void _Thread_queue_Enqueue_priority(
    595   Thread_queue_Control *the_thread_queue,
    596   Thread_Control       *the_thread,
    597   Watchdog_Interval     timeout
    598 )
    599 {
    600   Priority_Control     search_priority;
    601   Thread_Control      *search_thread;
    602   ISR_Level            level;
    603   Chain_Control       *header;
    604   unsigned32           header_index;
    605   Chain_Node          *the_node;
    606   Chain_Node          *next_node;
    607   Chain_Node          *previous_node;
    608   Chain_Node          *search_node;
    609   Priority_Control     priority;
    610   States_Control       block_state;
    611   Thread_queue_States  sync_state;
    612 
    613   _Chain_Initialize_empty( &the_thread->Wait.Block2n );
    614 
    615   priority     = the_thread->current_priority;
    616   header_index = _Thread_queue_Header_number( priority );
    617   header       = &the_thread_queue->Queues.Priority[ header_index ];
    618   block_state  = the_thread_queue->state;
    619 
    620   if ( _Thread_queue_Is_reverse_search( priority ) )
    621     goto restart_reverse_search;
    622 
    623 restart_forward_search:
    624   search_priority = PRIORITY_MINIMUM - 1;
    625   _ISR_Disable( level );
    626   search_thread = (Thread_Control *) header->first;
    627   while ( !_Chain_Is_tail( header, (Chain_Node *)search_thread ) ) {
    628     search_priority = search_thread->current_priority;
    629     if ( priority <= search_priority )
    630       break;
    631 
    632 #if ( CPU_UNROLL_ENQUEUE_PRIORITY == TRUE )
    633     search_thread = (Thread_Control *) search_thread->Object.Node.next;
    634     if ( _Chain_Is_tail( header, (Chain_Node *)search_thread ) )
    635       break;
    636     search_priority = search_thread->current_priority;
    637     if ( priority <= search_priority )
    638       break;
    639 #endif
    640     _ISR_Flash( level );
    641     if ( !_States_Are_set( search_thread->current_state, block_state) ) {
    642       _ISR_Enable( level );
    643       goto restart_forward_search;
    644     }
    645     search_thread =
    646        (Thread_Control *)search_thread->Object.Node.next;
    647   }
    648 
    649   if ( the_thread_queue->sync_state != THREAD_QUEUE_NOTHING_HAPPENED )
    650     goto synchronize;
    651 
    652   the_thread_queue->sync_state = THREAD_QUEUE_SYNCHRONIZED;
    653 
    654   if ( priority == search_priority )
    655     goto equal_priority;
    656 
    657   search_node   = (Chain_Node *) search_thread;
    658   previous_node = search_node->previous;
    659   the_node      = (Chain_Node *) the_thread;
    660 
    661   the_node->next        = search_node;
    662   the_node->previous    = previous_node;
    663   previous_node->next   = the_node;
    664   search_node->previous = the_node;
    665   _ISR_Enable( level );
    666   return;
    667 
    668 restart_reverse_search:
    669   search_priority     = PRIORITY_MAXIMUM + 1;
    670 
    671   _ISR_Disable( level );
    672   search_thread = (Thread_Control *) header->last;
    673   while ( !_Chain_Is_head( header, (Chain_Node *)search_thread ) ) {
    674     search_priority = search_thread->current_priority;
    675     if ( priority >= search_priority )
    676       break;
    677 #if ( CPU_UNROLL_ENQUEUE_PRIORITY == TRUE )
    678     search_thread = (Thread_Control *) search_thread->Object.Node.previous;
    679     if ( _Chain_Is_head( header, (Chain_Node *)search_thread ) )
    680       break;
    681     search_priority = search_thread->current_priority;
    682     if ( priority >= search_priority )
    683       break;
    684 #endif
    685     _ISR_Flash( level );
    686     if ( !_States_Are_set( search_thread->current_state, block_state) ) {
    687       _ISR_Enable( level );
    688       goto restart_reverse_search;
    689     }
    690     search_thread = (Thread_Control *)
    691                          search_thread->Object.Node.previous;
    692   }
    693 
    694   if ( the_thread_queue->sync_state != THREAD_QUEUE_NOTHING_HAPPENED )
    695     goto synchronize;
    696 
    697   the_thread_queue->sync_state = THREAD_QUEUE_SYNCHRONIZED;
    698 
    699   if ( priority == search_priority )
    700     goto equal_priority;
    701 
    702   search_node = (Chain_Node *) search_thread;
    703   next_node   = search_node->next;
    704   the_node    = (Chain_Node *) the_thread;
    705 
    706   the_node->next      = next_node;
    707   the_node->previous  = search_node;
    708   search_node->next   = the_node;
    709   next_node->previous = the_node;
    710   _ISR_Enable( level );
    711   return;
    712 
    713 equal_priority:               /* add at end of priority group */
    714   search_node   = _Chain_Tail( &search_thread->Wait.Block2n );
    715   previous_node = search_node->previous;
    716   the_node      = (Chain_Node *) the_thread;
    717 
    718   the_node->next        = search_node;
    719   the_node->previous    = previous_node;
    720   previous_node->next   = the_node;
    721   search_node->previous = the_node;
    722   _ISR_Enable( level );
    723   return;
    724 
    725 synchronize:
    726 
    727   sync_state = the_thread_queue->sync_state;
    728   the_thread_queue->sync_state = THREAD_QUEUE_SYNCHRONIZED;
    729 
    730   switch ( sync_state ) {
    731     case THREAD_QUEUE_SYNCHRONIZED:
    732       /*
    733        *  This should never happen.  It indicates that someone did not
    734        *  enter a thread queue critical section.
    735        */
    736       break;
    737  
    738     case THREAD_QUEUE_NOTHING_HAPPENED:
    739       /*
    740        *  This should never happen.  All of this was dealt with above.
    741        */
    742       break;
    743  
    744     case THREAD_QUEUE_TIMEOUT:
    745       the_thread->Wait.return_code = the_thread->Wait.queue->timeout_status;
    746       _ISR_Enable( level );
    747       break;
    748  
    749     case THREAD_QUEUE_SATISFIED:
    750       if ( _Watchdog_Is_active( &the_thread->Timer ) ) {
    751         _Watchdog_Deactivate( &the_thread->Timer );
    752         _ISR_Enable( level );
    753         (void) _Watchdog_Remove( &the_thread->Timer );
    754       } else
    755         _ISR_Enable( level );
    756       break;
    757   }
    758  
    759   /*
    760  *  Global objects with thread queues should not be operated on from an
    761    *  ISR.  But the sync code still must allow short timeouts to be processed
    762    *  correctly.
    763    */
    764  
    765   _Thread_Unblock( the_thread );
    766  
    767 #if defined(RTEMS_MULTIPROCESSING)
    768   if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    769     _Thread_MP_Free_proxy( the_thread );
    770 #endif
    771 }
    772 
    773 /*PAGE
    774  *
    775  *  _Thread_queue_Dequeue_priority
    776  *
    777  *  This routine removes a thread from the specified PRIORITY based
    778  *  threadq, unblocks it, and cancels its timeout timer.
    779  *
    780  *  Input parameters:
    781  *    the_thread_queue - pointer to thread queue
    782  *
    783  *  Output parameters:
    784  *    returns - thread dequeued or NULL
    785  *
    786  *  INTERRUPT LATENCY:
    787  *    only case
    788  */
    789 
    790 Thread_Control *_Thread_queue_Dequeue_priority(
    791   Thread_queue_Control *the_thread_queue
    792 )
    793 {
    794   unsigned32      index;
    795   ISR_Level       level;
    796   Thread_Control *the_thread = NULL;  /* just to remove warnings */
    797   Thread_Control *new_first_thread;
    798   Chain_Node     *new_first_node;
    799   Chain_Node     *new_second_node;
    800   Chain_Node     *last_node;
    801   Chain_Node     *next_node;
    802   Chain_Node     *previous_node;
    803 
    804   _ISR_Disable( level );
    805   for( index=0 ;
    806        index < TASK_QUEUE_DATA_NUMBER_OF_PRIORITY_HEADERS ;
    807        index++ ) {
    808     if ( !_Chain_Is_empty( &the_thread_queue->Queues.Priority[ index ] ) ) {
    809       the_thread = (Thread_Control *)
    810                     the_thread_queue->Queues.Priority[ index ].first;
    811       goto dequeue;
    812     }
    813   }
    814 
    815   switch ( the_thread_queue->sync_state ) {
    816     case THREAD_QUEUE_SYNCHRONIZED:
    817     case THREAD_QUEUE_SATISFIED:
    818       _ISR_Enable( level );
    819       return NULL;
    820 
    821     case THREAD_QUEUE_NOTHING_HAPPENED:
    822     case THREAD_QUEUE_TIMEOUT:
    823       the_thread_queue->sync_state = THREAD_QUEUE_SATISFIED;
    824       _ISR_Enable( level );
    825       return _Thread_Executing;
    826   }
    827 
    828 dequeue:
    829   new_first_node   = the_thread->Wait.Block2n.first;
    830   new_first_thread = (Thread_Control *) new_first_node;
    831   next_node        = the_thread->Object.Node.next;
    832   previous_node    = the_thread->Object.Node.previous;
    833 
    834   if ( !_Chain_Is_empty( &the_thread->Wait.Block2n ) ) {
    835     last_node       = the_thread->Wait.Block2n.last;
    836     new_second_node = new_first_node->next;
    837 
    838     previous_node->next      = new_first_node;
    839     next_node->previous      = new_first_node;
    840     new_first_node->next     = next_node;
    841     new_first_node->previous = previous_node;
    842 
    843     if ( !_Chain_Has_only_one_node( &the_thread->Wait.Block2n ) ) {
    844                                                 /* > two threads on 2-n */
    845       new_second_node->previous =
    846                 _Chain_Head( &new_first_thread->Wait.Block2n );
    847 
    848       new_first_thread->Wait.Block2n.first = new_second_node;
    849       new_first_thread->Wait.Block2n.last  = last_node;
    850 
    851       last_node->next = _Chain_Tail( &new_first_thread->Wait.Block2n );
    852     }
    853   } else {
    854     previous_node->next = next_node;
    855     next_node->previous = previous_node;
    856   }
    857 
    858   if ( !_Watchdog_Is_active( &the_thread->Timer ) ) {
    859     _ISR_Enable( level );
    860     _Thread_Unblock( the_thread );
    861   } else {
    862     _Watchdog_Deactivate( &the_thread->Timer );
    863     _ISR_Enable( level );
    864     (void) _Watchdog_Remove( &the_thread->Timer );
    865     _Thread_Unblock( the_thread );
    866   }
    867 
    868 #if defined(RTEMS_MULTIPROCESSING)
    869   if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    870     _Thread_MP_Free_proxy( the_thread );
    871 #endif
    872   return( the_thread );
    873 }
    874 
    875 /*PAGE
    876  *
    877  *  _Thread_queue_Extract_priority
    878  *
    879  *  This routine removes a specific thread from the specified threadq,
    880  *  deletes any timeout, and unblocks the thread.
    881  *
    882  *  Input parameters:
    883  *    the_thread_queue - pointer to a threadq header
    884  *    the_thread       - pointer to a thread control block
    885  *
    886  *  Output parameters: NONE
    887  *
    888  *  INTERRUPT LATENCY:
    889  *    EXTRACT_PRIORITY
    890  */
    891 
    892 void _Thread_queue_Extract_priority(
    893   Thread_queue_Control *the_thread_queue,
    894   Thread_Control       *the_thread
    895 )
    896 {
    897   ISR_Level              level;
    898   Chain_Node     *the_node;
    899   Chain_Node     *next_node;
    900   Chain_Node     *previous_node;
    901   Thread_Control *new_first_thread;
    902   Chain_Node     *new_first_node;
    903   Chain_Node     *new_second_node;
    904   Chain_Node     *last_node;
    905 
    906   the_node = (Chain_Node *) the_thread;
    907   _ISR_Disable( level );
    908   if ( _States_Is_waiting_on_thread_queue( the_thread->current_state ) ) {
    909     next_node     = the_node->next;
    910     previous_node = the_node->previous;
    911 
    912     if ( !_Chain_Is_empty( &the_thread->Wait.Block2n ) ) {
    913       new_first_node   = the_thread->Wait.Block2n.first;
    914       new_first_thread = (Thread_Control *) new_first_node;
    915       last_node        = the_thread->Wait.Block2n.last;
    916       new_second_node  = new_first_node->next;
    917 
    918       previous_node->next      = new_first_node;
    919       next_node->previous      = new_first_node;
    920       new_first_node->next     = next_node;
    921       new_first_node->previous = previous_node;
    922 
    923       if ( !_Chain_Has_only_one_node( &the_thread->Wait.Block2n ) ) {
    924                                           /* > two threads on 2-n */
    925         new_second_node->previous =
    926                   _Chain_Head( &new_first_thread->Wait.Block2n );
    927         new_first_thread->Wait.Block2n.first = new_second_node;
    928 
    929         new_first_thread->Wait.Block2n.last = last_node;
    930         last_node->next = _Chain_Tail( &new_first_thread->Wait.Block2n );
    931       }
    932     } else {
    933       previous_node->next = next_node;
    934       next_node->previous = previous_node;
    935     }
    936 
    937     if ( !_Watchdog_Is_active( &the_thread->Timer ) ) {
    938       _ISR_Enable( level );
    939       _Thread_Unblock( the_thread );
    940     } else {
    941       _Watchdog_Deactivate( &the_thread->Timer );
    942       _ISR_Enable( level );
    943       (void) _Watchdog_Remove( &the_thread->Timer );
    944       _Thread_Unblock( the_thread );
    945     }
    946 
    947 #if defined(RTEMS_MULTIPROCESSING)
    948     if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    949       _Thread_MP_Free_proxy( the_thread );
    950 #endif
    951   }
    952   else
    953     _ISR_Enable( level );
    954 }
    955 
    956 /*PAGE
    957  *
    958  *  _Thread_queue_First_priority
    959  *
    960  *  This routine returns a pointer to the first thread on the
    961  *  specified threadq.
    962  *
    963  *  Input parameters:
    964  *    the_thread_queue - pointer to thread queue
    965  *
    966  *  Output parameters:
    967  *    returns - first thread or NULL
    968  */
    969 
    970 Thread_Control *_Thread_queue_First_priority (
    971   Thread_queue_Control *the_thread_queue
    972 )
    973 {
    974   unsigned32 index;
    975 
    976   for( index=0 ;
    977        index < TASK_QUEUE_DATA_NUMBER_OF_PRIORITY_HEADERS ;
    978        index++ ) {
    979     if ( !_Chain_Is_empty( &the_thread_queue->Queues.Priority[ index ] ) )
    980       return (Thread_Control *)
    981         the_thread_queue->Queues.Priority[ index ].first;
    982   }
    983   return NULL;
    984 }
    985 #endif
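For reference, the heart of _Thread_queue_Enqueue_priority above is a forward (or reverse) walk of a doubly linked chain for the first thread whose priority value is not smaller than the new one, with equal-priority threads parked on that thread's Wait.Block2n sub-chain so they stay FIFO within the priority group. A much simplified standalone sketch of the forward-search insertion (illustrative types, no ISR disabling or sync-state handling, and equal priorities kept FIFO directly in the main chain instead of on a sub-chain):

    #include <stdio.h>

    /* illustrative stand-ins for the score Chain_Node/Thread_Control types */
    typedef struct Node {
      struct Node *next;
      struct Node *previous;
      unsigned int priority;          /* lower value means more important  */
      const char  *name;
    } Node;

    typedef struct {
      Node head;                      /* head.next is the first real node  */
      Node tail;                      /* tail.previous is the last one     */
    } Chain;

    static void chain_initialize( Chain *chain )
    {
      chain->head.next     = &chain->tail;
      chain->head.previous = NULL;
      chain->tail.next     = NULL;
      chain->tail.previous = &chain->head;
    }

    /* forward search: stop at the first node that is not more important */
    static void enqueue_priority( Chain *chain, Node *the_node )
    {
      Node *search = chain->head.next;

      while ( search != &chain->tail && search->priority <= the_node->priority )
        search = search->next;

      /* splice the_node in just before the search node */
      the_node->next         = search;
      the_node->previous     = search->previous;
      search->previous->next = the_node;
      search->previous       = the_node;
    }

    int main( void )
    {
      Chain queue;
      Node  a = { NULL, NULL, 10, "low"       };
      Node  b = { NULL, NULL,  1, "high"      };
      Node  c = { NULL, NULL, 10, "low-later" };
      Node *iter;

      chain_initialize( &queue );
      enqueue_priority( &queue, &a );
      enqueue_priority( &queue, &b );
      enqueue_priority( &queue, &c );

      /* prints: high (1), low (10), low-later (10) */
      for ( iter = queue.head.next ; iter != &queue.tail ; iter = iter->next )
        printf( "%s (%u)\n", iter->name, iter->priority );

      return 0;
    }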
  • cpukit/score/src/coretod.c

    rdfbfa2b0 r93b4e6ef  
    5959  _TOD_Activate( _TOD_Ticks_per_second );
    6060}
    61 
    62 /*PAGE
    63  *
    64  *  _TOD_Set
    65  *
    66  *  This routine sets the current date and time from the specified
    67  *  new date and time structure.
    68  *
    69  *  Input parameters:
    70  *    the_tod             - pointer to the time and date structure
    71  *    seconds_since_epoch - seconds since system epoch
    72  *
    73  *  Output parameters: NONE
    74  */
    75 
    76 void _TOD_Set(
    77   TOD_Control *the_tod,
    78   Watchdog_Interval  seconds_since_epoch
    79 )
    80 {
    81   Watchdog_Interval ticks_until_next_second;
    82 
    83   _Thread_Disable_dispatch();
    84   _TOD_Deactivate();
    85 
    86   if ( seconds_since_epoch < _TOD_Seconds_since_epoch )
    87     _Watchdog_Adjust_seconds( WATCHDOG_BACKWARD,
    88        _TOD_Seconds_since_epoch - seconds_since_epoch );
    89   else
    90     _Watchdog_Adjust_seconds( WATCHDOG_FORWARD,
    91        seconds_since_epoch - _TOD_Seconds_since_epoch );
    92 
    93   ticks_until_next_second = _TOD_Ticks_per_second;
    94   if ( ticks_until_next_second > _TOD_Current.ticks )
    95     ticks_until_next_second -= _TOD_Current.ticks;
    96 
    97   _TOD_Current             = *the_tod;
    98   _TOD_Seconds_since_epoch = seconds_since_epoch;
    99   _TOD_Is_set              = TRUE;
    100   _TOD_Activate( ticks_until_next_second );
    101 
    102   _Thread_Enable_dispatch();
    103 }
    104 
    105 /*PAGE
    106  *
    107  *  _TOD_Validate
    108  *
    109  *  This kernel routine checks the validity of a date and time structure.
    110  *
    111  *  Input parameters:
    112  *    the_tod - pointer to a time and date structure
    113  *
    114  *  Output parameters:
    115  *    TRUE  - if the date, time, and tick are valid
    116  *    FALSE - if the the_tod is invalid
    117  *
    118  *  NOTE: This routine only works for leap-years through 2099.
    119  */
    120 
    121 boolean _TOD_Validate(
    122   TOD_Control *the_tod
    123 )
    124 {
    125   unsigned32 days_in_month;
    126 
    127   if ((the_tod->ticks  >= _TOD_Ticks_per_second)  ||
    128       (the_tod->second >= TOD_SECONDS_PER_MINUTE) ||
    129       (the_tod->minute >= TOD_MINUTES_PER_HOUR)   ||
    130       (the_tod->hour   >= TOD_HOURS_PER_DAY)      ||
    131       (the_tod->month  == 0)                      ||
    132       (the_tod->month  >  TOD_MONTHS_PER_YEAR)    ||
    133       (the_tod->year   <  TOD_BASE_YEAR)          ||
    134       (the_tod->day    == 0) )
    135      return FALSE;
    136 
    137   if ( (the_tod->year % 4) == 0 )
    138     days_in_month = _TOD_Days_per_month[ 1 ][ the_tod->month ];
    139   else
    140     days_in_month = _TOD_Days_per_month[ 0 ][ the_tod->month ];
    141 
    142   if ( the_tod->day > days_in_month )
    143     return FALSE;
    144 
    145   return TRUE;
    146 }
    147 
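The 2099 note above follows from the (year % 4) test used here and in _TOD_To_seconds: 2100 is divisible by 4 but, under the full Gregorian rule, is not a leap year, so the shortcut first disagrees with the calendar in 2100. A minimal standalone sketch contrasting the two tests (illustrative only, not part of this changeset):

    #include <stdio.h>

    /* Full Gregorian rule: every fourth year, except century years not divisible by 400. */
    static int is_leap_gregorian( unsigned year )
    {
      return ( year % 4 == 0 ) && ( year % 100 != 0 || year % 400 == 0 );
    }

    /* The mod-4 shortcut used by _TOD_Validate and _TOD_To_seconds. */
    static int is_leap_mod4( unsigned year )
    {
      return ( year % 4 ) == 0;
    }

    int main( void )
    {
      unsigned years[] = { 1988, 1999, 2000, 2099, 2100 };
      unsigned i;

      for ( i = 0 ; i < sizeof( years ) / sizeof( years[ 0 ] ) ; i++ )
        printf( "%u: gregorian=%d mod4=%d\n",
                years[ i ], is_leap_gregorian( years[ i ] ), is_leap_mod4( years[ i ] ) );

      return 0;  /* the two first disagree at 2100, hence the note above */
    }
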
    148 /*PAGE
    149  *
    150  *  _TOD_To_seconds
    151  *
    152  *  This routine returns the seconds from the epoch until the
    153  *  current date and time.
    154  *
    155  *  Input parameters:
    156  *    the_tod - pointer to the time and date structure
    157  *
    158  *  Output parameters:
    159  *    returns    - seconds since epoch until the_tod
    160  */
    161 
    162 unsigned32 _TOD_To_seconds(
    163   TOD_Control *the_tod
    164 )
    165 {
    166   unsigned32 time;
    167   unsigned32 year_mod_4;
    168 
    169   time = the_tod->day - 1;
    170   year_mod_4 = the_tod->year & 3;
    171 
    172   if ( year_mod_4 == 0 )
    173     time += _TOD_Days_to_date[ 1 ][ the_tod->month ];
    174   else
    175     time += _TOD_Days_to_date[ 0 ][ the_tod->month ];
    176 
    177   time += ( (the_tod->year - TOD_BASE_YEAR) / 4 ) *
    178             ( (TOD_DAYS_PER_YEAR * 4) + 1);
    179 
    180   time += _TOD_Days_since_last_leap_year[ year_mod_4 ];
    181 
    182   time *= TOD_SECONDS_PER_DAY;
    183 
    184   time += ((the_tod->hour * TOD_MINUTES_PER_HOUR) + the_tod->minute)
    185              * TOD_SECONDS_PER_MINUTE;
    186 
    187   time += the_tod->second;
    188 
    189   return( time );
    190 }
    191 
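The accumulation in _TOD_To_seconds can be checked in isolation: whole days come from the day of the month, the days-to-start-of-month table, the completed four-year groups since the base year, and the days contributed by the years since the last leap year; the total is then scaled to seconds. A self-contained sketch with local copies of the tables (the January 1, 1988 epoch and the table values are assumptions restated here, not taken from this changeset):

    #include <stdio.h>

    #define BASE_YEAR 1988   /* assumption: the RTEMS epoch, TOD_BASE_YEAR */

    /* cumulative days before each month (index 1..12), non-leap and leap rows */
    static const unsigned days_to_date[ 2 ][ 13 ] = {
      { 0, 0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334 },
      { 0, 0, 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335 }
    };

    /* days contributed by the years elapsed since the last leap year in a group */
    static const unsigned days_since_last_leap[ 4 ] = { 0, 366, 731, 1096 };

    static unsigned long to_seconds(
      unsigned year, unsigned month, unsigned day,
      unsigned hour, unsigned minute, unsigned second )
    {
      unsigned      leap = ( year % 4 == 0 ) ? 1 : 0;
      unsigned long days;

      days  = day - 1;
      days += days_to_date[ leap ][ month ];
      days += ( ( year - BASE_YEAR ) / 4 ) * ( 4UL * 365 + 1 );
      days += days_since_last_leap[ year % 4 ];

      return days * 86400UL + ( hour * 60UL + minute ) * 60UL + second;
    }

    int main( void )
    {
      /* one second past the epoch, and the first second of 1991 (1096 days later) */
      printf( "%lu\n", to_seconds( 1988, 1, 1, 0, 0, 1 ) );   /* 1 */
      printf( "%lu\n", to_seconds( 1991, 1, 1, 0, 0, 0 ) );   /* 1096 * 86400 = 94694400 */
      return 0;
    }
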
    192 /*PAGE
    193  *
    194  *  _TOD_Tickle
    195  *
    196  *  This routine updates the calendar time and tickles the
    197  *  per second watchdog timer chain.
    198  *
    199  *  Input parameters:
    200  *    ignored - this parameter is ignored
    201  *
    202  *  Output parameters: NONE
    203  *
    204  *  NOTE: This routine only works for leap-years through 2099.
    205  */
    206 
    207 void _TOD_Tickle(
    208   Objects_Id  id,
    209   void       *ignored
    210 )
    211 {
    212   unsigned32 leap;
    213 
    214   _TOD_Current.ticks = 0;
    215   ++_TOD_Seconds_since_epoch;
    216   if ( ++_TOD_Current.second >= TOD_SECONDS_PER_MINUTE ) {
    217     _TOD_Current.second = 0;
    218     if ( ++_TOD_Current.minute >= TOD_MINUTES_PER_HOUR ) {
    219       _TOD_Current.minute = 0;
    220       if ( ++_TOD_Current.hour >= TOD_HOURS_PER_DAY ) {
    221         _TOD_Current.hour = 0;
    222         if ( _TOD_Current.year & 0x3 ) leap = 0;
    223         else                           leap = 1;
    224         if ( ++_TOD_Current.day >
    225                _TOD_Days_per_month[ leap ][ _TOD_Current.month ]) {
    226           _TOD_Current.day = 1;
    227           if ( ++_TOD_Current.month > TOD_MONTHS_PER_YEAR ) {
    228             _TOD_Current.month = 1;
    229             _TOD_Current.year++;
    230           }
    231         }
    232       }
    233     }
    234   }
    235 
    236   _Watchdog_Tickle_seconds();
    237   _Watchdog_Insert_ticks( &_TOD_Seconds_watchdog, _TOD_Ticks_per_second );
    238 }
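_TOD_Tickle is a cascading carry over the broken-down time, followed by re-arming the per-second watchdog so the routine keeps firing once a second. A minimal sketch of just the carry, ticked across a leap-day boundary (illustrative only; the real routine also bumps _TOD_Seconds_since_epoch and runs the seconds watchdog chain):

    #include <stdio.h>

    struct tod { unsigned year, month, day, hour, minute, second; };

    static const unsigned days_per_month[ 2 ][ 13 ] = {
      { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 },
      { 0, 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }
    };

    /* one-second cascade, mirroring the rollover structure of _TOD_Tickle */
    static void tick( struct tod *t )
    {
      unsigned leap = ( t->year % 4 == 0 ) ? 1 : 0;

      if ( ++t->second < 60 ) return;
      t->second = 0;
      if ( ++t->minute < 60 ) return;
      t->minute = 0;
      if ( ++t->hour < 24 ) return;
      t->hour = 0;
      if ( ++t->day <= days_per_month[ leap ][ t->month ] ) return;
      t->day = 1;
      if ( ++t->month <= 12 ) return;
      t->month = 1;
      t->year++;
    }

    int main( void )
    {
      struct tod t = { 1992, 2, 28, 23, 59, 59 };   /* last second of Feb 28 in a leap year */

      tick( &t );
      printf( "%04u-%02u-%02u %02u:%02u:%02u\n",
              t.year, t.month, t.day, t.hour, t.minute, t.second );   /* 1992-02-29 00:00:00 */
      return 0;
    }
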
  • cpukit/score/src/heap.c

    rdfbfa2b0 r93b4e6ef  
    9393}
    9494
    95 /*PAGE
    96  *
    97  *  _Heap_Extend
    98  *
    99  *  This routine grows the_heap memory area using the size bytes which
    100  *  begin at starting_address.
    101  *
    102  *  Input parameters:
    103  *    the_heap          - pointer to heap header.
    104  *    starting_address  - pointer to the memory area.
    105  *    size              - size in bytes of the memory area being added.
    106  *
    107  *  Output parameters:
    108  *    *amount_extended  - amount of memory added to the_heap
    109  */
    110 
    111 Heap_Extend_status _Heap_Extend(
    112   Heap_Control        *the_heap,
    113   void                *starting_address,
    114   unsigned32           size,
    115   unsigned32          *amount_extended
    116 )
    117 {
    118   Heap_Block        *the_block;
    119   unsigned32        *p;
    120  
    121   /*
    122    *  The overhead was taken from the original heap memory.
    123    */
    124 
    125   Heap_Block  *old_final;
    126   Heap_Block  *new_final;
    127 
    128   /*
    129    *  There are five possibilities for the location of starting
    130    *  address:
    131    *
    132    *    1. non-contiguous lower address     (NOT SUPPORTED)
    133    *    2. contiguous lower address         (NOT SUPPORTED)
    134    *    3. in the heap                      (ERROR)
    135    *    4. contiguous higher address        (SUPPORTED)
    136    *    5. non-contiguous higher address    (NOT SUPPORTED)
    137    *
    138    *  As noted, this code only supports (4).
    139    */
    140 
    141   if ( starting_address >= (void *) the_heap->start &&        /* case 3 */
    142        starting_address <= (void *) the_heap->final
    143      )
    144     return HEAP_EXTEND_ERROR;
    145 
    146   if ( starting_address < (void *) the_heap->start ) {  /* cases 1 and 2 */
    147 
    148       return HEAP_EXTEND_NOT_IMPLEMENTED;               /* cases 1 and 2 */
    149 
    150   } else {                                              /* cases 4 and 5 */
    151 
    152     the_block = (Heap_Block *)
    153        _Addresses_Subtract_offset( starting_address, HEAP_OVERHEAD );
    154     if ( the_block != the_heap->final )
    155       return HEAP_EXTEND_NOT_IMPLEMENTED;                   /* case 5 */
    156   }
    157 
    158   /*
    159    *  Currently only case 4 should make it to this point.
    160    *  The basic trick is to make the extend area look like a used
    161    *  block and free it.
    162    */
    163 
    164   *amount_extended = size;
    165 
    166   old_final = the_heap->final;
    167   new_final = _Addresses_Add_offset( old_final, size );
    168   /* SAME AS: _Addresses_Add_offset( starting_address, size-HEAP_OVERHEAD ); */
    169 
    170   the_heap->final = new_final;
    171 
    172   old_final->front_flag =
    173   new_final->back_flag  = _Heap_Build_flag( size, HEAP_BLOCK_USED );
    174   new_final->front_flag = HEAP_DUMMY_FLAG;
    175 
    176   /*
    177    *  Must pass in address of "user" area
    178    *  So add in the offset field.
    179    */
    180 
    181   p = (unsigned32 *) &old_final->next;
    182   *p = sizeof(unsigned32);
    183   p++;
    184   _Heap_Free( the_heap, p );
    185  
    186   return HEAP_EXTEND_SUCCESSFUL;
    187 }
    188 
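The front_flag and back_flag written by _Heap_Extend (and used throughout _Heap_Allocate, _Heap_Free and _Heap_Walk) are boundary tags: each carries a block size with an in-use status folded in by _Heap_Build_flag, so a block's size can be read from either end. A toy model of the encoding, assuming the status is a low bit OR'd into the size (the actual HEAP_BLOCK_USED constant does not appear in this changeset, so its value is an assumption):

    #include <stdio.h>

    #define BLOCK_USED 1u   /* assumed status bit; the allocator keeps sizes page aligned,
                               so the low bit is available for it */

    static unsigned build_flag( unsigned size, unsigned in_use )
    {
      return in_use ? ( size | BLOCK_USED ) : size;
    }

    static unsigned block_size( unsigned flag )    { return flag & ~BLOCK_USED; }
    static int      block_is_free( unsigned flag ) { return ( flag & BLOCK_USED ) == 0; }

    int main( void )
    {
      unsigned used_flag = build_flag( 256, 1 );
      unsigned free_flag = build_flag( 256, 0 );

      printf( "used: size=%u free=%d\n", block_size( used_flag ), block_is_free( used_flag ) );
      printf( "free: size=%u free=%d\n", block_size( free_flag ), block_is_free( free_flag ) );
      return 0;
    }
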
    189 /*PAGE
    190  *
    191  *  _Heap_Allocate
    192  *
    193  *  This kernel routine allocates the requested size of memory
    194  *  from the specified heap.
    195  *
    196  *  Input parameters:
    197  *    the_heap  - pointer to heap header.
    198  *    size      - size in bytes of the memory block to allocate.
    199  *
    200  *  Output parameters:
    201  *    returns - starting address of memory block allocated
    202  */
    203 
    204 void *_Heap_Allocate(
    205   Heap_Control        *the_heap,
    206   unsigned32           size
    207 )
    208 {
    209   unsigned32  excess;
    210   unsigned32  the_size;
    211   Heap_Block *the_block;
    212   Heap_Block *next_block;
    213   Heap_Block *temporary_block;
    214   void       *ptr;
    215   unsigned32  offset;
    216  
    217   excess   = size % the_heap->page_size;
    218   the_size = size + the_heap->page_size + HEAP_BLOCK_USED_OVERHEAD;
    219  
    220   if ( excess )
    221     the_size += the_heap->page_size - excess;
    222 
    223   if ( the_size < sizeof( Heap_Block ) )
    224     the_size = sizeof( Heap_Block );
    225 
    226   for ( the_block = the_heap->first;
    227         ;
    228         the_block = the_block->next ) {
    229     if ( the_block == _Heap_Tail( the_heap ) )
    230       return( NULL );
    231     if ( the_block->front_flag >= the_size )
    232       break;
    233   }
    234 
    235   if ( (the_block->front_flag - the_size) >
    236        (the_heap->page_size + HEAP_BLOCK_USED_OVERHEAD) ) {
    237     the_block->front_flag -= the_size;
    238     next_block             = _Heap_Next_block( the_block );
    239     next_block->back_flag  = the_block->front_flag;
    240 
    241     temporary_block            = _Heap_Block_at( next_block, the_size );
    242     temporary_block->back_flag =
    243     next_block->front_flag     = _Heap_Build_flag( the_size,
    244                                     HEAP_BLOCK_USED );
    245     ptr = _Heap_Start_of_user_area( next_block );
    246   } else {
    247     next_block                = _Heap_Next_block( the_block );
    248     next_block->back_flag     = _Heap_Build_flag( the_block->front_flag,
    249                                    HEAP_BLOCK_USED );
    250     the_block->front_flag     = next_block->back_flag;
    251     the_block->next->previous = the_block->previous;
    252     the_block->previous->next = the_block->next;
    253     ptr = _Heap_Start_of_user_area( the_block );
    254   }
    255  
    256   /*
    257    * round ptr up to a multiple of page size
    258    * Have to save the bump amount in the buffer so that free can figure it out
    259    */
    260  
    261   offset = the_heap->page_size - (((unsigned32) ptr) & (the_heap->page_size - 1));
    262   ptr = _Addresses_Add_offset( ptr, offset );
    263   *(((unsigned32 *) ptr) - 1) = offset;
    264 
    265 #ifdef RTEMS_DEBUG
    266   {
    267       unsigned32 ptr_u32;
    268       ptr_u32 = (unsigned32) ptr;
    269       if (ptr_u32 & (the_heap->page_size - 1))
    270           abort();
    271   }
    272 #endif
    273 
    274   return ptr;
    275 }
    276 
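The tail of _Heap_Allocate rounds the returned pointer up to a page_size boundary and stores the bump amount in the word just below the user area so the free path (via _Heap_User_block_at) can step back to the real block. The same trick in a self-contained form, layered over malloc purely for illustration (aligned_alloc_demo and aligned_free_demo are hypothetical names, not RTEMS routines):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    /* over-allocate, round up to the alignment, and stash the offset back to the
       raw pointer just before the user area so the matching free can undo it */
    static void *aligned_alloc_demo( size_t size, size_t align )   /* align: power of two */
    {
      unsigned char *raw = malloc( size + align + sizeof( size_t ) );
      uintptr_t      user;

      if ( raw == NULL )
        return NULL;

      user = ( (uintptr_t) raw + sizeof( size_t ) + align - 1 ) & ~(uintptr_t) ( align - 1 );
      ( (size_t *) user )[ -1 ] = (size_t) ( user - (uintptr_t) raw );   /* the bump amount */
      return (void *) user;
    }

    static void aligned_free_demo( void *p )
    {
      size_t offset = ( (size_t *) p )[ -1 ];

      free( (unsigned char *) p - offset );
    }

    int main( void )
    {
      void *p = aligned_alloc_demo( 100, 64 );

      if ( p != NULL ) {
        printf( "%p is 64-aligned: %d\n", p, ( (uintptr_t) p & 63 ) == 0 );
        aligned_free_demo( p );
      }
      return 0;
    }
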
    277 /*PAGE
    278  *
    279  *  _Heap_Size_of_user_area
    280  *
    281  *  This kernel routine returns the size of the memory area
    282  *  given heap block.
    283  *
    284  *  Input parameters:
    285  *    the_heap         - pointer to heap header
    286  *    starting_address - starting address of the memory block to free.
    287  *    size             - pointer to size of area
    288  *
    289  *  Output parameters:
    290  *    size  - size of area filled in
    291  *    TRUE  - if starting_address is valid heap address
    292  *    FALSE - if starting_address is invalid heap address
    293  */
    294 
    295 boolean _Heap_Size_of_user_area(
    296   Heap_Control        *the_heap,
    297   void                *starting_address,
    298   unsigned32          *size
    299 )
    300 {
    301   Heap_Block        *the_block;
    302   Heap_Block        *next_block;
    303   unsigned32         the_size;
    304 
    305   the_block = _Heap_User_block_at( starting_address );
    306  
    307   if ( !_Heap_Is_block_in( the_heap, the_block ) ||
    308         _Heap_Is_block_free( the_block ) )
    309     return( FALSE );
    310 
    311   the_size   = _Heap_Block_size( the_block );
    312   next_block = _Heap_Block_at( the_block, the_size );
    313 
    314   if ( !_Heap_Is_block_in( the_heap, next_block ) ||
    315        (the_block->front_flag != next_block->back_flag) )
    316     return( FALSE );
    317 
    318   *size = the_size;
    319   return( TRUE );
    320 }
    321 
    322 /*PAGE
    323  *
    324  *  _Heap_Free
    325  *
    326  *  This kernel routine returns the memory designated by the
    327  *  given heap and given starting address to the memory pool.
    328  *
    329  *  Input parameters:
    330  *    the_heap         - pointer to heap header
    331  *    starting_address - starting address of the memory block to free.
    332  *
    333  *  Output parameters:
    334  *    TRUE  - if starting_address is valid heap address
    335  *    FALSE - if starting_address is invalid heap address
    336  */
    337 
    338 boolean _Heap_Free(
    339   Heap_Control        *the_heap,
    340   void                *starting_address
    341 )
    342 {
    343   Heap_Block        *the_block;
    344   Heap_Block        *next_block;
    345   Heap_Block        *new_next_block;
    346   Heap_Block        *previous_block;
    347   Heap_Block        *temporary_block;
    348   unsigned32         the_size;
    349 
    350   the_block = _Heap_User_block_at( starting_address );
    351 
    352   if ( !_Heap_Is_block_in( the_heap, the_block ) ||
    353         _Heap_Is_block_free( the_block ) ) {
    354       return( FALSE );
    355   }
    356 
    357   the_size   = _Heap_Block_size( the_block );
    358   next_block = _Heap_Block_at( the_block, the_size );
    359 
    360   if ( !_Heap_Is_block_in( the_heap, next_block ) ||
    361        (the_block->front_flag != next_block->back_flag) ) {
    362       return( FALSE );
    363   }
    364 
    365   if ( _Heap_Is_previous_block_free( the_block ) ) {
    366     previous_block = _Heap_Previous_block( the_block );
    367 
    368     if ( !_Heap_Is_block_in( the_heap, previous_block ) ) {
    369         return( FALSE );
    370     }
    371 
    372     if ( _Heap_Is_block_free( next_block ) ) {      /* coalesce both */
    373       previous_block->front_flag += next_block->front_flag + the_size;
    374       temporary_block             = _Heap_Next_block( previous_block );
    375       temporary_block->back_flag  = previous_block->front_flag;
    376       next_block->next->previous  = next_block->previous;
    377       next_block->previous->next  = next_block->next;
    378     }
    379     else {                     /* coalesce prev */
    380       previous_block->front_flag =
    381       next_block->back_flag      = previous_block->front_flag + the_size;
    382     }
    383   }
    384   else if ( _Heap_Is_block_free( next_block ) ) { /* coalesce next */
    385     the_block->front_flag     = the_size + next_block->front_flag;
    386     new_next_block            = _Heap_Next_block( the_block );
    387     new_next_block->back_flag = the_block->front_flag;
    388     the_block->next           = next_block->next;
    389     the_block->previous       = next_block->previous;
    390     next_block->previous->next = the_block;
    391     next_block->next->previous = the_block;
    392 
    393     if (the_heap->first == next_block)
    394         the_heap->first = the_block;
    395   }
    396   else {                          /* no coalesce */
    397     next_block->back_flag     =
    398     the_block->front_flag     = the_size;
    399     the_block->previous       = _Heap_Head( the_heap );
    400     the_block->next           = the_heap->first;
    401     the_heap->first           = the_block;
    402     the_block->next->previous = the_block;
    403   }
    404 
    405   return( TRUE );
    406 }
    407 
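_Heap_Free distinguishes four cases: merge with both neighbours, with the previous block only, with the next block only, or with neither; the first three are what keep the heap from fragmenting into unusable slivers. A toy model of the same decision, using an array of physically adjacent blocks in place of boundary tags and ignoring the free-list bookkeeping (coalesce and struct blk are illustrative names):

    #include <stdio.h>

    struct blk { unsigned size; int is_free; };

    /* merge a newly freed block with whichever physical neighbours are free */
    static unsigned coalesce( struct blk b[], unsigned n, unsigned i )
    {
      b[ i ].is_free = 1;

      if ( i + 1 < n && b[ i + 1 ].is_free ) {   /* absorb the next block */
        b[ i ].size    += b[ i + 1 ].size;
        b[ i + 1 ].size = 0;
      }
      if ( i > 0 && b[ i - 1 ].is_free ) {       /* fold into the previous block */
        b[ i - 1 ].size += b[ i ].size;
        b[ i ].size      = 0;
        i = i - 1;
      }
      return i;   /* index of the surviving free block */
    }

    int main( void )
    {
      struct blk heap[] = { { 64, 1 }, { 32, 0 }, { 128, 1 } };
      unsigned   k = coalesce( heap, 3, 1 );

      printf( "merged block: index %u, size %u\n", k, heap[ k ].size );   /* index 0, size 224 */
      return 0;
    }
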
    408 /*PAGE
    409  *
    410  *  _Heap_Walk
    411  *
    412  *  This kernel routine walks the heap and verifies its correctness.
    413  *
    414  *  Input parameters:
    415  *    the_heap  - pointer to heap header
    416  *    source    - a numeric indicator of the invoker of this routine
    417  *    do_dump   - when TRUE print the information
    418  *
    419  *  Output parameters: NONE
    420  */
    421 
    422 #ifndef RTEMS_DEBUG
    423 
    424 void _Heap_Walk(
    425   Heap_Control  *the_heap,
    426   int            source,
    427   boolean        do_dump
    428 )
    429 {
    430 }
    431 
    432 #else
    433 
    434 #include <stdio.h>
    435 #include <unistd.h>
    436 
    437 void _Heap_Walk(
    438   Heap_Control  *the_heap,
    439   int            source,
    440   boolean        do_dump
    441 )
    442 {
    443   Heap_Block *the_block  = 0;  /* avoid warnings */
    444   Heap_Block *next_block = 0;  /* avoid warnings */
    445   int         notdone = 1;
    446   int         error = 0;
    447   int         passes = 0;
    448 
    449   /*
    450    * We don't want to allow walking the heap until we have
    451    * transferred control to the user task so we watch the
    452    * system state.
    453    */
    454 
    455   if ( !_System_state_Is_up( _System_state_Get() ) )
    456     return;
    457 
    458   the_block = the_heap->start;
    459 
    460   if (do_dump == TRUE) {
    461     printf("\nPASS: %d  start @ 0x%p   final 0x%p,   first 0x%p  last 0x%p\n",
    462             source, the_heap->start, the_heap->final,
    463                   the_heap->first, the_heap->last
    464           );
    465   }
    466 
    467   /*
    468    * Handle the 1st block
    469    */
    470 
    471   if (the_block->back_flag != HEAP_DUMMY_FLAG) {
    472     printf("PASS: %d  Back flag of 1st block isn't HEAP_DUMMY_FLAG\n", source);
    473     error = 1;
    474   }
    475 
    476   while (notdone) {
    477     passes++;
    478     if (error && (passes > 10))
    479         abort();
    480    
    481     if (do_dump == TRUE) {
    482       printf("PASS: %d  Block @ 0x%p   Back %d,   Front %d",
    483               source, the_block,
    484               the_block->back_flag, the_block->front_flag);
    485       if ( _Heap_Is_block_free(the_block) ) {
    486         printf( "      Prev 0x%p,   Next 0x%p\n",
    487                           the_block->previous, the_block->next);
    488       } else {
    489         printf("\n");
    490       }
    491     }
    492 
    493     /*
    494      * Handle the last block
    495      */
    496 
    497     if ( the_block->front_flag != HEAP_DUMMY_FLAG ) {
    498       next_block = _Heap_Next_block(the_block);
    499       if ( the_block->front_flag != next_block->back_flag ) {
    500         error = 1;
    501         printf("PASS: %d  Front and back flags don't match\n", source);
    502         printf("         Current Block (%p):  Back - %d,  Front - %d",
    503                the_block, the_block->back_flag, the_block->front_flag);
    504         if (do_dump == TRUE) {
    505           if (_Heap_Is_block_free(the_block)) {
    506             printf("      Prev 0x%p,   Next 0x%p\n",
    507                    the_block->previous, the_block->next);
    508           } else {
    509             printf("\n");
    510           }
    511         } else {
    512           printf("\n");
    513         }
    514         printf("         Next Block (%p):     Back - %d,  Front - %d",
    515                next_block, next_block->back_flag, next_block->front_flag);
    516         if (do_dump == TRUE) {
    517           if (_Heap_Is_block_free(next_block)) {
    518             printf("      Prev 0x%p,   Next 0x%p\n",
    519                    the_block->previous, the_block->next);
    520           } else {
    521             printf("\n");
    522           }
    523         } else {
    524           printf("\n");
    525         }
    526       }
    527     }
    528 
    529     if (the_block->front_flag == HEAP_DUMMY_FLAG)
    530       notdone = 0;
    531     else
    532       the_block = next_block;
    533   }
    534 
    535   if (error)
    536       abort();
    537 }
    538 #endif
  • cpukit/score/src/threadq.c

    rdfbfa2b0 r93b4e6ef  
    7171
    7272}
    73 #if 0
    74 
    75 /*PAGE
    76  *
    77  *  _Thread_queue_Enqueue
    78  *
    79  *  This routine blocks a thread, places it on a thread queue, and optionally
    80  *  starts a timeout timer.
    81  *
    82  *  Input parameters:
    83  *    the_thread_queue - pointer to threadq
    84  *    timeout          - interval to wait
    85  *
    86  *  Output parameters: NONE
    87  *
    88  *  INTERRUPT LATENCY:
    89  *    only case
    90  */
    91 
    92 void _Thread_queue_Enqueue(
    93   Thread_queue_Control *the_thread_queue,
    94   Watchdog_Interval     timeout
    95 )
    96 {
    97   Thread_Control *the_thread;
    98 
    99   the_thread = _Thread_Executing;
    100 
    101 #if defined(RTEMS_MULTIPROCESSING)
    102   if ( _Thread_MP_Is_receive( the_thread ) && the_thread->receive_packet )
    103     the_thread = _Thread_MP_Allocate_proxy( the_thread_queue->state );
    104   else
    105 #endif
    106     _Thread_Set_state( the_thread, the_thread_queue->state );
    107 
    108   if ( timeout ) {
    109     _Watchdog_Initialize(
    110        &the_thread->Timer,
    111        _Thread_queue_Timeout,
    112        the_thread->Object.id,
    113        NULL
    114     );
    115 
    116     _Watchdog_Insert_ticks( &the_thread->Timer, timeout );
    117   }
    118 
    119   switch( the_thread_queue->discipline ) {
    120     case THREAD_QUEUE_DISCIPLINE_FIFO:
    121       _Thread_queue_Enqueue_fifo( the_thread_queue, the_thread, timeout );
    122       break;
    123     case THREAD_QUEUE_DISCIPLINE_PRIORITY:
    124       _Thread_queue_Enqueue_priority( the_thread_queue, the_thread, timeout );
    125       break;
    126   }
    127 }
    128 
    129 /*PAGE
    130  *
    131  *  _Thread_queue_Dequeue
    132  *
    133  *  This routine removes a thread from the specified threadq.  If the
    134  *  threadq discipline is FIFO, it unblocks a thread, and cancels its
    135  *  timeout timer.  Priority discipline is processed elsewhere.
    136  *
    137  *  Input parameters:
    138  *    the_thread_queue - pointer to threadq
    139  *
    140  *  Output parameters:
    141  *    returns - thread dequeued or NULL
    142  *
    143  *  INTERRUPT LATENCY:
    144  *    check sync
    145  */
    146 
    147 Thread_Control *_Thread_queue_Dequeue(
    148   Thread_queue_Control *the_thread_queue
    149 )
    150 {
    151   Thread_Control *the_thread;
    152 
    153   switch ( the_thread_queue->discipline ) {
    154     case THREAD_QUEUE_DISCIPLINE_FIFO:
    155       the_thread = _Thread_queue_Dequeue_fifo( the_thread_queue );
    156       break;
    157     case THREAD_QUEUE_DISCIPLINE_PRIORITY:
    158       the_thread = _Thread_queue_Dequeue_priority( the_thread_queue );
    159       break;
    160     default:              /* this is only to prevent warnings */
    161       the_thread = NULL;
    162       break;
    163   }
    164 
    165   return( the_thread );
    166 }
    167 
    168 /*PAGE
    169  *
    170  *  _Thread_queue_Extract_with_proxy
    171  *
    172  *  This routine extracts the_thread from the_thread_queue
    173  *  and ensures that if there is a proxy for this task on
    174  *  another node, it is also dealt with.
    175  *
    176  *  XXX
    177  */
    178  
    179 boolean _Thread_queue_Extract_with_proxy(
    180   Thread_Control       *the_thread
    181 )
    182 {
    183   States_Control                state;
    184   Objects_Classes               the_class;
    185   Thread_queue_Extract_callout  proxy_extract_callout;
    186 
    187   state = the_thread->current_state;
    188 
    189   if ( _States_Is_waiting_on_thread_queue( state ) ) {
    190     if ( _States_Is_waiting_for_rpc_reply( state ) &&
    191          _States_Is_locally_blocked( state ) ) {
    192 
    193       the_class = _Objects_Get_class( the_thread->Wait.id );
    194 
    195       proxy_extract_callout = _Thread_queue_Extract_table[ the_class ];
    196 
    197       if ( proxy_extract_callout )
    198         (*proxy_extract_callout)( the_thread );
    199     }
    200     _Thread_queue_Extract( the_thread->Wait.queue, the_thread );
    201 
    202     return TRUE;
    203   }
    204   return FALSE;
    205 }
    206 
    207 /*PAGE
    208  *
    209  *  _Thread_queue_Extract
    210  *
    211  *  This routine removes a specific thread from the specified threadq,
    212  *  deletes any timeout, and unblocks the thread.
    213  *
    214  *  Input parameters:
    215  *    the_thread_queue - pointer to a threadq header
    216  *    the_thread       - pointer to a thread control block
    217  *
    218  *  Output parameters: NONE
    219  *
    220  *  INTERRUPT LATENCY: NONE
    221  */
    222 
    223 void _Thread_queue_Extract(
    224   Thread_queue_Control *the_thread_queue,
    225   Thread_Control       *the_thread
    226 )
    227 {
    228   switch ( the_thread_queue->discipline ) {
    229     case THREAD_QUEUE_DISCIPLINE_FIFO:
    230       _Thread_queue_Extract_fifo( the_thread_queue, the_thread );
    231       break;
    232     case THREAD_QUEUE_DISCIPLINE_PRIORITY:
    233       _Thread_queue_Extract_priority( the_thread_queue, the_thread );
    234       break;
    235    }
    236 }
    237 
    238 /*PAGE
    239  *
    240  *  _Thread_queue_Flush
    241  *
    242  *  This kernel routine flushes the given thread queue.
    243  *
    244  *  Input parameters:
    245  *    the_thread_queue       - pointer to threadq to be flushed
    246  *    remote_extract_callout - pointer to routine which extracts a remote thread
    247  *    status                 - status to return to the thread
    248  *
    249  *  Output parameters:  NONE
    250  */
    251 
    252 void _Thread_queue_Flush(
    253   Thread_queue_Control       *the_thread_queue,
    254   Thread_queue_Flush_callout  remote_extract_callout,
    255   unsigned32                  status
    256 )
    257 {
    258   Thread_Control *the_thread;
    259 
    260   while ( (the_thread = _Thread_queue_Dequeue( the_thread_queue )) ) {
    261 #if defined(RTEMS_MULTIPROCESSING)
    262     if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    263       ( *remote_extract_callout )( the_thread );
    264     else
    265 #endif
    266       the_thread->Wait.return_code = status;
    267   }
    268 }
    269 
    270 /*PAGE
    271  *
    272  *  _Thread_queue_First
    273  *
    274  *  This routine returns a pointer to the first thread on the
    275  *  specified threadq.
    276  *
    277  *  Input parameters:
    278  *    the_thread_queue - pointer to thread queue
    279  *
    280  *  Output parameters:
    281  *    returns - first thread or NULL
    282  */
    283 
    284 Thread_Control *_Thread_queue_First(
    285   Thread_queue_Control *the_thread_queue
    286 )
    287 {
    288   Thread_Control *the_thread;
    289 
    290   switch ( the_thread_queue->discipline ) {
    291     case THREAD_QUEUE_DISCIPLINE_FIFO:
    292       the_thread = _Thread_queue_First_fifo( the_thread_queue );
    293       break;
    294     case THREAD_QUEUE_DISCIPLINE_PRIORITY:
    295       the_thread = _Thread_queue_First_priority( the_thread_queue );
    296       break;
    297     default:              /* this is only to prevent warnings */
    298       the_thread = NULL;
    299       break;
    300   }
    301 
    302   return the_thread;
    303 }
    304 
    305 /*PAGE
    306  *
    307  *  _Thread_queue_Timeout
    308  *
    309  *  This routine processes a thread which times out while waiting on
    310  *  a thread queue. It is called by the watchdog handler.
    311  *
    312  *  Input parameters:
    313  *    id - thread id
    314  *
    315  *  Output parameters: NONE
    316  */
    317 
    318 void _Thread_queue_Timeout(
    319   Objects_Id  id,
    320   void       *ignored
    321 )
    322 {
    323   Thread_Control       *the_thread;
    324   Thread_queue_Control *the_thread_queue;
    325   Objects_Locations     location;
    326 
    327   the_thread = _Thread_Get( id, &location );
    328   switch ( location ) {
    329     case OBJECTS_ERROR:
    330     case OBJECTS_REMOTE:  /* impossible */
    331       break;
    332     case OBJECTS_LOCAL:
    333       the_thread_queue = the_thread->Wait.queue;
    334 
    335       /*
    336        *  If the_thread_queue is not synchronized, then it is either
    337        *  "nothing happened", "timeout", or "satisfied".   If the_thread
    338        *  is the executing thread, then it is in the process of blocking
    339        *  and it is the thread which is responsible for the synchronization
    340        *  process.
    341        *
    342        *  If it is not satisfied, then it is "nothing happened" and
    343        *  this is the "timeout" transition.  After a request is satisfied,
    344        *  a timeout is not allowed to occur.
    345        */
    346 
    347       if ( the_thread_queue->sync_state != THREAD_QUEUE_SYNCHRONIZED &&
    348            _Thread_Is_executing( the_thread ) ) {
    349         if ( the_thread_queue->sync_state != THREAD_QUEUE_SATISFIED )
    350           the_thread_queue->sync_state = THREAD_QUEUE_TIMEOUT;
    351       } else {
    352         the_thread->Wait.return_code = the_thread->Wait.queue->timeout_status;
    353         _Thread_queue_Extract( the_thread->Wait.queue, the_thread );
    354       }
    355       _Thread_Unnest_dispatch();
    356       break;
    357   }
    358 }
    359 
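The comment inside _Thread_queue_Timeout describes a small hand-off: while a thread is still in the middle of enqueueing itself, a timeout may only record its verdict in sync_state; once the request has been satisfied it may no longer time out; in every other situation the timeout extracts the thread directly. A toy model of that decision (on_timeout and its boolean argument are hypothetical, for illustration only):

    #include <stdio.h>

    enum sync_state { SYNCHRONIZED, NOTHING_HAPPENED, TIMEOUT, SATISFIED };

    /* what a timeout may do, given the queue's sync state and whether the thread
       that timed out is the one still executing its own enqueue */
    static enum sync_state on_timeout( enum sync_state s, int thread_is_still_enqueueing )
    {
      if ( s != SYNCHRONIZED && thread_is_still_enqueueing ) {
        if ( s != SATISFIED )
          return TIMEOUT;   /* "nothing happened" turns into "timeout" */
        return s;           /* a satisfied request can no longer time out */
      }
      return s;             /* otherwise the thread is extracted from the queue as usual */
    }

    int main( void )
    {
      printf( "%d\n", on_timeout( NOTHING_HAPPENED, 1 ) == TIMEOUT );   /* 1 */
      printf( "%d\n", on_timeout( SATISFIED, 1 ) == SATISFIED );        /* 1 */
      return 0;
    }
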
    360 /*PAGE
    361  *
    362  *  _Thread_queue_Enqueue_fifo
    363  *
    364  *  This routine blocks a thread, places it on a thread queue, and optionally
    365  *  starts a timeout timer.
    366  *
    367  *  Input parameters:
    368  *    the_thread_queue - pointer to threadq
    369  *    the_thread       - pointer to the thread to block
    370  *    timeout          - interval to wait
    371  *
    372  *  Output parameters: NONE
    373  *
    374  *  INTERRUPT LATENCY:
    375  *    only case
    376  */
    377 
    378 void _Thread_queue_Enqueue_fifo (
    379   Thread_queue_Control *the_thread_queue,
    380   Thread_Control       *the_thread,
    381   Watchdog_Interval    timeout
    382 )
    383 {
    384   ISR_Level            level;
    385   Thread_queue_States  sync_state;
    386 
    387   _ISR_Disable( level );
    388 
    389   sync_state = the_thread_queue->sync_state;
    390   the_thread_queue->sync_state = THREAD_QUEUE_SYNCHRONIZED;
    391 
    392   switch ( sync_state ) {
    393     case THREAD_QUEUE_SYNCHRONIZED:
    394       /*
    395        *  This should never happen.  It indicates that someone did not
    396        *  enter a thread queue critical section.
    397        */
    398       break;
    399 
    400     case THREAD_QUEUE_NOTHING_HAPPENED:
    401       _Chain_Append_unprotected(
    402         &the_thread_queue->Queues.Fifo,
    403         &the_thread->Object.Node
    404       );
    405       _ISR_Enable( level );
    406       return;
    407 
    408     case THREAD_QUEUE_TIMEOUT:
    409       the_thread->Wait.return_code = the_thread->Wait.queue->timeout_status;
    410       _ISR_Enable( level );
    411       break;
    412 
    413     case THREAD_QUEUE_SATISFIED:
    414       if ( _Watchdog_Is_active( &the_thread->Timer ) ) {
    415         _Watchdog_Deactivate( &the_thread->Timer );
    416         _ISR_Enable( level );
    417         (void) _Watchdog_Remove( &the_thread->Timer );
    418       } else
    419         _ISR_Enable( level );
    420       break;
    421   }
    422 
    423   /*
    424  *  Global objects with thread queues should not be operated on from an
    425    *  ISR.  But the sync code still must allow short timeouts to be processed
    426    *  correctly.
    427    */
    428 
    429   _Thread_Unblock( the_thread );
    430 
    431 #if defined(RTEMS_MULTIPROCESSING)
    432   if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    433     _Thread_MP_Free_proxy( the_thread );
    434 #endif
    435 
    436 }
    437 
    438 /*PAGE
    439  *
    440  *  _Thread_queue_Dequeue_fifo
    441  *
    442  *  This routine removes a thread from the specified threadq.
    443  *
    444  *  Input parameters:
    445  *    the_thread_queue - pointer to threadq
    446  *
    447  *  Output parameters:
    448  *    returns - thread dequeued or NULL
    449  *
    450  *  INTERRUPT LATENCY:
    451  *    check sync
    452  *    FIFO
    453  */
    454 
    455 Thread_Control *_Thread_queue_Dequeue_fifo(
    456   Thread_queue_Control *the_thread_queue
    457 )
    458 {
    459   ISR_Level              level;
    460   Thread_Control *the_thread;
    461 
    462   _ISR_Disable( level );
    463   if ( !_Chain_Is_empty( &the_thread_queue->Queues.Fifo ) ) {
    464 
    465     the_thread = (Thread_Control *)
    466        _Chain_Get_first_unprotected( &the_thread_queue->Queues.Fifo );
    467 
    468     if ( !_Watchdog_Is_active( &the_thread->Timer ) ) {
    469       _ISR_Enable( level );
    470       _Thread_Unblock( the_thread );
    471     } else {
    472       _Watchdog_Deactivate( &the_thread->Timer );
    473       _ISR_Enable( level );
    474       (void) _Watchdog_Remove( &the_thread->Timer );
    475       _Thread_Unblock( the_thread );
    476     }
    477 
    478 #if defined(RTEMS_MULTIPROCESSING)
    479     if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    480       _Thread_MP_Free_proxy( the_thread );
    481 #endif
    482 
    483     return the_thread;
    484   }
    485 
    486   switch ( the_thread_queue->sync_state ) {
    487     case THREAD_QUEUE_SYNCHRONIZED:
    488     case THREAD_QUEUE_SATISFIED:
    489       _ISR_Enable( level );
    490       return NULL;
    491 
    492     case THREAD_QUEUE_NOTHING_HAPPENED:
    493     case THREAD_QUEUE_TIMEOUT:
    494       the_thread_queue->sync_state = THREAD_QUEUE_SATISFIED;
    495       _ISR_Enable( level );
    496       return _Thread_Executing;
    497   }
    498   return NULL;                /* this is only to prevent warnings */
    499 }
    500 
    501 /*PAGE
    502  *
    503  *  _Thread_queue_Extract_fifo
    504  *
    505  *  This routine removes a specific thread from the specified threadq,
    506  *  deletes any timeout, and unblocks the thread.
    507  *
    508  *  Input parameters:
    509  *    the_thread_queue - pointer to a threadq header
    510  *    the_thread       - pointer to the thread to extract
    511  *
    512  *  Output parameters: NONE
    513  *
    514  *  INTERRUPT LATENCY:
    515  *    EXTRACT_FIFO
    516  */
    517 
    518 void _Thread_queue_Extract_fifo(
    519   Thread_queue_Control *the_thread_queue,
    520   Thread_Control       *the_thread
    521 )
    522 {
    523   ISR_Level level;
    524 
    525   _ISR_Disable( level );
    526 
    527   if ( !_States_Is_waiting_on_thread_queue( the_thread->current_state ) ) {
    528     _ISR_Enable( level );
    529     return;
    530   }
    531 
    532   _Chain_Extract_unprotected( &the_thread->Object.Node );
    533 
    534   if ( !_Watchdog_Is_active( &the_thread->Timer ) ) {
    535     _ISR_Enable( level );
    536   } else {
    537     _Watchdog_Deactivate( &the_thread->Timer );
    538     _ISR_Enable( level );
    539     (void) _Watchdog_Remove( &the_thread->Timer );
    540   }
    541 
    542   _Thread_Unblock( the_thread );
    543 
    544 #if defined(RTEMS_MULTIPROCESSING)
    545   if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    546     _Thread_MP_Free_proxy( the_thread );
    547 #endif
    548  
    549 }
    550 
    551 /*PAGE
    552  *
    553  *  _Thread_queue_First_fifo
    554  *
    555  *  This routine returns a pointer to the first thread on the
    556  *  specified threadq.
    557  *
    558  *  Input parameters:
    559  *    the_thread_queue - pointer to threadq
    560  *
    561  *  Output parameters:
    562  *    returns - first thread or NULL
    563  */
    564 
    565 Thread_Control *_Thread_queue_First_fifo(
    566   Thread_queue_Control *the_thread_queue
    567 )
    568 {
    569   if ( !_Chain_Is_empty( &the_thread_queue->Queues.Fifo ) )
    570     return (Thread_Control *) the_thread_queue->Queues.Fifo.first;
    571 
    572   return NULL;
    573 }
    574 
    575 /*PAGE
    576  *
    577  *  _Thread_queue_Enqueue_priority
    578  *
    579  *  This routine blocks a thread, places it on a thread queue, and optionally
    580  *  starts a timeout timer.
    581  *
    582  *  Input parameters:
    583  *    the_thread_queue - pointer to threadq
    584  *    thread           - thread to insert
    585  *    timeout          - timeout interval in ticks
    586  *
    587  *  Output parameters: NONE
    588  *
    589  *  INTERRUPT LATENCY:
    590  *    forward less than
    591  *    forward equal
    592  */
    593 
    594 void _Thread_queue_Enqueue_priority(
    595   Thread_queue_Control *the_thread_queue,
    596   Thread_Control       *the_thread,
    597   Watchdog_Interval     timeout
    598 )
    599 {
    600   Priority_Control     search_priority;
    601   Thread_Control      *search_thread;
    602   ISR_Level            level;
    603   Chain_Control       *header;
    604   unsigned32           header_index;
    605   Chain_Node          *the_node;
    606   Chain_Node          *next_node;
    607   Chain_Node          *previous_node;
    608   Chain_Node          *search_node;
    609   Priority_Control     priority;
    610   States_Control       block_state;
    611   Thread_queue_States  sync_state;
    612 
    613   _Chain_Initialize_empty( &the_thread->Wait.Block2n );
    614 
    615   priority     = the_thread->current_priority;
    616   header_index = _Thread_queue_Header_number( priority );
    617   header       = &the_thread_queue->Queues.Priority[ header_index ];
    618   block_state  = the_thread_queue->state;
    619 
    620   if ( _Thread_queue_Is_reverse_search( priority ) )
    621     goto restart_reverse_search;
    622 
    623 restart_forward_search:
    624   search_priority = PRIORITY_MINIMUM - 1;
    625   _ISR_Disable( level );
    626   search_thread = (Thread_Control *) header->first;
    627   while ( !_Chain_Is_tail( header, (Chain_Node *)search_thread ) ) {
    628     search_priority = search_thread->current_priority;
    629     if ( priority <= search_priority )
    630       break;
    631 
    632 #if ( CPU_UNROLL_ENQUEUE_PRIORITY == TRUE )
    633     search_thread = (Thread_Control *) search_thread->Object.Node.next;
    634     if ( _Chain_Is_tail( header, (Chain_Node *)search_thread ) )
    635       break;
    636     search_priority = search_thread->current_priority;
    637     if ( priority <= search_priority )
    638       break;
    639 #endif
    640     _ISR_Flash( level );
    641     if ( !_States_Are_set( search_thread->current_state, block_state) ) {
    642       _ISR_Enable( level );
    643       goto restart_forward_search;
    644     }
    645     search_thread =
    646        (Thread_Control *)search_thread->Object.Node.next;
    647   }
    648 
    649   if ( the_thread_queue->sync_state != THREAD_QUEUE_NOTHING_HAPPENED )
    650     goto synchronize;
    651 
    652   the_thread_queue->sync_state = THREAD_QUEUE_SYNCHRONIZED;
    653 
    654   if ( priority == search_priority )
    655     goto equal_priority;
    656 
    657   search_node   = (Chain_Node *) search_thread;
    658   previous_node = search_node->previous;
    659   the_node      = (Chain_Node *) the_thread;
    660 
    661   the_node->next        = search_node;
    662   the_node->previous    = previous_node;
    663   previous_node->next   = the_node;
    664   search_node->previous = the_node;
    665   _ISR_Enable( level );
    666   return;
    667 
    668 restart_reverse_search:
    669   search_priority     = PRIORITY_MAXIMUM + 1;
    670 
    671   _ISR_Disable( level );
    672   search_thread = (Thread_Control *) header->last;
    673   while ( !_Chain_Is_head( header, (Chain_Node *)search_thread ) ) {
    674     search_priority = search_thread->current_priority;
    675     if ( priority >= search_priority )
    676       break;
    677 #if ( CPU_UNROLL_ENQUEUE_PRIORITY == TRUE )
    678     search_thread = (Thread_Control *) search_thread->Object.Node.previous;
    679     if ( _Chain_Is_head( header, (Chain_Node *)search_thread ) )
    680       break;
    681     search_priority = search_thread->current_priority;
    682     if ( priority >= search_priority )
    683       break;
    684 #endif
    685     _ISR_Flash( level );
    686     if ( !_States_Are_set( search_thread->current_state, block_state) ) {
    687       _ISR_Enable( level );
    688       goto restart_reverse_search;
    689     }
    690     search_thread = (Thread_Control *)
    691                          search_thread->Object.Node.previous;
    692   }
    693 
    694   if ( the_thread_queue->sync_state != THREAD_QUEUE_NOTHING_HAPPENED )
    695     goto synchronize;
    696 
    697   the_thread_queue->sync_state = THREAD_QUEUE_SYNCHRONIZED;
    698 
    699   if ( priority == search_priority )
    700     goto equal_priority;
    701 
    702   search_node = (Chain_Node *) search_thread;
    703   next_node   = search_node->next;
    704   the_node    = (Chain_Node *) the_thread;
    705 
    706   the_node->next      = next_node;
    707   the_node->previous  = search_node;
    708   search_node->next   = the_node;
    709   next_node->previous = the_node;
    710   _ISR_Enable( level );
    711   return;
    712 
    713 equal_priority:               /* add at end of priority group */
    714   search_node   = _Chain_Tail( &search_thread->Wait.Block2n );
    715   previous_node = search_node->previous;
    716   the_node      = (Chain_Node *) the_thread;
    717 
    718   the_node->next        = search_node;
    719   the_node->previous    = previous_node;
    720   previous_node->next   = the_node;
    721   search_node->previous = the_node;
    722   _ISR_Enable( level );
    723   return;
    724 
    725 synchronize:
    726 
    727   sync_state = the_thread_queue->sync_state;
    728   the_thread_queue->sync_state = THREAD_QUEUE_SYNCHRONIZED;
    729 
    730   switch ( sync_state ) {
    731     case THREAD_QUEUE_SYNCHRONIZED:
    732       /*
    733        *  This should never happen.  It indicates that someone did not
    734        *  enter a thread queue critical section.
    735        */
    736       break;
    737  
    738     case THREAD_QUEUE_NOTHING_HAPPENED:
    739       /*
    740        *  This should never happen.  All of this was dealt with above.
    741        */
    742       break;
    743  
    744     case THREAD_QUEUE_TIMEOUT:
    745       the_thread->Wait.return_code = the_thread->Wait.queue->timeout_status;
    746       _ISR_Enable( level );
    747       break;
    748  
    749     case THREAD_QUEUE_SATISFIED:
    750       if ( _Watchdog_Is_active( &the_thread->Timer ) ) {
    751         _Watchdog_Deactivate( &the_thread->Timer );
    752         _ISR_Enable( level );
    753         (void) _Watchdog_Remove( &the_thread->Timer );
    754       } else
    755         _ISR_Enable( level );
    756       break;
    757   }
    758  
    759   /*
    760  *  Global objects with thread queues should not be operated on from an
    761    *  ISR.  But the sync code still must allow short timeouts to be processed
    762    *  correctly.
    763    */
    764  
    765   _Thread_Unblock( the_thread );
    766  
    767 #if defined(RTEMS_MULTIPROCESSING)
    768   if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    769     _Thread_MP_Free_proxy( the_thread );
    770 #endif
    771 }
    772 
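_Thread_queue_Enqueue_priority scans the priority chain from the head for numerically small (high) priorities and from the tail for numerically large (low) ones, restarting whenever an _ISR_Flash lets the queue change underneath it. Threads of equal priority are appended to the Block2n sub-chain of the first thread at that priority, so FIFO order within a priority is preserved. A much-reduced sketch of just the search and insert, with a flat list standing in for the Block2n sub-chains and a simple halfway split standing in for _Thread_queue_Is_reverse_search (both simplifications are assumptions of this sketch, not the changeset's logic):

    #include <stdio.h>

    struct node { unsigned priority; struct node *next, *prev; };

    static void insert_by_priority(
      struct node *head, struct node *tail, struct node *n, unsigned priority_maximum )
    {
      struct node *at;

      if ( n->priority <= priority_maximum / 2 ) {      /* forward search from the head */
        for ( at = head->next ; at != tail ; at = at->next )
          if ( n->priority < at->priority )
            break;
      } else {                                          /* reverse search from the tail */
        for ( at = tail->prev ; at != head ; at = at->prev )
          if ( n->priority >= at->priority )
            break;
        at = at->next;                                  /* insert after the match */
      }

      n->next = at;            n->prev = at->prev;
      at->prev->next = n;      at->prev = n;
    }

    int main( void )
    {
      struct node head, tail, a = { 10 }, b = { 10 }, c = { 200 }, *p;

      head.next = &tail;  tail.prev = &head;  head.prev = tail.next = NULL;

      insert_by_priority( &head, &tail, &a, 255 );
      insert_by_priority( &head, &tail, &c, 255 );
      insert_by_priority( &head, &tail, &b, 255 );   /* same priority as a: queued behind it */

      for ( p = head.next ; p != &tail ; p = p->next )
        printf( "%u ", p->priority );                 /* prints: 10 10 200 */
      printf( "\n" );
      return 0;
    }
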
    773 /*PAGE
    774  *
    775  *  _Thread_queue_Dequeue_priority
    776  *
    777  *  This routine removes a thread from the specified PRIORITY based
    778  *  threadq, unblocks it, and cancels its timeout timer.
    779  *
    780  *  Input parameters:
    781  *    the_thread_queue - pointer to thread queue
    782  *
    783  *  Output parameters:
    784  *    returns - thread dequeued or NULL
    785  *
    786  *  INTERRUPT LATENCY:
    787  *    only case
    788  */
    789 
    790 Thread_Control *_Thread_queue_Dequeue_priority(
    791   Thread_queue_Control *the_thread_queue
    792 )
    793 {
    794   unsigned32      index;
    795   ISR_Level       level;
    796   Thread_Control *the_thread = NULL;  /* just to remove warnings */
    797   Thread_Control *new_first_thread;
    798   Chain_Node     *new_first_node;
    799   Chain_Node     *new_second_node;
    800   Chain_Node     *last_node;
    801   Chain_Node     *next_node;
    802   Chain_Node     *previous_node;
    803 
    804   _ISR_Disable( level );
    805   for( index=0 ;
    806        index < TASK_QUEUE_DATA_NUMBER_OF_PRIORITY_HEADERS ;
    807        index++ ) {
    808     if ( !_Chain_Is_empty( &the_thread_queue->Queues.Priority[ index ] ) ) {
    809       the_thread = (Thread_Control *)
    810                     the_thread_queue->Queues.Priority[ index ].first;
    811       goto dequeue;
    812     }
    813   }
    814 
    815   switch ( the_thread_queue->sync_state ) {
    816     case THREAD_QUEUE_SYNCHRONIZED:
    817     case THREAD_QUEUE_SATISFIED:
    818       _ISR_Enable( level );
    819       return NULL;
    820 
    821     case THREAD_QUEUE_NOTHING_HAPPENED:
    822     case THREAD_QUEUE_TIMEOUT:
    823       the_thread_queue->sync_state = THREAD_QUEUE_SATISFIED;
    824       _ISR_Enable( level );
    825       return _Thread_Executing;
    826   }
    827 
    828 dequeue:
    829   new_first_node   = the_thread->Wait.Block2n.first;
    830   new_first_thread = (Thread_Control *) new_first_node;
    831   next_node        = the_thread->Object.Node.next;
    832   previous_node    = the_thread->Object.Node.previous;
    833 
    834   if ( !_Chain_Is_empty( &the_thread->Wait.Block2n ) ) {
    835     last_node       = the_thread->Wait.Block2n.last;
    836     new_second_node = new_first_node->next;
    837 
    838     previous_node->next      = new_first_node;
    839     next_node->previous      = new_first_node;
    840     new_first_node->next     = next_node;
    841     new_first_node->previous = previous_node;
    842 
    843     if ( !_Chain_Has_only_one_node( &the_thread->Wait.Block2n ) ) {
    844                                                 /* > two threads on 2-n */
    845       new_second_node->previous =
    846                 _Chain_Head( &new_first_thread->Wait.Block2n );
    847 
    848       new_first_thread->Wait.Block2n.first = new_second_node;
    849       new_first_thread->Wait.Block2n.last  = last_node;
    850 
    851       last_node->next = _Chain_Tail( &new_first_thread->Wait.Block2n );
    852     }
    853   } else {
    854     previous_node->next = next_node;
    855     next_node->previous = previous_node;
    856   }
    857 
    858   if ( !_Watchdog_Is_active( &the_thread->Timer ) ) {
    859     _ISR_Enable( level );
    860     _Thread_Unblock( the_thread );
    861   } else {
    862     _Watchdog_Deactivate( &the_thread->Timer );
    863     _ISR_Enable( level );
    864     (void) _Watchdog_Remove( &the_thread->Timer );
    865     _Thread_Unblock( the_thread );
    866   }
    867 
    868 #if defined(RTEMS_MULTIPROCESSING)
    869   if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    870     _Thread_MP_Free_proxy( the_thread );
    871 #endif
    872   return( the_thread );
    873 }
    874 
    875 /*PAGE
    876  *
    877  *  _Thread_queue_Extract_priority
    878  *
    879  *  This routine removes a specific thread from the specified threadq,
    880  *  deletes any timeout, and unblocks the thread.
    881  *
    882  *  Input parameters:
    883  *    the_thread_queue - pointer to a threadq header
    884  *    the_thread       - pointer to a thread control block
    885  *
    886  *  Output parameters: NONE
    887  *
    888  *  INTERRUPT LATENCY:
    889  *    EXTRACT_PRIORITY
    890  */
    891 
    892 void _Thread_queue_Extract_priority(
    893   Thread_queue_Control *the_thread_queue,
    894   Thread_Control       *the_thread
    895 )
    896 {
    897   ISR_Level              level;
    898   Chain_Node     *the_node;
    899   Chain_Node     *next_node;
    900   Chain_Node     *previous_node;
    901   Thread_Control *new_first_thread;
    902   Chain_Node     *new_first_node;
    903   Chain_Node     *new_second_node;
    904   Chain_Node     *last_node;
    905 
    906   the_node = (Chain_Node *) the_thread;
    907   _ISR_Disable( level );
    908   if ( _States_Is_waiting_on_thread_queue( the_thread->current_state ) ) {
    909     next_node     = the_node->next;
    910     previous_node = the_node->previous;
    911 
    912     if ( !_Chain_Is_empty( &the_thread->Wait.Block2n ) ) {
    913       new_first_node   = the_thread->Wait.Block2n.first;
    914       new_first_thread = (Thread_Control *) new_first_node;
    915       last_node        = the_thread->Wait.Block2n.last;
    916       new_second_node  = new_first_node->next;
    917 
    918       previous_node->next      = new_first_node;
    919       next_node->previous      = new_first_node;
    920       new_first_node->next     = next_node;
    921       new_first_node->previous = previous_node;
    922 
    923       if ( !_Chain_Has_only_one_node( &the_thread->Wait.Block2n ) ) {
    924                                           /* > two threads on 2-n */
    925         new_second_node->previous =
    926                   _Chain_Head( &new_first_thread->Wait.Block2n );
    927         new_first_thread->Wait.Block2n.first = new_second_node;
    928 
    929         new_first_thread->Wait.Block2n.last = last_node;
    930         last_node->next = _Chain_Tail( &new_first_thread->Wait.Block2n );
    931       }
    932     } else {
    933       previous_node->next = next_node;
    934       next_node->previous = previous_node;
    935     }
    936 
    937     if ( !_Watchdog_Is_active( &the_thread->Timer ) ) {
    938       _ISR_Enable( level );
    939       _Thread_Unblock( the_thread );
    940     } else {
    941       _Watchdog_Deactivate( &the_thread->Timer );
    942       _ISR_Enable( level );
    943       (void) _Watchdog_Remove( &the_thread->Timer );
    944       _Thread_Unblock( the_thread );
    945     }
    946 
    947 #if defined(RTEMS_MULTIPROCESSING)
    948     if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    949       _Thread_MP_Free_proxy( the_thread );
    950 #endif
    951   }
    952   else
    953     _ISR_Enable( level );
    954 }
    955 
    956 /*PAGE
    957  *
    958  *  _Thread_queue_First_priority
    959  *
    960  *  This routine returns a pointer to the first thread on the
    961  *  specified threadq.
    962  *
    963  *  Input parameters:
    964  *    the_thread_queue - pointer to thread queue
    965  *
    966  *  Output parameters:
    967  *    returns - first thread or NULL
    968  */
    969 
    970 Thread_Control *_Thread_queue_First_priority (
    971   Thread_queue_Control *the_thread_queue
    972 )
    973 {
    974   unsigned32 index;
    975 
    976   for( index=0 ;
    977        index < TASK_QUEUE_DATA_NUMBER_OF_PRIORITY_HEADERS ;
    978        index++ ) {
    979     if ( !_Chain_Is_empty( &the_thread_queue->Queues.Priority[ index ] ) )
    980       return (Thread_Control *)
    981         the_thread_queue->Queues.Priority[ index ].first;
    982   }
    983   return NULL;
    984 }
    985 #endif