Changeset 82cb78d8 in rtems


Timestamp:
11/02/99 21:45:15
Author:
Joel Sherrill <joel.sherrill@…>
Branches:
4.10, 4.11, 4.8, 4.9, 5, master
Children:
7cc8d6c
Parents:
93b4e6ef
Message:

Split core message queue and watchdog handler objects into separate files.

Files:
20 added
9 edited

  • c/src/exec/score/src/Makefile.in

    r93b4e6ef r82cb78d8  
    2222MP_C_PIECES_yes_V = mpci objectmp threadmp
    2323MP_C_PIECES = $(MP_C_PIECES_$(HAS_MP)_V)
     24
     25CORE_MESSAGE_QUEUE_C_PIECES= coremsg coremsgbroadcast coremsgclose \
     26    coremsgflush coremsgflushsupp coremsgseize coremsgsubmit
     27
     28CORE_MUTEX_C_PIECES= coremutex coremutexflush coremutexseize \
     29    coremutexsurrender
     30
     31CORE_SEMAPHORE_C_PIECES= coresem coresemflush coresemseize coresemsurrender
    2432
    2533HEAP_C_PIECES = heap heapallocate heapextend heapfree \
     
    4957    coretodvalidate
    5058
     59WATCHDOG_C_PIECES= watchdog watchdogadjust watchdoginsert watchdogremove \
     60    watchdogtickle
     61
    5162# C and C++ source names, if any, go here -- minus the .c or .cc
    52 C_PIECES = apiext chain coremsg coremutex coresem $(HEAP_C_PIECES) interr isr \
     63C_PIECES = apiext chain $(CORE_MESSAGE_QUEUE_C_PIECES) $(CORE_MUTEX_C_PIECES) \
     64    $(CORE_SEMAPHORE_C_PIECES) $(HEAP_C_PIECES) interr isr \
    5365    $(OBJECT_C_PIECES) $(THREAD_C_PIECES) $(THREADQ_C_PIECES) \
    5466    $(TOD_C_PIECES) userext \
    55     watchdog wkspace $(MP_C_PIECES)
     67    $(WATCHDOG_C_PIECES) wkspace $(MP_C_PIECES)
    5668C_FILES = $(C_PIECES:%=%.c)
    5769C_O_FILES = $(C_PIECES:%=${ARCH}/%.o)
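
Note: the Makefile change above replaces the single coremsg, coremutex, coresem and watchdog objects with per-routine pieces; each new piece corresponds to one of the 20 added files, which receives exactly one routine removed from the monolithic sources below. As a rough illustration, one of the added files, coremsgflush.c, presumably ends up looking like the sketch below; the include list and banner are assumptions, only the function body comes from the code removed from coremsg.c in this changeset.

    /*
     *  Sketch of a split-out per-routine file (e.g. coremsgflush.c).
     *  The include set is an assumption; the routine itself is the one
     *  removed from coremsg.c in this changeset.
     */

    #include <rtems/system.h>
    #include <rtems/score/chain.h>
    #include <rtems/score/isr.h>
    #include <rtems/score/coremsg.h>
    #include <rtems/score/thread.h>
    #include <rtems/score/wkspace.h>

    /*
     *  _CORE_message_queue_Flush
     *
     *  Returns the number of pending messages flushed from the queue.
     */

    unsigned32 _CORE_message_queue_Flush(
      CORE_message_queue_Control *the_message_queue
    )
    {
      if ( the_message_queue->number_of_pending_messages != 0 )
        return _CORE_message_queue_Flush_support( the_message_queue );

      return 0;
    }
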
  • c/src/exec/score/src/coremsg.c

    r93b4e6ef r82cb78d8  
    108108  return TRUE;
    109109}
    110 
    111 /*PAGE
    112  *
    113  *  _CORE_message_queue_Close
    114  *
    115  *  This function closes a message queue by returning all allocated space
    116  *  and flushing the message_queue's task wait queue.
    117  *
    118  *  Input parameters:
    119  *    the_message_queue      - the message_queue to be flushed
    120  *    remote_extract_callout - function to invoke remotely
    121  *    status                 - status to pass to thread
    122  *
    123  *  Output parameters:  NONE
    124  */
    125  
    126 void _CORE_message_queue_Close(
    127   CORE_message_queue_Control *the_message_queue,
    128   Thread_queue_Flush_callout  remote_extract_callout,
    129   unsigned32                  status
    130 )
    131 {
    132  
    133   if ( the_message_queue->number_of_pending_messages != 0 )
    134     (void) _CORE_message_queue_Flush_support( the_message_queue );
    135   else
    136     _Thread_queue_Flush(
    137       &the_message_queue->Wait_queue,
    138       remote_extract_callout,
    139       status
    140     );
    141 
    142   (void) _Workspace_Free( the_message_queue->message_buffers );
    143 
    144 }
    145 
    146 /*PAGE
    147  *
    148  *  _CORE_message_queue_Flush
    149  *
    150  *  This function flushes the message_queue's pending message queue.  The
    151  *  number of messages flushed from the queue is returned.
    152  *
    153  *  Input parameters:
    154  *    the_message_queue - the message_queue to be flushed
    155  *
    156  *  Output parameters:
    157  *    returns - the number of messages flushed from the queue
    158  */
    159  
    160 unsigned32 _CORE_message_queue_Flush(
    161   CORE_message_queue_Control *the_message_queue
    162 )
    163 {
    164   if ( the_message_queue->number_of_pending_messages != 0 )
    165     return _CORE_message_queue_Flush_support( the_message_queue );
    166   else
    167     return 0;
    168 }
    169 
    170 /*PAGE
    171  *
    172  *  _CORE_message_queue_Broadcast
    173  *
    174  *  This function sends a message for every thread waiting on the queue and
    175  *  returns the number of threads made ready by the message.
    176  *
    177  *  Input parameters:
    178  *    the_message_queue            - message is submitted to this message queue
    179  *    buffer                       - pointer to message buffer
    180  *    size                         - size in bytes of message to send
    181  *    id                           - id of message queue
    182  *    api_message_queue_mp_support - api specific mp support callout
    183  *    count                        - area to store number of threads made ready
    184  *
    185  *  Output parameters:
    186  *    count                         - number of threads made ready
    187  *    CORE_MESSAGE_QUEUE_SUCCESSFUL - if successful
    188  *    error code                    - if unsuccessful
    189  */
    190 
    191 CORE_message_queue_Status _CORE_message_queue_Broadcast(
    192   CORE_message_queue_Control                *the_message_queue,
    193   void                                      *buffer,
    194   unsigned32                                 size,
    195   Objects_Id                                 id,
    196   CORE_message_queue_API_mp_support_callout  api_message_queue_mp_support,
    197   unsigned32                                *count
    198 )
    199 {
    200   Thread_Control          *the_thread;
    201   unsigned32               number_broadcasted;
    202   Thread_Wait_information *waitp;
    203   unsigned32               constrained_size;
    204 
    205   number_broadcasted = 0;
    206   while ((the_thread = _Thread_queue_Dequeue(&the_message_queue->Wait_queue))) {
    207     waitp = &the_thread->Wait;
    208     number_broadcasted += 1;
    209 
    210     constrained_size = size;
    211     if ( size > the_message_queue->maximum_message_size )
    212         constrained_size = the_message_queue->maximum_message_size;
    213 
    214     _CORE_message_queue_Copy_buffer(
    215       buffer,
    216       waitp->return_argument,
    217       constrained_size
    218     );
    219 
    220     *(unsigned32 *)the_thread->Wait.return_argument_1 = size;
    221 
    222 #if defined(RTEMS_MULTIPROCESSING)
    223     if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    224       (*api_message_queue_mp_support) ( the_thread, id );
    225 #endif
    226 
    227   }
    228   *count = number_broadcasted;
    229   return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
    230 }
    231 
    232 /*PAGE
    233  *
    234  *  _CORE_message_queue_Seize
    235  *
    236  *  This kernel routine dequeues a message, copies the message buffer to
    237  *  a given destination buffer, and frees the message buffer to the
    238  *  inactive message pool.  If no messages are available, the thread will be
    239  *  blocked if wait is TRUE; otherwise, an error is returned to the thread.
    240  *
    241  *  Input parameters:
    242  *    the_message_queue - pointer to message queue
    243  *    id                - id of object we are waiting on
    244  *    buffer            - pointer to message buffer to be filled
    245  *    size              - pointer to the size of buffer to be filled
    246  *    wait              - TRUE if wait is allowed, FALSE otherwise
    247  *    timeout           - time to wait for a message
    248  *
    249  *  Output parameters:  NONE
    250  *
    251  *  NOTE: Dependent on BUFFER_LENGTH
    252  *
    253  *  INTERRUPT LATENCY:
    254  *    available
    255  *    wait
    256  */
    257 
    258 void _CORE_message_queue_Seize(
    259   CORE_message_queue_Control *the_message_queue,
    260   Objects_Id                  id,
    261   void                       *buffer,
    262   unsigned32                 *size,
    263   boolean                     wait,
    264   Watchdog_Interval           timeout
    265 )
    266 {
    267   ISR_Level                          level;
    268   CORE_message_queue_Buffer_control *the_message;
    269   Thread_Control                    *executing;
    270 
    271   executing = _Thread_Executing;
    272   executing->Wait.return_code = CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
    273   _ISR_Disable( level );
    274   if ( the_message_queue->number_of_pending_messages != 0 ) {
    275     the_message_queue->number_of_pending_messages -= 1;
    276 
    277     the_message = _CORE_message_queue_Get_pending_message( the_message_queue );
    278     _ISR_Enable( level );
    279     *size = the_message->Contents.size;
    280     _CORE_message_queue_Copy_buffer(the_message->Contents.buffer,buffer,*size );
    281     _CORE_message_queue_Free_message_buffer(the_message_queue, the_message );
    282     return;
    283   }
    284 
    285   if ( !wait ) {
    286     _ISR_Enable( level );
    287     executing->Wait.return_code = CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED_NOWAIT;
    288     return;
    289   }
    290 
    291   _Thread_queue_Enter_critical_section( &the_message_queue->Wait_queue );
    292   executing->Wait.queue              = &the_message_queue->Wait_queue;
    293   executing->Wait.id                 = id;
    294   executing->Wait.return_argument    = (void *)buffer;
    295   executing->Wait.return_argument_1  = (void *)size;
    296   _ISR_Enable( level );
    297 
    298   _Thread_queue_Enqueue( &the_message_queue->Wait_queue, timeout );
    299 }
    300 
    301 /*PAGE
    302  *
    303  *  _CORE_message_queue_Flush_support
    304  *
    305  *  This message handler routine removes all messages from a message queue
    306  *  and returns them to the inactive message pool.  The number of messages
    307  *  flushed from the queue is returned.
    308  *
    309  *  Input parameters:
    310  *    the_message_queue - pointer to message queue
    311  *
    312  *  Output parameters:
    313  *    returns - number of messages placed on inactive chain
    314  *
    315  *  INTERRUPT LATENCY:
    316  *    only case
    317  */
    318 
    319 unsigned32 _CORE_message_queue_Flush_support(
    320   CORE_message_queue_Control *the_message_queue
    321 )
    322 {
    323   ISR_Level   level;
    324   Chain_Node *inactive_first;
    325   Chain_Node *message_queue_first;
    326   Chain_Node *message_queue_last;
    327   unsigned32  count;
    328 
    329   _ISR_Disable( level );
    330     inactive_first      = the_message_queue->Inactive_messages.first;
    331     message_queue_first = the_message_queue->Pending_messages.first;
    332     message_queue_last  = the_message_queue->Pending_messages.last;
    333 
    334     the_message_queue->Inactive_messages.first = message_queue_first;
    335     message_queue_last->next = inactive_first;
    336     inactive_first->previous = message_queue_last;
    337     message_queue_first->previous          =
    338                _Chain_Head( &the_message_queue->Inactive_messages );
    339 
    340     _Chain_Initialize_empty( &the_message_queue->Pending_messages );
    341 
    342     count = the_message_queue->number_of_pending_messages;
    343     the_message_queue->number_of_pending_messages = 0;
    344   _ISR_Enable( level );
    345   return count;
    346 }
    347 
    348 /*PAGE
    349  *
    350  *  _CORE_message_queue_Submit
    351  *
    352  *  This routine implements the send and urgent message functions. It
    353  *  processes a message that is to be submitted to the designated
    354  *  message queue.  The message will either be processed as a
    355  *  send message, in which case it will be inserted at the rear of the
    356  *  queue, or as an urgent message, in which case it will be inserted
    357  *  at the front of the queue.
    358  *
    359  *  Input parameters:
    360  *    the_message_queue            - message is submitted to this message queue
    361  *    buffer                       - pointer to message buffer
    362  *    size                         - size in bytes of message to send
    363  *    id                           - id of message queue
    364  *    api_message_queue_mp_support - api specific mp support callout
    365  *    submit_type                  - send or urgent message
    366  *
    367  *  Output parameters:
    368  *    CORE_MESSAGE_QUEUE_SUCCESSFUL - if successful
    369  *    error code                    - if unsuccessful
    370  */
    371 
    372 CORE_message_queue_Status _CORE_message_queue_Submit(
    373   CORE_message_queue_Control                *the_message_queue,
    374   void                                      *buffer,
    375   unsigned32                                 size,
    376   Objects_Id                                 id,
    377   CORE_message_queue_API_mp_support_callout  api_message_queue_mp_support,
    378   CORE_message_queue_Submit_types            submit_type
    379 )
    380 {
    381   CORE_message_queue_Buffer_control   *the_message;
    382   Thread_Control                      *the_thread;
    383 
    384   if ( size > the_message_queue->maximum_message_size )
    385     return CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE;
    386 
    387   /*
    388    * Is there a thread currently waiting on this message queue?
    389    */
    390      
    391   the_thread = _Thread_queue_Dequeue( &the_message_queue->Wait_queue );
    392   if ( the_thread )
    393   {
    394     _CORE_message_queue_Copy_buffer(
    395       buffer,
    396       the_thread->Wait.return_argument,
    397       size
    398     );
    399     *(unsigned32 *)the_thread->Wait.return_argument_1 = size;
    400    
    401 #if defined(RTEMS_MULTIPROCESSING)
    402     if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    403       (*api_message_queue_mp_support) ( the_thread, id );
    404 #endif
    405 
    406     return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
    407   }
    408 
    409   /*
    410    * No one waiting on this one currently.
    411    * Allocate a message buffer and store it away
    412    */
    413 
    414   if ( the_message_queue->number_of_pending_messages ==
    415        the_message_queue->maximum_pending_messages ) {
    416     return CORE_MESSAGE_QUEUE_STATUS_TOO_MANY;
    417   }
    418 
    419   the_message = _CORE_message_queue_Allocate_message_buffer(the_message_queue);
    420   if ( the_message == 0)
    421     return CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED;
    422 
    423   _CORE_message_queue_Copy_buffer( buffer, the_message->Contents.buffer, size );
    424   the_message->Contents.size = size;
    425   the_message->priority  = submit_type;
    426 
    427   the_message_queue->number_of_pending_messages += 1;
    428 
    429   switch ( submit_type ) {
    430     case CORE_MESSAGE_QUEUE_SEND_REQUEST:
    431       _CORE_message_queue_Append( the_message_queue, the_message );
    432       break;
    433     case CORE_MESSAGE_QUEUE_URGENT_REQUEST:
    434       _CORE_message_queue_Prepend( the_message_queue, the_message );
    435       break;
    436     default:
    437       /* XXX interrupt critical section needs to be addressed */
    438       {
    439         CORE_message_queue_Buffer_control *this_message;
    440         Chain_Node                        *the_node;
    441 
    442         the_message->priority = submit_type;
    443         for ( the_node = the_message_queue->Pending_messages.first ;
    444            !_Chain_Is_tail( &the_message_queue->Pending_messages, the_node ) ;
    445            the_node = the_node->next ) {
    446 
    447           this_message = (CORE_message_queue_Buffer_control *) the_node;
    448 
    449           if ( this_message->priority >= the_message->priority )
    450             continue;
    451 
    452           _Chain_Insert( the_node, &the_message->Node );
    453           break;
    454         }
    455       }
    456       break;
    457   }
    458 
    459   /*
    460    *  According to POSIX, does this happen before or after the message
    461  *  is actually enqueued?  It is logical to think afterwards, because
    462    *  the message is actually in the queue at this point.
    463    */
    464 
    465   if ( the_message_queue->number_of_pending_messages == 1 &&
    466        the_message_queue->notify_handler )
    467     (*the_message_queue->notify_handler)( the_message_queue->notify_argument );
    468  
    469   return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
    470 }
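
For reference, the submit path removed above distinguishes a send request (append at the rear) from an urgent request (prepend at the front) via submit_type. The sketch below shows how a higher-level directive might drive the split-out routine; the wrapper name and queue instance are hypothetical, and the NULL MP callout is only safe for a purely local (single node) message queue.

    /*
     *  Hypothetical wrapper around _CORE_message_queue_Submit using the
     *  signature shown above.  Passing NULL for the MP support callout is
     *  an assumption that the queue is local to this node.
     */
    CORE_message_queue_Status example_send(
      CORE_message_queue_Control *the_queue,     /* hypothetical queue instance */
      void                       *buffer,
      unsigned32                  size,
      Objects_Id                  id
    )
    {
      return _CORE_message_queue_Submit(
        the_queue,
        buffer,
        size,
        id,
        NULL,                                    /* no MP support callout */
        CORE_MESSAGE_QUEUE_SEND_REQUEST          /* append at the rear of the queue */
      );
    }
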
  • c/src/exec/score/src/coremutex.c

    r93b4e6ef r82cb78d8  
    8181}
    8282
    83 /*PAGE
    84  *
    85  *  _CORE_mutex_Seize
    86  *
    87  *  This routine attempts to allocate a mutex to the calling thread.
    88  *
    89  *  Input parameters:
    90  *    the_mutex - pointer to mutex control block
    91  *    id        - id of object to wait on
    92  *    wait      - TRUE if wait is allowed, FALSE otherwise
    93  *    timeout   - number of ticks to wait (0 means forever)
    94  *
    95  *  Output parameters:  NONE
    96  *
    97  *  INTERRUPT LATENCY:
    98  *    available
    99  *    wait
    100  */
    101 
    102 void _CORE_mutex_Seize(
    103   CORE_mutex_Control  *the_mutex,
    104   Objects_Id           id,
    105   boolean              wait,
    106   Watchdog_Interval    timeout
    107 )
    108 {
    109   Thread_Control *executing;
    110   ISR_Level       level;
    111 
    112   executing = _Thread_Executing;
    113   switch ( the_mutex->Attributes.discipline ) {
    114     case CORE_MUTEX_DISCIPLINES_FIFO:
    115     case CORE_MUTEX_DISCIPLINES_PRIORITY:
    116     case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
    117       break;
    118     case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
    119       if ( executing->current_priority <
    120                               the_mutex->Attributes.priority_ceiling) {
    121         executing->Wait.return_code = CORE_MUTEX_STATUS_CEILING_VIOLATED;
    122         return;
    123       }
    124   }
    125   executing->Wait.return_code = CORE_MUTEX_STATUS_SUCCESSFUL;
    126   _ISR_Disable( level );
    127   if ( ! _CORE_mutex_Is_locked( the_mutex ) ) {
    128     the_mutex->lock       = CORE_MUTEX_LOCKED;
    129     the_mutex->holder     = executing;
    130     the_mutex->holder_id  = executing->Object.id;
    131     the_mutex->nest_count = 1;
    132     executing->resource_count++;
    133     _ISR_Enable( level );
    134     switch ( the_mutex->Attributes.discipline ) {
    135       case CORE_MUTEX_DISCIPLINES_FIFO:
    136       case CORE_MUTEX_DISCIPLINES_PRIORITY:
    137       case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
    138         /* already the highest priority */
    139         break;
    140       case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
    141       if ( the_mutex->Attributes.priority_ceiling <
    142                                            executing->current_priority ) {
    143         _Thread_Change_priority(
    144           the_mutex->holder,
    145           the_mutex->Attributes.priority_ceiling,
    146           FALSE
    147         );
    148       }
    149     }
    150     return;
    151   }
    152 
    153   if ( _Objects_Are_ids_equal(
    154               _Thread_Executing->Object.id, the_mutex->holder_id ) ) {
    155     if ( _CORE_mutex_Is_nesting_allowed( &the_mutex->Attributes ) )
    156       the_mutex->nest_count++;
    157     else
    158       executing->Wait.return_code = CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED;
    159 
    160     _ISR_Enable( level );
    161     return;
    162   }
    163 
    164   if ( !wait ) {
    165     _ISR_Enable( level );
    166     executing->Wait.return_code = CORE_MUTEX_STATUS_UNSATISFIED_NOWAIT;
    167     return;
    168   }
    169 
    170   _Thread_queue_Enter_critical_section( &the_mutex->Wait_queue );
    171   executing->Wait.queue = &the_mutex->Wait_queue;
    172   executing->Wait.id    = id;
    173   _ISR_Enable( level );
    174 
    175   switch ( the_mutex->Attributes.discipline ) {
    176     case CORE_MUTEX_DISCIPLINES_FIFO:
    177     case CORE_MUTEX_DISCIPLINES_PRIORITY:
    178     case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
    179       break;
    180     case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
    181       if ( the_mutex->holder->current_priority > executing->current_priority ) {
    182         _Thread_Change_priority(
    183           the_mutex->holder,
    184           executing->current_priority,
    185           FALSE
    186         );
    187       }
    188       break;
    189   }
    190 
    191   _Thread_queue_Enqueue( &the_mutex->Wait_queue, timeout );
    192 
    193   if ( _Thread_Executing->Wait.return_code == CORE_MUTEX_STATUS_SUCCESSFUL ) {
    194     switch ( the_mutex->Attributes.discipline ) {
    195       case CORE_MUTEX_DISCIPLINES_FIFO:
    196       case CORE_MUTEX_DISCIPLINES_PRIORITY:
    197       case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
    198         break;
    199       case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
    200         if ( the_mutex->Attributes.priority_ceiling <
    201                                            executing->current_priority ) {
    202           _Thread_Change_priority(
    203             executing,
    204             the_mutex->Attributes.priority_ceiling,
    205             FALSE
    206           );
    207         };
    208         break;
    209     }
    210   }
    211 }
    212 
    213 /*
    214  *  _CORE_mutex_Surrender
    215  *
    216  *  DESCRIPTION:
    217  *
    218  *  This routine frees a unit to the mutex.  If a task was blocked waiting for
    219  *  a unit from this mutex, then that task will be readied and the unit
    220  *  given to that task.  Otherwise, the unit will be returned to the mutex.
    221  *
    222  *  Input parameters:
    223  *    the_mutex            - the mutex to be flushed
    224  *    id                   - id of parent mutex
    225  *    api_mutex_mp_support - api dependent MP support actions
    226  *
    227  *  Output parameters:
    228  *    CORE_MUTEX_STATUS_SUCCESSFUL - if successful
    229  *    core error code              - if unsuccessful
    230  */
    231 
    232 CORE_mutex_Status _CORE_mutex_Surrender(
    233   CORE_mutex_Control                *the_mutex,
    234   Objects_Id                         id,
    235   CORE_mutex_API_mp_support_callout  api_mutex_mp_support
    236 )
    237 {
    238   Thread_Control *the_thread;
    239   Thread_Control *executing;
    240 
    241   executing = _Thread_Executing;
    242 
    243   /*
    244    *  The following code allows a thread (or ISR) other than the thread
    245    *  which acquired the mutex to release that mutex.  This is only
    246  *  allowed when the mutex in question is FIFO or simple Priority
    247    *  discipline.  But Priority Ceiling or Priority Inheritance mutexes
    248    *  must be released by the thread which acquired them.
    249    */
    250 
    251   if ( !_Objects_Are_ids_equal(
    252            _Thread_Executing->Object.id, the_mutex->holder_id ) ) {
    253 
    254     switch ( the_mutex->Attributes.discipline ) {
    255       case CORE_MUTEX_DISCIPLINES_FIFO:
    256       case CORE_MUTEX_DISCIPLINES_PRIORITY:
    257         break;
    258       case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
    259       case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
    260         return( CORE_MUTEX_STATUS_NOT_OWNER_OF_RESOURCE );
    261         break;
    262     }
    263   }
    264 
    265   the_mutex->nest_count--;
    266 
    267   if ( the_mutex->nest_count != 0 )
    268     return( CORE_MUTEX_STATUS_SUCCESSFUL );
    269 
    270   _Thread_Executing->resource_count--;
    271   the_mutex->holder    = NULL;
    272   the_mutex->holder_id = 0;
    273 
    274   /*
    275    *  Whether or not someone is waiting for the mutex, an
    276    *  inherited priority must be lowered if this is the last
    277    *  mutex (i.e. resource) this task has.
    278    */
    279 
    280   switch ( the_mutex->Attributes.discipline ) {
    281     case CORE_MUTEX_DISCIPLINES_FIFO:
    282     case CORE_MUTEX_DISCIPLINES_PRIORITY:
    283       break;
    284     case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
    285     case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
    286       if ( executing->resource_count == 0 &&
    287            executing->real_priority != executing->current_priority ) {
    288          _Thread_Change_priority( executing, executing->real_priority, TRUE );
    289       }
    290       break;
    291   }
    292 
    293 
    294   if ( ( the_thread = _Thread_queue_Dequeue( &the_mutex->Wait_queue ) ) ) {
    295 
    296 #if defined(RTEMS_MULTIPROCESSING)
    297     if ( !_Objects_Is_local_id( the_thread->Object.id ) ) {
    298      
    299       the_mutex->holder     = NULL;
    300       the_mutex->holder_id  = the_thread->Object.id;
    301       the_mutex->nest_count = 1;
    302 
    303       ( *api_mutex_mp_support)( the_thread, id );
    304 
    305     } else
    306 #endif
    307     {
    308 
    309       the_mutex->holder     = the_thread;
    310       the_mutex->holder_id  = the_thread->Object.id;
    311       the_thread->resource_count++;
    312       the_mutex->nest_count = 1;
    313 
    314      /*
    315       *  No special action for priority inheritance or priority ceiling
    316       *  because the_thread is guaranteed to be the highest priority
    317       *  thread waiting for the mutex.
    318       */
    319     }
    320   } else
    321     the_mutex->lock = CORE_MUTEX_UNLOCKED;
    322 
    323   return( CORE_MUTEX_STATUS_SUCCESSFUL );
    324 }
    325 
    326 /*PAGE
    327  *
    328  *  _CORE_mutex_Flush
    329  *
    330  *  This function flushes the mutex's task wait queue.
    331  *
    332  *  Input parameters:
    333  *    the_mutex              - the mutex to be flushed
    334  *    remote_extract_callout - function to invoke remotely
    335  *    status                 - status to pass to thread
    336  *
    337  *  Output parameters:  NONE
    338  */
    339 
    340 void _CORE_mutex_Flush(
    341   CORE_mutex_Control         *the_mutex,
    342   Thread_queue_Flush_callout  remote_extract_callout,
    343   unsigned32                  status
    344 )
    345 {
    346   _Thread_queue_Flush(
    347     &the_mutex->Wait_queue,
    348     remote_extract_callout,
    349     status
    350   );
    351 }
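
The seize and surrender paths removed above enforce the nesting rule for recursive mutexes: only the current holder may seize again, and each nested seize must be matched by a surrender before the mutex is actually released. A minimal sketch, assuming an already initialized CORE_mutex_Control with nesting allowed and no multiprocessing callout:

    /*
     *  Sketch of nested acquisition using the signatures shown above.
     *  "the_mutex" and "id" are assumed to refer to an initialized,
     *  nesting-enabled mutex; the NULL callout assumes a local mutex.
     */
    void example_nested_use( CORE_mutex_Control *the_mutex, Objects_Id id )
    {
      _CORE_mutex_Seize( the_mutex, id, TRUE, 0 );          /* acquire, nest_count = 1 */
      _CORE_mutex_Seize( the_mutex, id, TRUE, 0 );          /* same owner, nest_count = 2 */

      (void) _CORE_mutex_Surrender( the_mutex, id, NULL );  /* nest_count back to 1 */
      (void) _CORE_mutex_Surrender( the_mutex, id, NULL );  /* fully released */
    }
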
  • c/src/exec/score/src/coresem.c

    r93b4e6ef r82cb78d8  
    6868  );
    6969}
    70 
    71 /*PAGE
    72  *
    73  *  _CORE_semaphore_Surrender
    74  *
    75  *  Input parameters:
    76  *    the_semaphore            - the semaphore to be flushed
    77  *    id                       - id of parent semaphore
    78  *    api_semaphore_mp_support - api dependent MP support actions
    79  *
    80  *  Output parameters:
    81  *    CORE_SEMAPHORE_STATUS_SUCCESSFUL - if successful
    82  *    core error code                  - if unsuccessful
    83  *
    84  *
    85  */
    86 
    87 CORE_semaphore_Status _CORE_semaphore_Surrender(
    88   CORE_semaphore_Control                *the_semaphore,
    89   Objects_Id                             id,
    90   CORE_semaphore_API_mp_support_callout  api_semaphore_mp_support
    91 )
    92 {
    93   Thread_Control *the_thread;
    94   ISR_Level       level;
    95   CORE_semaphore_Status status;
    96 
    97   status = CORE_SEMAPHORE_STATUS_SUCCESSFUL;
    98 
    99   if ( (the_thread = _Thread_queue_Dequeue(&the_semaphore->Wait_queue)) ) {
    100 
    101     if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    102       (*api_semaphore_mp_support) ( the_thread, id );
    103 
    104   } else {
    105     _ISR_Disable( level );
    106       if ( the_semaphore->count <= the_semaphore->Attributes.maximum_count )
    107         the_semaphore->count += 1;
    108       else
    109         status = CORE_SEMAPHORE_MAXIMUM_COUNT_EXCEEDED;
    110     _ISR_Enable( level );
    111   }
    112 
    113   return status;
    114 }
    115 
    116 /*PAGE
    117  *
    118  *  _CORE_semaphore_Seize
    119  *
    120  *  This routine attempts to allocate a core semaphore to the calling thread.
    121  *
    122  *  Input parameters:
    123  *    the_semaphore - pointer to semaphore control block
    124  *    id            - id of object to wait on
    125  *    wait          - TRUE if wait is allowed, FALSE otherwise
    126  *    timeout       - number of ticks to wait (0 means forever)
    127  *
    128  *  Output parameters:  NONE
    129  *
    130  *  INTERRUPT LATENCY:
    131  *    available
    132  *    wait
    133  */
    134 
    135 void _CORE_semaphore_Seize(
    136   CORE_semaphore_Control  *the_semaphore,
    137   Objects_Id               id,
    138   boolean                  wait,
    139   Watchdog_Interval        timeout
    140 )
    141 {
    142   Thread_Control *executing;
    143   ISR_Level       level;
    144 
    145   executing = _Thread_Executing;
    146   executing->Wait.return_code = CORE_SEMAPHORE_STATUS_SUCCESSFUL;
    147   _ISR_Disable( level );
    148   if ( the_semaphore->count != 0 ) {
    149     the_semaphore->count -= 1;
    150     _ISR_Enable( level );
    151     return;
    152   }
    153 
    154   if ( !wait ) {
    155     _ISR_Enable( level );
    156     executing->Wait.return_code = CORE_SEMAPHORE_STATUS_UNSATISFIED_NOWAIT;
    157     return;
    158   }
    159 
    160   _Thread_queue_Enter_critical_section( &the_semaphore->Wait_queue );
    161   executing->Wait.queue          = &the_semaphore->Wait_queue;
    162   executing->Wait.id             = id;
    163   _ISR_Enable( level );
    164 
    165   _Thread_queue_Enqueue( &the_semaphore->Wait_queue, timeout );
    166 }
    167 
    168 
    169 /*PAGE
    170  *
    171  *  _CORE_semaphore_Flush
    172  *
    173  *  This function flushes the semaphore's task wait queue.
    174  *
    175  *  Input parameters:
    176  *    the_semaphore          - the semaphore to be flushed
    177  *    remote_extract_callout - function to invoke remotely
    178  *    status                 - status to pass to thread
    179  *
    180  *  Output parameters:  NONE
    181  */
    182  
    183 void _CORE_semaphore_Flush(
    184   CORE_semaphore_Control     *the_semaphore,
    185   Thread_queue_Flush_callout  remote_extract_callout,
    186   unsigned32                  status
    187 )
    188 {
    189  
    190   _Thread_queue_Flush(
    191     &the_semaphore->Wait_queue,
    192     remote_extract_callout,
    193     status
    194   );
    195  
    196 }
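
The semaphore routines removed above implement a classic counting semaphore: surrender either readies the first waiter or increments the count up to maximum_count, while seize either decrements a non-zero count or blocks the caller. A minimal sketch of the handshake, assuming an already initialized, purely local CORE_semaphore_Control:

    /*
     *  Sketch using the signatures shown above.  "the_sem" is assumed to be
     *  an initialized, purely local semaphore (hence the NULL MP callout).
     */
    void example_signal_and_wait( CORE_semaphore_Control *the_sem, Objects_Id id )
    {
      /* Producer: give one unit back; readies a blocked waiter if present. */
      (void) _CORE_semaphore_Surrender( the_sem, id, NULL );

      /* Consumer: take one unit, blocking indefinitely if none is available. */
      _CORE_semaphore_Seize( the_sem, id, TRUE, 0 );
    }
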
  • c/src/exec/score/src/watchdog.c

    r93b4e6ef r82cb78d8  
    3838  _Chain_Initialize_empty( &_Watchdog_Seconds_chain );
    3939}
    40 
    41 /*PAGE
    42  *
    43  *  _Watchdog_Remove
    44  *
    45  *  The routine removes a watchdog from a delta chain and updates
    46  *  the delta counters of the remaining watchdogs.
    47  */
    48 
    49 Watchdog_States _Watchdog_Remove(
    50   Watchdog_Control *the_watchdog
    51 )
    52 {
    53   ISR_Level         level;
    54   Watchdog_States   previous_state;
    55   Watchdog_Control *next_watchdog;
    56 
    57   _ISR_Disable( level );
    58   previous_state = the_watchdog->state;
    59   switch ( previous_state ) {
    60     case WATCHDOG_INACTIVE:
    61       break;
    62 
    63     case WATCHDOG_BEING_INSERTED: 
    64    
    65       /*
    66        *  It is not actually on the chain so just change the state and
    67        *  the Insert operation we interrupted will be aborted.
    68        */
    69       the_watchdog->state = WATCHDOG_INACTIVE;
    70       break;
    71 
    72     case WATCHDOG_ACTIVE:
    73     case WATCHDOG_REMOVE_IT:
    74 
    75       the_watchdog->state = WATCHDOG_INACTIVE;
    76       next_watchdog = _Watchdog_Next( the_watchdog );
    77 
    78       if ( _Watchdog_Next(next_watchdog) )
    79         next_watchdog->delta_interval += the_watchdog->delta_interval;
    80 
    81       if ( _Watchdog_Sync_count )
    82         _Watchdog_Sync_level = _ISR_Nest_level;
    83 
    84       _Chain_Extract_unprotected( &the_watchdog->Node );
    85       break;
    86   }
    87   the_watchdog->stop_time = _Watchdog_Ticks_since_boot;
    88 
    89   _ISR_Enable( level );
    90   return( previous_state );
    91 }
    92 
    93 /*PAGE
    94  *
    95  *  _Watchdog_Adjust
    96  *
    97  *  This routine adjusts the delta chain backward or forward in response
    98  *  to a time change.
    99  *
    100  *  Input parameters:
    101  *    header    - pointer to the delta chain to be adjusted
    102  *    direction - forward or backward adjustment to delta chain
    103  *    units     - units to adjust
    104  *
    105  *  Output parameters:  NONE
    106  */
    107 
    108 void _Watchdog_Adjust(
    109   Chain_Control               *header,
    110   Watchdog_Adjust_directions   direction,
    111   Watchdog_Interval            units
    112 )
    113 {
    114   if ( !_Chain_Is_empty( header ) ) {
    115     switch ( direction ) {
    116       case WATCHDOG_BACKWARD:
    117         _Watchdog_First( header )->delta_interval += units;
    118         break;
    119       case WATCHDOG_FORWARD:
    120         while ( units ) {
    121           if ( units < _Watchdog_First( header )->delta_interval ) {
    122             _Watchdog_First( header )->delta_interval -= units;
    123             break;
    124           } else {
    125             units -= _Watchdog_First( header )->delta_interval;
    126             _Watchdog_First( header )->delta_interval = 1;
    127             _Watchdog_Tickle( header );
    128             if ( _Chain_Is_empty( header ) )
    129               break;
    130           }
    131         }
    132         break;
    133     }
    134   }
    135 }
    136 
    137 /*PAGE
    138  *
    139  *  _Watchdog_Insert
    140  *
    141  *  This routine inserts a watchdog timer onto the appropriate delta
    142  *  chain while updating the delta interval counters.
    143  */
    144 
    145 void _Watchdog_Insert(
    146   Chain_Control         *header,
    147   Watchdog_Control      *the_watchdog
    148 )
    149 {
    150   ISR_Level          level;
    151   Watchdog_Control  *after;
    152   unsigned32         insert_isr_nest_level;
    153   Watchdog_Interval  delta_interval;
    154  
    155 
    156   insert_isr_nest_level   = _ISR_Nest_level;
    157   the_watchdog->state = WATCHDOG_BEING_INSERTED;
    158 
    159   _Watchdog_Sync_count++;
    160 restart:
    161   delta_interval = the_watchdog->initial;
    162 
    163   _ISR_Disable( level );
    164 
    165   for ( after = _Watchdog_First( header ) ;
    166         ;
    167         after = _Watchdog_Next( after ) ) {
    168 
    169      if ( delta_interval == 0 || !_Watchdog_Next( after ) )
    170        break;
    171 
    172      if ( delta_interval < after->delta_interval ) {
    173        after->delta_interval -= delta_interval;
    174        break;
    175      }
    176 
    177      delta_interval -= after->delta_interval;
    178 
    179      /*
    180       *  If you experience problems, comment out the _ISR_Flash line.
    181       *  3.2.0 was the first release with this critical section redesigned.
    182       *  Under certain circumstances, the PREVIOUS critical section algorithm
    183       *  used around this flash point allowed interrupts to execute
    184       *  which violated the design assumptions.  The critical section
    185       *  mechanism used here WAS redesigned to address this.
    186       */
    187 
    188      _ISR_Flash( level );
    189 
    190      if ( the_watchdog->state != WATCHDOG_BEING_INSERTED ) {
    191        goto exit_insert;
    192      }
    193 
    194      if ( _Watchdog_Sync_level > insert_isr_nest_level ) {
    195        _Watchdog_Sync_level = insert_isr_nest_level;
    196        _ISR_Enable( level );
    197        goto restart;
    198      }
    199   }
    200 
    201   _Watchdog_Activate( the_watchdog );
    202 
    203   the_watchdog->delta_interval = delta_interval;
    204 
    205   _Chain_Insert_unprotected( after->Node.previous, &the_watchdog->Node );
    206 
    207   the_watchdog->start_time = _Watchdog_Ticks_since_boot;
    208 
    209 exit_insert:
    210   _Watchdog_Sync_level = insert_isr_nest_level;
    211   _Watchdog_Sync_count--;
    212   _ISR_Enable( level );
    213 }
    214 
    215 /*PAGE
    216  *
    217  *  _Watchdog_Tickle
    218  *
    219  *  This routine decrements the delta counter in response to a tick.  The
    220  *  delta chain is updated accordingly.
    221  *
    222  *  Input parameters:
    223  *    header - pointer to the delta chain to be tickled
    224  *
    225  *  Output parameters: NONE
    226  */
    227 
    228 void _Watchdog_Tickle(
    229   Chain_Control *header
    230 )
    231 {
    232   Watchdog_Control *the_watchdog;
    233 
    234   if ( _Chain_Is_empty( header ) )
    235     return;
    236 
    237   the_watchdog = _Watchdog_First( header );
    238   the_watchdog->delta_interval--;
    239   if ( the_watchdog->delta_interval != 0 )
    240     return;
    241 
    242   do {
    243      switch( _Watchdog_Remove( the_watchdog ) ) {
    244        case WATCHDOG_ACTIVE:
    245          (*the_watchdog->routine)(
    246            the_watchdog->id,
    247            the_watchdog->user_data
    248          );
    249          break;
    250 
    251        case WATCHDOG_INACTIVE:
    252          /*
    253           *  This state indicates that the watchdog is not on any chain.
    254           *  Thus, it is NOT on a chain being tickled.  This case should
    255           *  never occur.
    256           */
    257          break;
    258 
    259        case WATCHDOG_BEING_INSERTED:
    260          /*
    261           *  This state indicates that the watchdog is in the process of
    262           *  BEING inserted on the chain.  Thus, it can NOT be on a chain
    263           *  being tickled.  This case should never occur.
    264           */
    265          break;
    266 
    267        case WATCHDOG_REMOVE_IT:
    268          break;
    269      }
    270      the_watchdog = _Watchdog_First( header );
    271    } while ( !_Chain_Is_empty( header ) &&
    272              (the_watchdog->delta_interval == 0) );
    273 }
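
The watchdog routines removed above maintain a delta chain: each node stores only the ticks remaining after its predecessor expires, so _Watchdog_Tickle only has to decrement the head node on each clock tick. The standalone example below (values invented for illustration) shows the encoding that _Watchdog_Insert computes as it walks the chain:

    /*
     *  Worked example (not from the changeset) of the delta encoding used by
     *  the watchdog chain: each node stores only the ticks remaining after
     *  the node in front of it expires, so only the head is decremented.
     */
    #include <stdio.h>

    int main( void )
    {
      unsigned int timeouts[] = { 5, 8, 8, 12 };   /* requested ticks, already sorted */
      unsigned int previous   = 0;
      unsigned int i;

      for ( i = 0 ; i < sizeof( timeouts ) / sizeof( timeouts[ 0 ] ) ; i++ ) {
        printf( "timeout %2u -> delta_interval %2u\n",
                timeouts[ i ], timeouts[ i ] - previous );
        previous = timeouts[ i ];
      }
      return 0;   /* prints delta intervals 5, 3, 0, 4 */
    }
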
  • cpukit/score/src/coremsg.c

    r93b4e6ef r82cb78d8  
    108108  return TRUE;
    109109}
    110 
    111 /*PAGE
    112  *
    113  *  _CORE_message_queue_Close
    114  *
    115  *  This function closes a message queue by returning all allocated space
    116  *  and flushing the message_queue's task wait queue.
    117  *
    118  *  Input parameters:
    119  *    the_message_queue      - the message_queue to be flushed
    120  *    remote_extract_callout - function to invoke remotely
    121  *    status                 - status to pass to thread
    122  *
    123  *  Output parameters:  NONE
    124  */
    125  
    126 void _CORE_message_queue_Close(
    127   CORE_message_queue_Control *the_message_queue,
    128   Thread_queue_Flush_callout  remote_extract_callout,
    129   unsigned32                  status
    130 )
    131 {
    132  
    133   if ( the_message_queue->number_of_pending_messages != 0 )
    134     (void) _CORE_message_queue_Flush_support( the_message_queue );
    135   else
    136     _Thread_queue_Flush(
    137       &the_message_queue->Wait_queue,
    138       remote_extract_callout,
    139       status
    140     );
    141 
    142   (void) _Workspace_Free( the_message_queue->message_buffers );
    143 
    144 }
    145 
    146 /*PAGE
    147  *
    148  *  _CORE_message_queue_Flush
    149  *
    150  *  This function flushes the message_queue's pending message queue.  The
    151  *  number of messages flushed from the queue is returned.
    152  *
    153  *  Input parameters:
    154  *    the_message_queue - the message_queue to be flushed
    155  *
    156  *  Output parameters:
    157  *    returns - the number of messages flushed from the queue
    158  */
    159  
    160 unsigned32 _CORE_message_queue_Flush(
    161   CORE_message_queue_Control *the_message_queue
    162 )
    163 {
    164   if ( the_message_queue->number_of_pending_messages != 0 )
    165     return _CORE_message_queue_Flush_support( the_message_queue );
    166   else
    167     return 0;
    168 }
    169 
    170 /*PAGE
    171  *
    172  *  _CORE_message_queue_Broadcast
    173  *
    174  *  This function sends a message for every thread waiting on the queue and
    175  *  returns the number of threads made ready by the message.
    176  *
    177  *  Input parameters:
    178  *    the_message_queue            - message is submitted to this message queue
    179  *    buffer                       - pointer to message buffer
    180  *    size                         - size in bytes of message to send
    181  *    id                           - id of message queue
    182  *    api_message_queue_mp_support - api specific mp support callout
    183  *    count                        - area to store number of threads made ready
    184  *
    185  *  Output parameters:
    186  *    count                         - number of threads made ready
    187  *    CORE_MESSAGE_QUEUE_SUCCESSFUL - if successful
    188  *    error code                    - if unsuccessful
    189  */
    190 
    191 CORE_message_queue_Status _CORE_message_queue_Broadcast(
    192   CORE_message_queue_Control                *the_message_queue,
    193   void                                      *buffer,
    194   unsigned32                                 size,
    195   Objects_Id                                 id,
    196   CORE_message_queue_API_mp_support_callout  api_message_queue_mp_support,
    197   unsigned32                                *count
    198 )
    199 {
    200   Thread_Control          *the_thread;
    201   unsigned32               number_broadcasted;
    202   Thread_Wait_information *waitp;
    203   unsigned32               constrained_size;
    204 
    205   number_broadcasted = 0;
    206   while ((the_thread = _Thread_queue_Dequeue(&the_message_queue->Wait_queue))) {
    207     waitp = &the_thread->Wait;
    208     number_broadcasted += 1;
    209 
    210     constrained_size = size;
    211     if ( size > the_message_queue->maximum_message_size )
    212         constrained_size = the_message_queue->maximum_message_size;
    213 
    214     _CORE_message_queue_Copy_buffer(
    215       buffer,
    216       waitp->return_argument,
    217       constrained_size
    218     );
    219 
    220     *(unsigned32 *)the_thread->Wait.return_argument_1 = size;
    221 
    222 #if defined(RTEMS_MULTIPROCESSING)
    223     if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    224       (*api_message_queue_mp_support) ( the_thread, id );
    225 #endif
    226 
    227   }
    228   *count = number_broadcasted;
    229   return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
    230 }
    231 
    232 /*PAGE
    233  *
    234  *  _CORE_message_queue_Seize
    235  *
    236  *  This kernel routine dequeues a message, copies the message buffer to
    237  *  a given destination buffer, and frees the message buffer to the
    238  *  inactive message pool.  If no messages are available, the thread will be
    239  *  blocked if wait is TRUE; otherwise, an error is returned to the thread.
    240  *
    241  *  Input parameters:
    242  *    the_message_queue - pointer to message queue
    243  *    id                - id of object we are waiting on
    244  *    buffer            - pointer to message buffer to be filled
    245  *    size              - pointer to the size of buffer to be filled
    246  *    wait              - TRUE if wait is allowed, FALSE otherwise
    247  *    timeout           - time to wait for a message
    248  *
    249  *  Output parameters:  NONE
    250  *
    251  *  NOTE: Dependent on BUFFER_LENGTH
    252  *
    253  *  INTERRUPT LATENCY:
    254  *    available
    255  *    wait
    256  */
    257 
    258 void _CORE_message_queue_Seize(
    259   CORE_message_queue_Control *the_message_queue,
    260   Objects_Id                  id,
    261   void                       *buffer,
    262   unsigned32                 *size,
    263   boolean                     wait,
    264   Watchdog_Interval           timeout
    265 )
    266 {
    267   ISR_Level                          level;
    268   CORE_message_queue_Buffer_control *the_message;
    269   Thread_Control                    *executing;
    270 
    271   executing = _Thread_Executing;
    272   executing->Wait.return_code = CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
    273   _ISR_Disable( level );
    274   if ( the_message_queue->number_of_pending_messages != 0 ) {
    275     the_message_queue->number_of_pending_messages -= 1;
    276 
    277     the_message = _CORE_message_queue_Get_pending_message( the_message_queue );
    278     _ISR_Enable( level );
    279     *size = the_message->Contents.size;
    280     _CORE_message_queue_Copy_buffer(the_message->Contents.buffer,buffer,*size );
    281     _CORE_message_queue_Free_message_buffer(the_message_queue, the_message );
    282     return;
    283   }
    284 
    285   if ( !wait ) {
    286     _ISR_Enable( level );
    287     executing->Wait.return_code = CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED_NOWAIT;
    288     return;
    289   }
    290 
    291   _Thread_queue_Enter_critical_section( &the_message_queue->Wait_queue );
    292   executing->Wait.queue              = &the_message_queue->Wait_queue;
    293   executing->Wait.id                 = id;
    294   executing->Wait.return_argument    = (void *)buffer;
    295   executing->Wait.return_argument_1  = (void *)size;
    296   _ISR_Enable( level );
    297 
    298   _Thread_queue_Enqueue( &the_message_queue->Wait_queue, timeout );
    299 }
    300 
    301 /*PAGE
    302  *
    303  *  _CORE_message_queue_Flush_support
    304  *
    305  *  This message handler routine removes all messages from a message queue
    306  *  and returns them to the inactive message pool.  The number of messages
    307  *  flushed from the queue is returned.
    308  *
    309  *  Input parameters:
    310  *    the_message_queue - pointer to message queue
    311  *
    312  *  Output parameters:
    313  *    returns - number of messages placed on inactive chain
    314  *
    315  *  INTERRUPT LATENCY:
    316  *    only case
    317  */
    318 
    319 unsigned32 _CORE_message_queue_Flush_support(
    320   CORE_message_queue_Control *the_message_queue
    321 )
    322 {
    323   ISR_Level   level;
    324   Chain_Node *inactive_first;
    325   Chain_Node *message_queue_first;
    326   Chain_Node *message_queue_last;
    327   unsigned32  count;
    328 
    329   _ISR_Disable( level );
    330     inactive_first      = the_message_queue->Inactive_messages.first;
    331     message_queue_first = the_message_queue->Pending_messages.first;
    332     message_queue_last  = the_message_queue->Pending_messages.last;
    333 
    334     the_message_queue->Inactive_messages.first = message_queue_first;
    335     message_queue_last->next = inactive_first;
    336     inactive_first->previous = message_queue_last;
    337     message_queue_first->previous          =
    338                _Chain_Head( &the_message_queue->Inactive_messages );
    339 
    340     _Chain_Initialize_empty( &the_message_queue->Pending_messages );
    341 
    342     count = the_message_queue->number_of_pending_messages;
    343     the_message_queue->number_of_pending_messages = 0;
    344   _ISR_Enable( level );
    345   return count;
    346 }
    347 
    348 /*PAGE
    349  *
    350  *  _CORE_message_queue_Submit
    351  *
    352  *  This routine implements the send and urgent message functions. It
    353  *  processes a message that is to be submitted to the designated
    354  *  message queue.  The message will either be processed as a
    355  *  send message, in which case it will be inserted at the rear of the
    356  *  queue, or as an urgent message, in which case it will be inserted
    357  *  at the front of the queue.
    358  *
    359  *  Input parameters:
    360  *    the_message_queue            - message is submitted to this message queue
    361  *    buffer                       - pointer to message buffer
    362  *    size                         - size in bytes of message to send
    363  *    id                           - id of message queue
    364  *    api_message_queue_mp_support - api specific mp support callout
    365  *    submit_type                  - send or urgent message
    366  *
    367  *  Output parameters:
    368  *    CORE_MESSAGE_QUEUE_SUCCESSFUL - if successful
    369  *    error code                    - if unsuccessful
    370  */
    371 
    372 CORE_message_queue_Status _CORE_message_queue_Submit(
    373   CORE_message_queue_Control                *the_message_queue,
    374   void                                      *buffer,
    375   unsigned32                                 size,
    376   Objects_Id                                 id,
    377   CORE_message_queue_API_mp_support_callout  api_message_queue_mp_support,
    378   CORE_message_queue_Submit_types            submit_type
    379 )
    380 {
    381   CORE_message_queue_Buffer_control   *the_message;
    382   Thread_Control                      *the_thread;
    383 
    384   if ( size > the_message_queue->maximum_message_size )
    385     return CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE;
    386 
    387   /*
    388    * Is there a thread currently waiting on this message queue?
    389    */
    390      
    391   the_thread = _Thread_queue_Dequeue( &the_message_queue->Wait_queue );
    392   if ( the_thread )
    393   {
    394     _CORE_message_queue_Copy_buffer(
    395       buffer,
    396       the_thread->Wait.return_argument,
    397       size
    398     );
    399     *(unsigned32 *)the_thread->Wait.return_argument_1 = size;
    400    
    401 #if defined(RTEMS_MULTIPROCESSING)
    402     if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    403       (*api_message_queue_mp_support) ( the_thread, id );
    404 #endif
    405 
    406     return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
    407   }
    408 
    409   /*
    410    * No one waiting on this one currently.
    411    * Allocate a message buffer and store it away
    412    */
    413 
    414   if ( the_message_queue->number_of_pending_messages ==
    415        the_message_queue->maximum_pending_messages ) {
    416     return CORE_MESSAGE_QUEUE_STATUS_TOO_MANY;
    417   }
    418 
    419   the_message = _CORE_message_queue_Allocate_message_buffer(the_message_queue);
    420   if ( the_message == 0)
    421     return CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED;
    422 
    423   _CORE_message_queue_Copy_buffer( buffer, the_message->Contents.buffer, size );
    424   the_message->Contents.size = size;
    425   the_message->priority  = submit_type;
    426 
    427   the_message_queue->number_of_pending_messages += 1;
    428 
    429   switch ( submit_type ) {
    430     case CORE_MESSAGE_QUEUE_SEND_REQUEST:
    431       _CORE_message_queue_Append( the_message_queue, the_message );
    432       break;
    433     case CORE_MESSAGE_QUEUE_URGENT_REQUEST:
    434       _CORE_message_queue_Prepend( the_message_queue, the_message );
    435       break;
    436     default:
    437       /* XXX interrupt critical section needs to be addressed */
    438       {
    439         CORE_message_queue_Buffer_control *this_message;
    440         Chain_Node                        *the_node;
    441 
    442         the_message->priority = submit_type;
    443         for ( the_node = the_message_queue->Pending_messages.first ;
    444            !_Chain_Is_tail( &the_message_queue->Pending_messages, the_node ) ;
    445            the_node = the_node->next ) {
    446 
    447           this_message = (CORE_message_queue_Buffer_control *) the_node;
    448 
    449           if ( this_message->priority >= the_message->priority )
    450             continue;
    451 
    452           _Chain_Insert( the_node, &the_message->Node );
    453           break;
    454         }
    455       }
    456       break;
    457   }
    458 
    459   /*
    460    *  According to POSIX, does this happen before or after the message
    461  *  is actually enqueued?  It is logical to think afterwards, because
    462    *  the message is actually in the queue at this point.
    463    */
    464 
    465   if ( the_message_queue->number_of_pending_messages == 1 &&
    466        the_message_queue->notify_handler )
    467     (*the_message_queue->notify_handler)( the_message_queue->notify_argument );
    468  
    469   return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
    470 }
  • cpukit/score/src/coremutex.c

    r93b4e6ef r82cb78d8  
    8181}
    8282
    83 /*PAGE
    84  *
    85  *  _CORE_mutex_Seize
    86  *
    87  *  This routine attempts to allocate a mutex to the calling thread.
    88  *
    89  *  Input parameters:
    90  *    the_mutex - pointer to mutex control block
    91  *    id        - id of object to wait on
    92  *    wait      - TRUE if wait is allowed, FALSE otherwise
    93  *    timeout   - number of ticks to wait (0 means forever)
    94  *
    95  *  Output parameters:  NONE
    96  *
    97  *  INTERRUPT LATENCY:
    98  *    available
    99  *    wait
    100  */
    101 
    102 void _CORE_mutex_Seize(
    103   CORE_mutex_Control  *the_mutex,
    104   Objects_Id           id,
    105   boolean              wait,
    106   Watchdog_Interval    timeout
    107 )
    108 {
    109   Thread_Control *executing;
    110   ISR_Level       level;
    111 
    112   executing = _Thread_Executing;
    113   switch ( the_mutex->Attributes.discipline ) {
    114     case CORE_MUTEX_DISCIPLINES_FIFO:
    115     case CORE_MUTEX_DISCIPLINES_PRIORITY:
    116     case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
    117       break;
    118     case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
    119       if ( executing->current_priority <
    120                               the_mutex->Attributes.priority_ceiling) {
    121         executing->Wait.return_code = CORE_MUTEX_STATUS_CEILING_VIOLATED;
    122         return;
    123       }
    124   }
    125   executing->Wait.return_code = CORE_MUTEX_STATUS_SUCCESSFUL;
    126   _ISR_Disable( level );
    127   if ( ! _CORE_mutex_Is_locked( the_mutex ) ) {
    128     the_mutex->lock       = CORE_MUTEX_LOCKED;
    129     the_mutex->holder     = executing;
    130     the_mutex->holder_id  = executing->Object.id;
    131     the_mutex->nest_count = 1;
    132     executing->resource_count++;
    133     _ISR_Enable( level );
    134     switch ( the_mutex->Attributes.discipline ) {
    135       case CORE_MUTEX_DISCIPLINES_FIFO:
    136       case CORE_MUTEX_DISCIPLINES_PRIORITY:
    137       case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
    138         /* already the highest priority */
    139         break;
    140       case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
    141       if ( the_mutex->Attributes.priority_ceiling <
    142                                            executing->current_priority ) {
    143         _Thread_Change_priority(
    144           the_mutex->holder,
    145           the_mutex->Attributes.priority_ceiling,
    146           FALSE
    147         );
    148       }
    149     }
    150     return;
    151   }
    152 
    153   if ( _Objects_Are_ids_equal(
    154               _Thread_Executing->Object.id, the_mutex->holder_id ) ) {
    155     if ( _CORE_mutex_Is_nesting_allowed( &the_mutex->Attributes ) )
    156       the_mutex->nest_count++;
    157     else
    158       executing->Wait.return_code = CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED;
    159 
    160     _ISR_Enable( level );
    161     return;
    162   }
    163 
    164   if ( !wait ) {
    165     _ISR_Enable( level );
    166     executing->Wait.return_code = CORE_MUTEX_STATUS_UNSATISFIED_NOWAIT;
    167     return;
    168   }
    169 
    170   _Thread_queue_Enter_critical_section( &the_mutex->Wait_queue );
    171   executing->Wait.queue = &the_mutex->Wait_queue;
    172   executing->Wait.id    = id;
    173   _ISR_Enable( level );
    174 
    175   switch ( the_mutex->Attributes.discipline ) {
    176     case CORE_MUTEX_DISCIPLINES_FIFO:
    177     case CORE_MUTEX_DISCIPLINES_PRIORITY:
    178     case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
    179       break;
    180     case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
    181       if ( the_mutex->holder->current_priority > executing->current_priority ) {
    182         _Thread_Change_priority(
    183           the_mutex->holder,
    184           executing->current_priority,
    185           FALSE
    186         );
    187       }
    188       break;
    189   }
    190 
    191   _Thread_queue_Enqueue( &the_mutex->Wait_queue, timeout );
    192 
    193   if ( _Thread_Executing->Wait.return_code == CORE_MUTEX_STATUS_SUCCESSFUL ) {
    194     switch ( the_mutex->Attributes.discipline ) {
    195       case CORE_MUTEX_DISCIPLINES_FIFO:
    196       case CORE_MUTEX_DISCIPLINES_PRIORITY:
    197       case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
    198         break;
    199       case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
    200         if ( the_mutex->Attributes.priority_ceiling <
    201                                            executing->current_priority ) {
    202           _Thread_Change_priority(
    203             executing,
    204             the_mutex->Attributes.priority_ceiling,
    205             FALSE
    206           );
    207         }
    208         break;
    209     }
    210   }
    211 }
    212 
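The CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING branches above do two things: reject a caller whose priority is already better (numerically lower) than the ceiling, and promote the new holder to the ceiling on a successful seize. A minimal standalone sketch of that check follows; the toy_* names are hypothetical and not RTEMS code, and blocking plus ISR protection are omitted.

#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned current_priority; } toy_thread;
typedef struct { bool locked; unsigned ceiling; toy_thread *holder; } toy_mutex;

/* Reject callers already above the ceiling, then raise the new holder
 * to the ceiling, mirroring the two priority ceiling branches above.
 * Lower number == more important, as in RTEMS. */
static bool toy_ceiling_seize( toy_mutex *m, toy_thread *t )
{
  if ( t->current_priority < m->ceiling )
    return false;                      /* ceiling violated */
  if ( m->locked )
    return false;                      /* caller would block; omitted here */
  m->locked = true;
  m->holder = t;
  if ( m->ceiling < t->current_priority )
    t->current_priority = m->ceiling;  /* promote holder to the ceiling */
  return true;
}

int main( void )
{
  toy_thread t = { .current_priority = 10 };
  toy_mutex  m = { .locked = false, .ceiling = 5, .holder = NULL };
  bool ok = toy_ceiling_seize( &m, &t );
  printf( "seized: %d, holder priority now %u\n", ok, t.current_priority );
  return 0;
}
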
    213 /*
    214  *  _CORE_mutex_Surrender
    215  *
    216  *  DESCRIPTION:
    217  *
    218  *  This routine releases the mutex.  If a task was blocked waiting for
    219  *  the mutex, then that task will be readied and given ownership of the
    220  *  mutex.  Otherwise, the mutex is simply unlocked.
    221  *
    222  *  Input parameters:
    223  *    the_mutex            - the mutex being surrendered
    224  *    id                   - id of parent mutex
    225  *    api_mutex_mp_support - api dependent MP support actions
    226  *
    227  *  Output parameters:
    228  *    CORE_MUTEX_STATUS_SUCCESSFUL - if successful
    229  *    core error code              - if unsuccessful
    230  */
    231 
    232 CORE_mutex_Status _CORE_mutex_Surrender(
    233   CORE_mutex_Control                *the_mutex,
    234   Objects_Id                         id,
    235   CORE_mutex_API_mp_support_callout  api_mutex_mp_support
    236 )
    237 {
    238   Thread_Control *the_thread;
    239   Thread_Control *executing;
    240 
    241   executing = _Thread_Executing;
    242 
    243   /*
    244    *  The following code allows a thread (or ISR) other than the thread
    245    *  which acquired the mutex to release that mutex.  This is only
    246    *  allowed when the mutex in question is FIFO or simple Priority
    247    *  discipline.  But Priority Ceiling or Priority Inheritance mutexes
    248    *  must be released by the thread which acquired them.
    249    */
    250 
    251   if ( !_Objects_Are_ids_equal(
    252            _Thread_Executing->Object.id, the_mutex->holder_id ) ) {
    253 
    254     switch ( the_mutex->Attributes.discipline ) {
    255       case CORE_MUTEX_DISCIPLINES_FIFO:
    256       case CORE_MUTEX_DISCIPLINES_PRIORITY:
    257         break;
    258       case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
    259       case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
    260         return( CORE_MUTEX_STATUS_NOT_OWNER_OF_RESOURCE );
    261         break;
    262     }
    263   }
    264 
    265   the_mutex->nest_count--;
    266 
    267   if ( the_mutex->nest_count != 0 )
    268     return( CORE_MUTEX_STATUS_SUCCESSFUL );
    269 
    270   _Thread_Executing->resource_count--;
    271   the_mutex->holder    = NULL;
    272   the_mutex->holder_id = 0;
    273 
    274   /*
    275    *  Whether or not someone is waiting for the mutex, an
    276    *  inherited priority must be lowered if this is the last
    277    *  mutex (i.e. resource) this task has.
    278    */
    279 
    280   switch ( the_mutex->Attributes.discipline ) {
    281     case CORE_MUTEX_DISCIPLINES_FIFO:
    282     case CORE_MUTEX_DISCIPLINES_PRIORITY:
    283       break;
    284     case CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING:
    285     case CORE_MUTEX_DISCIPLINES_PRIORITY_INHERIT:
    286       if ( executing->resource_count == 0 &&
    287            executing->real_priority != executing->current_priority ) {
    288          _Thread_Change_priority( executing, executing->real_priority, TRUE );
    289       }
    290       break;
    291   }
    292 
    293 
    294   if ( ( the_thread = _Thread_queue_Dequeue( &the_mutex->Wait_queue ) ) ) {
    295 
    296 #if defined(RTEMS_MULTIPROCESSING)
    297     if ( !_Objects_Is_local_id( the_thread->Object.id ) ) {
    298      
    299       the_mutex->holder     = NULL;
    300       the_mutex->holder_id  = the_thread->Object.id;
    301       the_mutex->nest_count = 1;
    302 
    303       ( *api_mutex_mp_support)( the_thread, id );
    304 
    305     } else
    306 #endif
    307     {
    308 
    309       the_mutex->holder     = the_thread;
    310       the_mutex->holder_id  = the_thread->Object.id;
    311       the_thread->resource_count++;
    312       the_mutex->nest_count = 1;
    313 
    314      /*
    315       *  No special action for priority inheritance or priority ceiling
    316       *  because the_thread is guaranteed to be the highest priority
    317       *  thread waiting for the mutex.
    318       */
    319     }
    320   } else
    321     the_mutex->lock = CORE_MUTEX_UNLOCKED;
    322 
    323   return( CORE_MUTEX_STATUS_SUCCESSFUL );
    324 }
    325 
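The surrender path only restores the holder's real priority once its last resource is released, whether the mutex used priority inheritance or priority ceiling. A standalone sketch of that nest-count and priority-restoration logic is shown below; the toy_* names are hypothetical and the wait queue handling is omitted.

#include <stdio.h>

typedef struct {
  unsigned current_priority;
  unsigned real_priority;
  unsigned resource_count;
} toy_thread;

typedef struct { unsigned nest_count; toy_thread *holder; } toy_mutex;

static void toy_surrender( toy_mutex *m )
{
  if ( --m->nest_count != 0 )
    return;                                   /* still nested, holder keeps it */
  toy_thread *t = m->holder;
  m->holder = NULL;
  if ( --t->resource_count == 0 &&
       t->real_priority != t->current_priority )
    t->current_priority = t->real_priority;   /* drop inherited/ceiling boost */
}

int main( void )
{
  toy_thread t = { .current_priority = 5, .real_priority = 10, .resource_count = 1 };
  toy_mutex  m = { .nest_count = 1, .holder = &t };
  toy_surrender( &m );
  printf( "priority restored to %u\n", t.current_priority );
  return 0;
}
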
    326 /*PAGE
    327  *
    328  *  _CORE_mutex_Flush
    329  *
    330  *  This function flushes the mutex's task wait queue.
    331  *
    332  *  Input parameters:
    333  *    the_mutex              - the mutex to be flushed
    334  *    remote_extract_callout - function to invoke remotely
    335  *    status                 - status to pass to thread
    336  *
    337  *  Output parameters:  NONE
    338  */
    339 
    340 void _CORE_mutex_Flush(
    341   CORE_mutex_Control         *the_mutex,
    342   Thread_queue_Flush_callout  remote_extract_callout,
    343   unsigned32                  status
    344 )
    345 {
    346   _Thread_queue_Flush(
    347     &the_mutex->Wait_queue,
    348     remote_extract_callout,
    349     status
    350   );
    351 }
  • cpukit/score/src/coresem.c

    r93b4e6ef r82cb78d8  
    6868  );
    6969}
    70 
    71 /*PAGE
    72  *
    73  *  _CORE_semaphore_Surrender
    74  *
    75  *  Input parameters:
    76  *    the_semaphore            - the semaphore being surrendered
    77  *    id                       - id of parent semaphore
    78  *    api_semaphore_mp_support - api dependent MP support actions
    79  *
    80  *  Output parameters:
    81  *    CORE_SEMAPHORE_STATUS_SUCCESSFUL - if successful
    82  *    core error code                  - if unsuccessful
    83  */
    86 
    87 CORE_semaphore_Status _CORE_semaphore_Surrender(
    88   CORE_semaphore_Control                *the_semaphore,
    89   Objects_Id                             id,
    90   CORE_semaphore_API_mp_support_callout  api_semaphore_mp_support
    91 )
    92 {
    93   Thread_Control *the_thread;
    94   ISR_Level       level;
    95   CORE_semaphore_Status status;
    96 
    97   status = CORE_SEMAPHORE_STATUS_SUCCESSFUL;
    98 
    99   if ( (the_thread = _Thread_queue_Dequeue(&the_semaphore->Wait_queue)) ) {
    100 
    101     if ( !_Objects_Is_local_id( the_thread->Object.id ) )
    102       (*api_semaphore_mp_support) ( the_thread, id );
    103 
    104   } else {
    105     _ISR_Disable( level );
    106       if ( the_semaphore->count <= the_semaphore->Attributes.maximum_count )
    107         the_semaphore->count += 1;
    108       else
    109         status = CORE_SEMAPHORE_MAXIMUM_COUNT_EXCEEDED;
    110     _ISR_Enable( level );
    111   }
    112 
    113   return status;
    114 }
    115 
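Surrender therefore either hands the unit directly to a dequeued waiter or, when no thread is waiting, bumps the count subject to maximum_count. A standalone sketch of that bounded "V" operation follows; toy_sem is hypothetical and the thread queue is reduced to a waiter counter.

#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned count; unsigned maximum_count; unsigned waiters; } toy_sem;

static bool toy_sem_surrender( toy_sem *s )
{
  if ( s->waiters > 0 ) {
    s->waiters--;                          /* unit handed straight to a waiter */
    return true;
  }
  if ( s->count <= s->maximum_count ) {    /* same bound test as above */
    s->count += 1;
    return true;
  }
  return false;                            /* maximum count exceeded */
}

int main( void )
{
  toy_sem s = { .count = 0, .maximum_count = 2, .waiters = 1 };
  toy_sem_surrender( &s );                 /* a waiter exists: hand it the unit */
  toy_sem_surrender( &s );                 /* no waiter: count 0 -> 1 */
  printf( "count=%u waiters=%u\n", s.count, s.waiters );
  return 0;
}
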
    116 /*PAGE
    117  *
    118  *  _CORE_semaphore_Seize
    119  *
    120  *  This routine attempts to allocate a core semaphore to the calling thread.
    121  *
    122  *  Input parameters:
    123  *    the_semaphore - pointer to semaphore control block
    124  *    id            - id of object to wait on
    125  *    wait          - TRUE if wait is allowed, FALSE otherwise
    126  *    timeout       - number of ticks to wait (0 means forever)
    127  *
    128  *  Output parameters:  NONE
    129  *
    130  *  INTERRUPT LATENCY:
    131  *    available
    132  *    wait
    133  */
    134 
    135 void _CORE_semaphore_Seize(
    136   CORE_semaphore_Control  *the_semaphore,
    137   Objects_Id               id,
    138   boolean                  wait,
    139   Watchdog_Interval        timeout
    140 )
    141 {
    142   Thread_Control *executing;
    143   ISR_Level       level;
    144 
    145   executing = _Thread_Executing;
    146   executing->Wait.return_code = CORE_SEMAPHORE_STATUS_SUCCESSFUL;
    147   _ISR_Disable( level );
    148   if ( the_semaphore->count != 0 ) {
    149     the_semaphore->count -= 1;
    150     _ISR_Enable( level );
    151     return;
    152   }
    153 
    154   if ( !wait ) {
    155     _ISR_Enable( level );
    156     executing->Wait.return_code = CORE_SEMAPHORE_STATUS_UNSATISFIED_NOWAIT;
    157     return;
    158   }
    159 
    160   _Thread_queue_Enter_critical_section( &the_semaphore->Wait_queue );
    161   executing->Wait.queue          = &the_semaphore->Wait_queue;
    162   executing->Wait.id             = id;
    163   _ISR_Enable( level );
    164 
    165   _Thread_queue_Enqueue( &the_semaphore->Wait_queue, timeout );
    166 }
    167 
    168 
    169 /*PAGE
    170  *
    171  *  _CORE_semaphore_Flush
    172  *
    173  *  This function flushes the semaphore's task wait queue.
    174  *
    175  *  Input parameters:
    176  *    the_semaphore          - the semaphore to be flushed
    177  *    remote_extract_callout - function to invoke remotely
    178  *    status                 - status to pass to thread
    179  *
    180  *  Output parameters:  NONE
    181  */
    182  
    183 void _CORE_semaphore_Flush(
    184   CORE_semaphore_Control     *the_semaphore,
    185   Thread_queue_Flush_callout  remote_extract_callout,
    186   unsigned32                  status
    187 )
    188 {
    189  
    190   _Thread_queue_Flush(
    191     &the_semaphore->Wait_queue,
    192     remote_extract_callout,
    193     status
    194   );
    195  
    196 }
  • cpukit/score/src/watchdog.c

    r93b4e6ef r82cb78d8  
    3838  _Chain_Initialize_empty( &_Watchdog_Seconds_chain );
    3939}
    40 
    41 /*PAGE
    42  *
    43  *  _Watchdog_Remove
    44  *
    45  *  The routine removes a watchdog from a delta chain and updates
    46  *  the delta counters of the remaining watchdogs.
    47  */
    48 
    49 Watchdog_States _Watchdog_Remove(
    50   Watchdog_Control *the_watchdog
    51 )
    52 {
    53   ISR_Level         level;
    54   Watchdog_States   previous_state;
    55   Watchdog_Control *next_watchdog;
    56 
    57   _ISR_Disable( level );
    58   previous_state = the_watchdog->state;
    59   switch ( previous_state ) {
    60     case WATCHDOG_INACTIVE:
    61       break;
    62 
    63     case WATCHDOG_BEING_INSERTED: 
    64    
    65       /*
    66        *  It is not actually on the chain so just change the state and
    67        *  the Insert operation we interrupted will be aborted.
    68        */
    69       the_watchdog->state = WATCHDOG_INACTIVE;
    70       break;
    71 
    72     case WATCHDOG_ACTIVE:
    73     case WATCHDOG_REMOVE_IT:
    74 
    75       the_watchdog->state = WATCHDOG_INACTIVE;
    76       next_watchdog = _Watchdog_Next( the_watchdog );
    77 
    78       if ( _Watchdog_Next(next_watchdog) )
    79         next_watchdog->delta_interval += the_watchdog->delta_interval;
    80 
    81       if ( _Watchdog_Sync_count )
    82         _Watchdog_Sync_level = _ISR_Nest_level;
    83 
    84       _Chain_Extract_unprotected( &the_watchdog->Node );
    85       break;
    86   }
    87   the_watchdog->stop_time = _Watchdog_Ticks_since_boot;
    88 
    89   _ISR_Enable( level );
    90   return( previous_state );
    91 }
    92 
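The key step above is folding the removed watchdog's delta_interval into its successor: each node on a delta chain stores only the ticks beyond its predecessor, so the absolute expiration times of later nodes must not shift when a node leaves. A small standalone illustration with plain arrays (not RTEMS code):

#include <stdio.h>

int main( void )
{
  /* Three timers expiring at absolute ticks 3, 7 and 12 are stored as
   * deltas 3, 4, 5.  Removing the middle one (delta 4) must leave the
   * last timer still expiring at tick 12. */
  unsigned delta[] = { 3, 4, 5 };
  unsigned removed = 1;                    /* index being removed */
  delta[removed + 1] += delta[removed];    /* successor absorbs the delta */
  printf( "remaining deltas: %u, %u -> expirations 3 and %u\n",
          delta[0], delta[2], delta[0] + delta[2] );
  return 0;
}
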
    93 /*PAGE
    94  *
    95  *  _Watchdog_Adjust
    96  *
    97  *  This routine adjusts the delta chain backward or forward in response
    98  *  to a time change.
    99  *
    100  *  Input parameters:
    101  *    header    - pointer to the delta chain to be adjusted
    102  *    direction - forward or backward adjustment to delta chain
    103  *    units     - units to adjust
    104  *
    105  *  Output parameters:
    106  */
    107 
    108 void _Watchdog_Adjust(
    109   Chain_Control               *header,
    110   Watchdog_Adjust_directions   direction,
    111   Watchdog_Interval            units
    112 )
    113 {
    114   if ( !_Chain_Is_empty( header ) ) {
    115     switch ( direction ) {
    116       case WATCHDOG_BACKWARD:
    117         _Watchdog_First( header )->delta_interval += units;
    118         break;
    119       case WATCHDOG_FORWARD:
    120         while ( units ) {
    121           if ( units < _Watchdog_First( header )->delta_interval ) {
    122             _Watchdog_First( header )->delta_interval -= units;
    123             break;
    124           } else {
    125             units -= _Watchdog_First( header )->delta_interval;
    126             _Watchdog_First( header )->delta_interval = 1;
    127             _Watchdog_Tickle( header );
    128             if ( _Chain_Is_empty( header ) )
    129               break;
    130           }
    131         }
    132         break;
    133     }
    134   }
    135 }
    136 
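A forward adjustment consumes the head deltas and fires any timer whose time has already passed; the routine above does this by forcing the head delta to 1 and calling _Watchdog_Tickle(). Below is a simplified standalone model of the same walk, with arrays instead of a chain and firing reduced to a printf.

#include <stdio.h>

int main( void )
{
  unsigned delta[] = { 3, 4, 5 };          /* expirations at ticks 3, 7, 12 */
  unsigned n = 3, first = 0, units = 8;    /* clock jumped forward 8 ticks */

  while ( units && first < n ) {
    if ( units < delta[first] ) {
      delta[first] -= units;               /* partially consume the head timer */
      units = 0;
    } else {
      units -= delta[first];
      printf( "timer %u fires now\n", first );   /* stands in for _Watchdog_Tickle */
      first++;
    }
  }
  if ( first < n )
    printf( "next timer in %u ticks\n", delta[first] );
  return 0;
}
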
    137 /*PAGE
    138  *
    139  *  _Watchdog_Insert
    140  *
    141  *  This routine inserts a watchdog timer on to the appropriate delta
    142  *  chain while updating the delta interval counters.
    143  */
    144 
    145 void _Watchdog_Insert(
    146   Chain_Control         *header,
    147   Watchdog_Control      *the_watchdog
    148 )
    149 {
    150   ISR_Level          level;
    151   Watchdog_Control  *after;
    152   unsigned32         insert_isr_nest_level;
    153   Watchdog_Interval  delta_interval;
    154  
    155 
    156   insert_isr_nest_level   = _ISR_Nest_level;
    157   the_watchdog->state = WATCHDOG_BEING_INSERTED;
    158 
    159   _Watchdog_Sync_count++;
    160 restart:
    161   delta_interval = the_watchdog->initial;
    162 
    163   _ISR_Disable( level );
    164 
    165   for ( after = _Watchdog_First( header ) ;
    166         ;
    167         after = _Watchdog_Next( after ) ) {
    168 
    169      if ( delta_interval == 0 || !_Watchdog_Next( after ) )
    170        break;
    171 
    172      if ( delta_interval < after->delta_interval ) {
    173        after->delta_interval -= delta_interval;
    174        break;
    175      }
    176 
    177      delta_interval -= after->delta_interval;
    178 
    179      /*
    180       *  If you experience problems comment out the _ISR_Flash line. 
    181       *  3.2.0 was the first release with this critical section redesigned.
    182       *  Under certain circumstances, the PREVIOUS critical section algorithm
    183       *  used around this flash point allowed interrupts to execute
    184       *  which violated the design assumptions.  The critical section
    185       *  mechanism used here WAS redesigned to address this.
    186       */
    187 
    188      _ISR_Flash( level );
    189 
    190      if ( the_watchdog->state != WATCHDOG_BEING_INSERTED ) {
    191        goto exit_insert;
    192      }
    193 
    194      if ( _Watchdog_Sync_level > insert_isr_nest_level ) {
    195        _Watchdog_Sync_level = insert_isr_nest_level;
    196        _ISR_Enable( level );
    197        goto restart;
    198      }
    199   }
    200 
    201   _Watchdog_Activate( the_watchdog );
    202 
    203   the_watchdog->delta_interval = delta_interval;
    204 
    205   _Chain_Insert_unprotected( after->Node.previous, &the_watchdog->Node );
    206 
    207   the_watchdog->start_time = _Watchdog_Ticks_since_boot;
    208 
    209 exit_insert:
    210   _Watchdog_Sync_level = insert_isr_nest_level;
    211   _Watchdog_Sync_count--;
    212   _ISR_Enable( level );
    213 }
    214 
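Stripped of the _Watchdog_Sync_level/restart machinery that protects against ISRs re-entering the insert, the algorithm above reduces to: walk the chain subtracting existing deltas from the new interval, shrink the first node that outlives the new timer, and link the new node in front of it. A minimal, non-ISR-safe standalone sketch with a hypothetical toy_wdog type:

#include <stdio.h>

typedef struct toy_wdog {
  struct toy_wdog *next;
  unsigned         delta_interval;
} toy_wdog;

static void toy_insert( toy_wdog **head, toy_wdog *w, unsigned interval )
{
  toy_wdog **link = head;
  while ( *link && interval >= (*link)->delta_interval ) {
    interval -= (*link)->delta_interval;   /* consume predecessor deltas */
    link = &(*link)->next;
  }
  if ( *link )
    (*link)->delta_interval -= interval;   /* successor keeps its absolute time */
  w->delta_interval = interval;
  w->next = *link;
  *link = w;
}

int main( void )
{
  toy_wdog a = { NULL, 3 }, b = { NULL, 4 }, c = { NULL, 0 };
  toy_wdog *head = &a; a.next = &b;        /* expirations at ticks 3 and 7 */
  toy_insert( &head, &c, 5 );              /* new timer due at tick 5 */
  for ( toy_wdog *p = head; p; p = p->next )
    printf( "%u ", p->delta_interval );    /* prints: 3 2 2 */
  printf( "\n" );
  return 0;
}
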
    215 /*PAGE
    216  *
    217  *  _Watchdog_Tickle
    218  *
    219  *  This routine decrements the delta counter in response to a tick.  The
    220  *  delta chain is updated accordingly.
    221  *
    222  *  Input parameters:
    223  *    header - pointer to the delta chain to be tickled
    224  *
    225  *  Output parameters: NONE
    226  */
    227 
    228 void _Watchdog_Tickle(
    229   Chain_Control *header
    230 )
    231 {
    232   Watchdog_Control *the_watchdog;
    233 
    234   if ( _Chain_Is_empty( header ) )
    235     return;
    236 
    237   the_watchdog = _Watchdog_First( header );
    238   the_watchdog->delta_interval--;
    239   if ( the_watchdog->delta_interval != 0 )
    240     return;
    241 
    242   do {
    243      switch( _Watchdog_Remove( the_watchdog ) ) {
    244        case WATCHDOG_ACTIVE:
    245          (*the_watchdog->routine)(
    246            the_watchdog->id,
    247            the_watchdog->user_data
    248          );
    249          break;
    250 
    251        case WATCHDOG_INACTIVE:
    252          /*
    253           *  This state indicates that the watchdog is not on any chain.
    254           *  Thus, it is NOT on a chain being tickled.  This case should
    255           *  never occur.
    256           */
    257          break;
    258 
    259        case WATCHDOG_BEING_INSERTED:
    260          /*
    261           *  This state indicates that the watchdog is in the process of
    262           *  BEING inserted on the chain.  Thus, it can NOT be on a chain
    263           *  being tickled.  This case should never occur.
    264           */
    265          break;
    266 
    267        case WATCHDOG_REMOVE_IT:
    268          break;
    269      }
    270      the_watchdog = _Watchdog_First( header );
    271    } while ( !_Chain_Is_empty( header ) &&
    272              (the_watchdog->delta_interval == 0) );
    273 }
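Only the head delta is decremented per tick; every watchdog that reaches a zero delta, including ones queued immediately behind the head, fires in the same tick via the do/while loop above. A standalone sketch of one tick against a toy delta chain (arrays instead of a chain; the routine call is reduced to a printf):

#include <stdio.h>

int main( void )
{
  unsigned delta[] = { 1, 0, 4 };     /* two timers due on the next tick */
  unsigned n = 3, first = 0;

  if ( n == 0 )
    return 0;                         /* empty chain: nothing to do */
  if ( --delta[first] != 0 )          /* the tick consumes one unit at the head */
    return 0;
  do {
    printf( "timer %u expires\n", first );   /* stands in for (*routine)(id, data) */
    first++;
  } while ( first < n && delta[first] == 0 );
  if ( first < n )
    printf( "next timer in %u ticks\n", delta[first] );
  return 0;
}
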