Changeset dce48791 in rtems


Timestamp: May 23, 2016, 11:37:59 AM
Author: Sebastian Huber <sebastian.huber@…>
Branches: master
Children: fd6fde8
Parents: d887c1b
git-author: Sebastian Huber <sebastian.huber@…> (05/23/16 11:37:59)
git-committer: Sebastian Huber <sebastian.huber@…> (05/26/16 19:44:31)
Message:

score: Add Status_Control for all APIs

Unify the status codes of the Classic and POSIX APIs to use the new enum
Status_Control. This eliminates the Thread_Control::Wait::timeout_code
field and the timeout parameter of _Thread_queue_Enqueue_critical() and
_MPCI_Send_request_packet(). It gets rid of the status code translation
tables and instead uses simple bit operations to get the status for a
particular API. This enables translation of status code constants at
compile time. Add _Thread_Wait_get_status() to avoid direct access to
thread-internal data structures.
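
For orientation, the sketch below shows the bit-packing idea behind the new
Status_Control values under an assumed layout: the Classic rtems_status_code
in the low byte and the matching POSIX errno in the byte above it. The
STATUS_BUILD() helper, the byte layout, and the concrete numeric values are
hypothetical and not taken from the actual <rtems/score/status.h>; the
extraction macros STATUS_GET_POSIX()/STATUS_GET_CLASSIC() and the accessors
_POSIX_Get_error() and _Status_Get() correspond to the ones used in the
diffs below. Because each extraction is a plain mask or shift applied to an
enum constant, the compiler can fold the per-API status at compile time,
which is why the translation tables become unnecessary.

    /*
     * Illustrative sketch only -- not the real <rtems/score/status.h>.
     */
    #include <errno.h>

    #define STATUS_CLASSIC_BITS 8

    /* Hypothetical constructor: pack both API codes into one value. */
    #define STATUS_BUILD( classic, posix ) \
      ( ( (unsigned int) ( posix ) << STATUS_CLASSIC_BITS ) \
        | (unsigned int) ( classic ) )

    /* Classic status: mask off the low byte. */
    #define STATUS_GET_CLASSIC( status ) \
      ( ( status ) & ( ( 1U << STATUS_CLASSIC_BITS ) - 1U ) )

    /* POSIX errno: shift the upper byte down. */
    #define STATUS_GET_POSIX( status ) \
      ( ( ( status ) >> STATUS_CLASSIC_BITS ) & 0xffU )

    typedef enum {
      STATUS_SUCCESSFUL  = STATUS_BUILD( 0, 0 ),         /* RTEMS_SUCCESSFUL, no errno */
      STATUS_TIMEOUT     = STATUS_BUILD( 6, ETIMEDOUT ), /* assumes RTEMS_TIMEOUT == 6 */
      STATUS_UNAVAILABLE = STATUS_BUILD( 13, EBUSY )     /* assumes RTEMS_UNSATISFIED == 13 */
    } Status_Control;

    /* The per-API accessors seen in the diffs then reduce to one-liners. */
    static inline int _POSIX_Get_error( Status_Control status )
    {
      return (int) STATUS_GET_POSIX( status );
    }

    static inline unsigned int _Status_Get( Status_Control status )
    {
      /* In RTEMS proper this would return rtems_status_code. */
      return STATUS_GET_CLASSIC( status );
    }

Under these assumptions, _POSIX_Get_error( STATUS_TIMEOUT ) folds to
ETIMEDOUT and _Status_Get( STATUS_TIMEOUT ) folds to the Classic timeout
code, with no table lookup at run time.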

Files: 1 added, 9 deleted, 99 edited

  • cpukit/libnetworking/rtems/rtems_glue.c

    rd887c1b rdce48791  
    373373#ifdef RTEMS_FAST_MUTEX
    374374        Thread_queue_Context queue_context;
    375         Thread_Control *executing;
     375        Status_Control status;
    376376        if (!the_networkSemaphore)
    377377                rtems_panic ("rtems-net: network sema obtain: network not initialised\n");
    378378        _Thread_queue_Context_initialize(&queue_context, NULL);
    379379        _ISR_lock_ISR_disable(&queue_context.Lock_context);
    380         executing = _Thread_Executing;
    381         _CORE_mutex_Seize (
     380        status = _CORE_mutex_Seize (
    382381                &the_networkSemaphore->Core_control.mutex,
    383                 executing,
     382                _Thread_Executing,
    384383                1,              /* wait */
    385384                0,              /* forever */
    386385                &queue_context
    387386                );
    388         if (executing->Wait.return_code)
    389                 rtems_panic ("rtems-net: can't obtain network sema: %d\n",
    390                  executing->Wait.return_code);
     387        if (status != STATUS_SUCCESSFUL)
     388                rtems_panic ("rtems-net: can't obtain network sema: %d\n", status);
    391389#else
    392390        rtems_status_code sc;
     
    407405#ifdef RTEMS_FAST_MUTEX
    408406        Thread_queue_Context queue_context;
    409         CORE_mutex_Status status;
     407        Status_Control status;
    410408
    411409        if (!the_networkSemaphore)
     
    417415                &queue_context
    418416                );
    419         if (status != CORE_MUTEX_STATUS_SUCCESSFUL)
     417        if (status != STATUS_SUCCESSFUL)
    420418                rtems_panic ("rtems-net: can't release network sema: %i\n");
    421419#else
  • cpukit/posix/Makefile.am

    rd887c1b rdce48791  
    7575    src/barrierattrinit.c src/barrierattrsetpshared.c src/pbarrier.c \
    7676    src/pbarrierdestroy.c src/pbarrierinit.c \
    77     src/pbarriertranslatereturncode.c src/pbarrierwait.c
     77    src/pbarrierwait.c
    7878
    7979## CANCEL_C_FILES
     
    100100    src/mqueuereceive.c src/mqueuerecvsupp.c src/mqueuesend.c \
    101101    src/mqueuesendsupp.c src/mqueuesetattr.c src/mqueuetimedreceive.c \
    102     src/mqueuetimedsend.c src/mqueuetranslatereturncode.c \
     102    src/mqueuetimedsend.c \
    103103    src/mqueueunlink.c
    104104
     
    111111    src/mutexgetprioceiling.c src/mutexinit.c src/mutexlock.c \
    112112    src/mutexlocksupp.c src/mutexsetprioceiling.c src/mutextimedlock.c \
    113     src/mutextranslatereturncode.c src/mutextrylock.c src/mutexunlock.c
     113    src/mutextrylock.c src/mutexunlock.c
    114114
    115115## PTHREAD_C_FILES
     
    176176    src/prwlocktryrdlock.c src/prwlocktrywrlock.c src/prwlockunlock.c \
    177177    src/prwlockwrlock.c src/rwlockattrdestroy.c src/rwlockattrgetpshared.c \
    178     src/rwlockattrinit.c src/rwlockattrsetpshared.c \
    179     src/prwlocktranslatereturncode.c
     178    src/rwlockattrinit.c src/rwlockattrsetpshared.c
    180179
    181180## SEMAPHORE_C_FILES
    182181libposix_a_SOURCES += src/semaphore.c src/semaphorecreatesupp.c \
    183182    src/semaphoredeletesupp.c \
    184     src/semaphoretranslatereturncode.c src/semaphorewaitsupp.c \
     183    src/semaphorewaitsupp.c \
    185184    src/semclose.c src/semdestroy.c src/semgetvalue.c src/seminit.c \
    186185    src/semopen.c src/sempost.c src/semtimedwait.c src/semtrywait.c \
     
    189188## SPINLOCK_C_FILES
    190189libposix_a_SOURCES += src/pspin.c src/pspindestroy.c src/pspininit.c \
    191     src/pspinlock.c src/pspinlocktranslatereturncode.c src/pspintrylock.c \
     190    src/pspinlock.c src/pspintrylock.c \
    192191    src/pspinunlock.c
    193192
  • cpukit/posix/include/rtems/posix/barrierimpl.h

    rd887c1b rdce48791  
    3737
    3838extern Objects_Information _POSIX_Barrier_Information;
    39 
    40 /**
    41  * @brief POSIX translate barrier return code.
    42  *
    43  * This routine translates SuperCore Barrier status codes into the
    44  * corresponding POSIX ones.
    45  *
    46  * @param[in] the_barrier_status is the SuperCore status.
    47  *
    48  * @return the corresponding POSIX status
    49  */
    50 int _POSIX_Barrier_Translate_core_barrier_return_code(
    51   CORE_barrier_Status  the_barrier_status
    52 );
    5339
    5440/**
  • cpukit/posix/include/rtems/posix/mqueueimpl.h

    rd887c1b rdce48791  
    150150
    151151/**
    152  *  @brief POSIX Message Queue Translate Score Return Code
    153  *
    154  */
    155 int _POSIX_Message_queue_Translate_core_message_queue_return_code(
    156   uint32_t   the_message_queue_status
    157 );
    158 
    159 /**
    160152 *  @brief POSIX Message Queue Remove from Namespace
    161153 */
  • cpukit/posix/include/rtems/posix/muteximpl.h

    rd887c1b rdce48791  
    4141
    4242/**
    43  *  This array contains a mapping from Score Mutex return codes to
    44  *  POSIX return codes.
    45  */
    46 extern const int _POSIX_Mutex_Return_codes[CORE_MUTEX_STATUS_LAST + 1];
    47 
    48 /**
    4943 *  @brief POSIX Mutex Allocate
    5044 *
     
    8478
    8579/**
    86  * @brief Convert Score mutex status codes into POSIX status values
    87  *
    88  * A support routine which converts core mutex status codes into the
    89  * appropriate POSIX status values.
    90  *
    91  * @param[in] the_mutex_status is the mutex status code to translate
    92  *
    93  * @retval 0 Mutex status code indicates the operation completed successfully.
    94  * @retval EBUSY Mutex status code indicates that the operation unable to
    95  *         complete immediately because the resource was unavailable.
    96  * @retval EDEADLK Mutex status code indicates that an attempt was made to
    97  *         relock a mutex for which nesting is not configured.
    98  * @retval EPERM Mutex status code indicates that an attempt was made to
    99  *         release a mutex by a thread other than the thread which locked it.
    100  * @retval EINVAL Mutex status code indicates that the thread was blocked
    101  *         waiting for an operation to complete and the mutex was deleted.
    102  * @retval ETIMEDOUT Mutex status code indicates that the calling task was
    103  *         willing to block but the operation was unable to complete
    104  *         within the time allotted because the resource never became
    105  *         available.
    106  */
    107 RTEMS_INLINE_ROUTINE int _POSIX_Mutex_Translate_core_mutex_return_code(
    108   CORE_mutex_Status  the_mutex_status
    109 )
    110 {
    111   /*
    112    *  Internal consistency check for bad status from SuperCore
    113    */
    114   #if defined(RTEMS_DEBUG)
    115     if ( the_mutex_status > CORE_MUTEX_STATUS_LAST )
    116       return EINVAL;
    117   #endif
    118   return _POSIX_Mutex_Return_codes[the_mutex_status];
    119 }
    120 
    121 /**
    12280 *  @brief POSIX Mutex Get (Interrupt Disable)
    12381 *
  • cpukit/posix/include/rtems/posix/posixapi.h

    rd887c1b rdce48791  
    2424#include <rtems/score/apimutex.h>
    2525#include <rtems/score/objectimpl.h>
     26#include <rtems/score/threadimpl.h>
     27#include <rtems/seterr.h>
    2628
    2729/**
     
    5961  _Assert( (size_t) error < RTEMS_ARRAY_SIZE( _POSIX_Get_by_name_error_table ) );
    6062  return _POSIX_Get_by_name_error_table[ error ];
     63}
     64
     65RTEMS_INLINE_ROUTINE int _POSIX_Get_error( Status_Control status )
     66{
     67  return STATUS_GET_POSIX( status );
     68}
     69
     70RTEMS_INLINE_ROUTINE int _POSIX_Get_error_after_wait(
     71  const Thread_Control *executing
     72)
     73{
     74  return _POSIX_Get_error( _Thread_Wait_get_status( executing ) );
     75}
     76
     77RTEMS_INLINE_ROUTINE int _POSIX_Zero_or_minus_one_plus_errno(
     78  Status_Control status
     79)
     80{
     81  if ( status == STATUS_SUCCESSFUL ) {
     82    return 0;
     83  }
     84
     85  rtems_set_errno_and_return_minus_one( _POSIX_Get_error( status ) );
    6186}
    6287
  • cpukit/posix/include/rtems/posix/rwlockimpl.h

    rd887c1b rdce48791  
    3939
    4040/**
    41  * @brief POSIX translate core RWLock return code.
    42  *
    43  * This routine translates SuperCore RWLock status codes into the
    44  * corresponding POSIX ones.
    45  *
    46  *
    47  * @param[in] the_RWLock_status is the SuperCore status.
    48  *
    49  * @return the corresponding POSIX status
    50  * @retval 0 The status indicates that the operation completed successfully.
    51  * @retval EINVAL The status indicates that the thread was blocked waiting for
    52  * an operation to complete and the RWLock was deleted.
    53  * @retval EBUSY This status indicates that the RWLock was not
    54  * immediately available.
    55  * @retval ETIMEDOUT This status indicates that the calling task was
    56  * willing to block but the operation was unable to complete within
    57  * the time allotted because the resource never became available.
    58  */
    59 int _POSIX_RWLock_Translate_core_RWLock_return_code(
    60   CORE_RWLock_Status  the_RWLock_status
    61 );
    62 
    63 /**
    6441 * @brief Allocate a RWLock control block.
    6542 *
  • cpukit/posix/include/rtems/posix/semaphoreimpl.h

    rd887c1b rdce48791  
    3434 */
    3535extern Objects_Information _POSIX_Semaphore_Information;
    36 
    37 /**
    38  *  This defines the mapping from Score status codes to POSIX return codes.
    39  */
    40 extern const int
    41   _POSIX_Semaphore_Return_codes[CORE_SEMAPHORE_STATUS_LAST + 1];
    4236
    4337RTEMS_INLINE_ROUTINE POSIX_Semaphore_Control *
     
    109103  Watchdog_Interval    timeout
    110104);
    111 
    112 /**
    113  *  @brief POSIX Semaphore Translate Score to POSIX Return Codes
    114  *
    115  *  A support routine which converts core semaphore status codes into the
    116  *  appropriate POSIX status values.
    117  */
    118 RTEMS_INLINE_ROUTINE int
    119 _POSIX_Semaphore_Translate_core_semaphore_return_code(
    120   CORE_semaphore_Status  the_semaphore_status
    121 )
    122 {
    123   /*
    124    *  Internal consistency check for bad status from SuperCore
    125    */
    126   #if defined(RTEMS_DEBUG)
    127     if ( the_semaphore_status > CORE_SEMAPHORE_STATUS_LAST )
    128       return EINVAL;
    129   #endif
    130   return _POSIX_Semaphore_Return_codes[the_semaphore_status];
    131 }
    132105 
    133106/**
  • cpukit/posix/include/rtems/posix/spinlockimpl.h

    rd887c1b rdce48791  
    3636
    3737extern Objects_Information _POSIX_Spinlock_Information;
    38 
    39 /**
    40  * @brief Translate core spinlock status code.
    41  *
    42  * This routine translates SuperCore Spinlock status codes into the
    43  * corresponding POSIX ones.
    44  *
    45  * @param[in] the_spinlock_status is the SuperCore status.
    46  *
    47  * @return the corresponding POSIX status
    48  */
    49 int _POSIX_Spinlock_Translate_core_spinlock_return_code(
    50   CORE_spinlock_Status  the_spinlock_status
    51 );
    5238
    5339/**
  • cpukit/posix/src/condwaitsupp.c

    rd887c1b rdce48791  
    2121#include <rtems/posix/condimpl.h>
    2222#include <rtems/posix/muteximpl.h>
     23#include <rtems/posix/posixapi.h>
    2324#include <rtems/score/assert.h>
    2425#include <rtems/score/statesimpl.h>
     26#include <rtems/score/status.h>
    2527#include <rtems/score/threaddispatch.h>
    2628
     
    3739  POSIX_Mutex_Control               *the_mutex;
    3840  Thread_queue_Context               queue_context;
    39   int                                status;
    40   int                                mutex_status;
    41   CORE_mutex_Status                  core_mutex_status;
     41  int                                error;
     42  int                                mutex_error;
     43  Status_Control                     status;
    4244  Per_CPU_Control                   *cpu_self;
    4345  Thread_Control                    *executing;
     
    8587
    8688  if ( !already_timedout ) {
    87     executing->Wait.return_code = 0;
    8889    _Thread_queue_Enqueue_critical(
    8990      &the_cond->Wait_queue.Queue,
     
    9293      STATES_WAITING_FOR_CONDITION_VARIABLE,
    9394      timeout,
    94       ETIMEDOUT,
    9595      &queue_context.Lock_context
    9696    );
    9797  } else {
    9898    _POSIX_Condition_variables_Release( the_cond, &queue_context );
    99     executing->Wait.return_code = ETIMEDOUT;
     99    executing->Wait.return_code = STATUS_TIMEOUT;
    100100  }
    101101
    102102  _ISR_lock_ISR_disable( &queue_context.Lock_context );
    103   core_mutex_status = _CORE_mutex_Surrender(
    104     &the_mutex->Mutex,
    105     &queue_context
    106   );
    107   _Assert( core_mutex_status == CORE_MUTEX_STATUS_SUCCESSFUL );
    108   (void) core_mutex_status;
     103  status = _CORE_mutex_Surrender( &the_mutex->Mutex, &queue_context );
     104  _Assert( status == STATUS_SUCCESSFUL );
     105  (void) status;
    109106
    110107  /*
     
    115112  _Thread_Dispatch_enable( cpu_self );
    116113
    117   status = (int) executing->Wait.return_code;
     114  error = _POSIX_Get_error_after_wait( executing );
    118115
    119116  /*
     
    125122   */
    126123
    127   if ( status == EINTR ) {
    128     status = 0;
     124  if ( error == EINTR ) {
     125    error = 0;
    129126  }
    130127
     
    133130   */
    134131
    135   mutex_status = pthread_mutex_lock( mutex );
    136   if ( mutex_status != 0 ) {
     132  mutex_error = pthread_mutex_lock( mutex );
     133  if ( mutex_error != 0 ) {
    137134    return EINVAL;
    138135  }
    139136
    140   return status;
     137  return error;
    141138}
  • cpukit/posix/src/mqueuerecvsupp.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/posix/mqueueimpl.h>
     22#include <rtems/posix/posixapi.h>
    2223
    2324#include <fcntl.h>
     
    4950  bool                         do_wait;
    5051  Thread_Control              *executing;
     52  Status_Control               status;
    5153
    5254  the_mq = _POSIX_Message_queue_Get( mqdes, &queue_context );
     
    9698   */
    9799  executing = _Thread_Executing;
    98   _CORE_message_queue_Seize(
     100  status = _CORE_message_queue_Seize(
    99101    &the_mq->Message_queue,
    100102    executing,
     
    112114  }
    113115
    114   if ( executing->Wait.return_code != CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL ) {
    115     rtems_set_errno_and_return_minus_one(
    116       _POSIX_Message_queue_Translate_core_message_queue_return_code(
    117         executing->Wait.return_code
    118       )
    119     );
     116  if ( status != STATUS_SUCCESSFUL ) {
     117    rtems_set_errno_and_return_minus_one( _POSIX_Get_error( status ) );
    120118  }
    121119
  • cpukit/posix/src/mqueuesendsupp.c

    rd887c1b rdce48791  
    4646  POSIX_Message_queue_Control *the_mq;
    4747  Thread_queue_Context         queue_context;
    48   CORE_message_queue_Status    msg_status;
     48  Status_Control               status;
    4949  bool                         do_wait;
    5050  Thread_Control              *executing;
     
    9393   */
    9494  executing = _Thread_Executing;
    95   msg_status = _CORE_message_queue_Submit(
     95  status = _CORE_message_queue_Submit(
    9696    &the_mq->Message_queue,
    9797    executing,
     
    103103    &queue_context
    104104  );
    105 
    106   if ( msg_status != CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL ) {
    107     rtems_set_errno_and_return_minus_one(
    108       _POSIX_Message_queue_Translate_core_message_queue_return_code(
    109         msg_status
    110       )
    111     );
    112   }
    113 
    114   return 0;
     105  return _POSIX_Zero_or_minus_one_plus_errno( status );
    115106}
  • cpukit/posix/src/mutexlocksupp.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/posix/muteximpl.h>
     22#include <rtems/posix/posixapi.h>
    2223
    2324THREAD_QUEUE_OBJECT_ASSERT( POSIX_Mutex_Control, Mutex.Wait_queue );
     
    3132  POSIX_Mutex_Control  *the_mutex;
    3233  Thread_queue_Context  queue_context;
    33   Thread_Control       *executing;
     34  Status_Control        status;
    3435
    3536  the_mutex = _POSIX_Mutex_Get( mutex, &queue_context );
     
    3940  }
    4041
    41   executing = _Thread_Executing;
    42   _CORE_mutex_Seize(
     42  status = _CORE_mutex_Seize(
    4343    &the_mutex->Mutex,
    44     executing,
     44    _Thread_Executing,
    4545    blocking,
    4646    timeout,
    4747    &queue_context
    4848  );
    49   return _POSIX_Mutex_Translate_core_mutex_return_code(
    50     (CORE_mutex_Status) executing->Wait.return_code
    51   );
     49  return _POSIX_Get_error( status );
    5250}
  • cpukit/posix/src/mutexunlock.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/posix/muteximpl.h>
     22#include <rtems/posix/posixapi.h>
    2223
    2324/*
     
    3233{
    3334  POSIX_Mutex_Control  *the_mutex;
    34   CORE_mutex_Status     status;
    3535  Thread_queue_Context  queue_context;
     36  Status_Control        status;
    3637
    3738  the_mutex = _POSIX_Mutex_Get( mutex, &queue_context );
     
    4243
    4344  status = _CORE_mutex_Surrender( &the_mutex->Mutex, &queue_context );
    44   return _POSIX_Mutex_Translate_core_mutex_return_code( status );
     45  return _POSIX_Get_error( status );
    4546}
  • cpukit/posix/src/nanosleep.c

    rd887c1b rdce48791  
    9393    executing,
    9494    STATES_DELAYING | STATES_INTERRUPTIBLE_BY_SIGNAL,
    95     ticks,
    96     0
     95    ticks
    9796  );
    9897
  • cpukit/posix/src/pbarrierwait.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/posix/barrierimpl.h>
     22#include <rtems/posix/posixapi.h>
    2223
    2324THREAD_QUEUE_OBJECT_ASSERT( POSIX_Barrier_Control, Barrier.Wait_queue );
    24 
    25 /**
    26  * This directive allows a thread to wait at a barrier.
    27  *
    28  * @param[in] barrier is the barrier id
    29  *
    30  * @retval 0 if successful
    31  * @retval PTHREAD_BARRIER_SERIAL_THREAD if successful
    32  * @retval error_code if unsuccessful
    33  */
    3425
    3526int pthread_barrier_wait(
     
    3930  POSIX_Barrier_Control *the_barrier;
    4031  Thread_queue_Context   queue_context;
    41   Thread_Control        *executing;
     32  Status_Control         status;
    4233
    4334  if ( barrier == NULL ) {
     
    5142  }
    5243
    53   executing = _Thread_Executing;
    54   _CORE_barrier_Seize(
     44  status = _CORE_barrier_Seize(
    5545    &the_barrier->Barrier,
    56     executing,
     46    _Thread_Executing,
    5747    true,
    58     0,
     48    WATCHDOG_NO_TIMEOUT,
    5949    &queue_context
    6050  );
    61   return _POSIX_Barrier_Translate_core_barrier_return_code(
    62     executing->Wait.return_code
    63   );
     51  return _POSIX_Get_error( status );
    6452}
  • cpukit/posix/src/prwlockrdlock.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/posix/rwlockimpl.h>
     22#include <rtems/posix/posixapi.h>
    2223
    2324int pthread_rwlock_rdlock(
     
    2728  POSIX_RWLock_Control *the_rwlock;
    2829  Thread_queue_Context  queue_context;
    29   Thread_Control       *executing;
     30  Status_Control        status;
    3031
    3132  the_rwlock = _POSIX_RWLock_Get( rwlock, &queue_context );
     
    3536  }
    3637
    37   executing = _Thread_Executing;
    38   _CORE_RWLock_Seize_for_reading(
     38  status = _CORE_RWLock_Seize_for_reading(
    3939    &the_rwlock->RWLock,
    40     executing,
     40    _Thread_Executing,
    4141    true,                 /* we are willing to wait forever */
    4242    0,
    4343    &queue_context
    4444  );
    45   return _POSIX_RWLock_Translate_core_RWLock_return_code(
    46     (CORE_RWLock_Status) executing->Wait.return_code
    47   );
     45  return _POSIX_Get_error( status );
    4846}
  • cpukit/posix/src/prwlocktimedrdlock.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/posix/rwlockimpl.h>
     22#include <rtems/posix/posixapi.h>
    2223#include <rtems/score/todimpl.h>
    2324
     
    3132  Watchdog_Interval                        ticks;
    3233  bool                                     do_wait;
    33   TOD_Absolute_timeout_conversion_results  status;
    34   Thread_Control                          *executing;
     34  TOD_Absolute_timeout_conversion_results  timeout_status;
     35  Status_Control                           status;
    3536
    3637  /*
     
    4041   *  So we check the abstime provided, and hold on to whether it
    4142   *  is valid or not.  If it isn't correct and in the future,
    42    *  then we do a polling operation and convert the UNSATISFIED
     43   *  then we do a polling operation and convert the STATUS_UNAVAILABLE
    4344   *  status into the appropriate error.
    4445   *
    45    *  If the status is TOD_ABSOLUTE_TIMEOUT_INVALID,
     46   *  If the timeout status is TOD_ABSOLUTE_TIMEOUT_INVALID,
    4647   *  TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST, or TOD_ABSOLUTE_TIMEOUT_IS_NOW,
    4748   *  then we should not wait.
    4849   */
    49   status = _TOD_Absolute_timeout_to_ticks( abstime, &ticks );
    50   do_wait = ( status == TOD_ABSOLUTE_TIMEOUT_IS_IN_FUTURE );
     50  timeout_status = _TOD_Absolute_timeout_to_ticks( abstime, &ticks );
     51  do_wait = ( timeout_status == TOD_ABSOLUTE_TIMEOUT_IS_IN_FUTURE );
    5152
    5253  the_rwlock = _POSIX_RWLock_Get( rwlock, &queue_context );
     
    5657  }
    5758
    58   executing = _Thread_Executing;
    59   _CORE_RWLock_Seize_for_reading(
     59  status = _CORE_RWLock_Seize_for_reading(
    6060    &the_rwlock->RWLock,
    61     executing,
     61    _Thread_Executing,
    6262    do_wait,
    6363    ticks,
     
    6565  );
    6666
    67   if (
    68     !do_wait
    69       && ( executing->Wait.return_code == CORE_RWLOCK_UNAVAILABLE )
    70   ) {
    71     if ( status == TOD_ABSOLUTE_TIMEOUT_INVALID ) {
     67  if ( !do_wait && status == STATUS_UNAVAILABLE ) {
     68    if ( timeout_status == TOD_ABSOLUTE_TIMEOUT_INVALID ) {
    7269      return EINVAL;
    7370    }
    7471
    7572    if (
    76       status == TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST
    77         || status == TOD_ABSOLUTE_TIMEOUT_IS_NOW
     73      timeout_status == TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST
     74        || timeout_status == TOD_ABSOLUTE_TIMEOUT_IS_NOW
    7875    ) {
    7976      return ETIMEDOUT;
     
    8178  }
    8279
    83   return _POSIX_RWLock_Translate_core_RWLock_return_code(
    84     (CORE_RWLock_Status) executing->Wait.return_code
    85   );
     80  return _POSIX_Get_error( status );
    8681}
  • cpukit/posix/src/prwlocktimedwrlock.c

    rd887c1b rdce48791  
    2222
    2323#include <rtems/posix/rwlockimpl.h>
     24#include <rtems/posix/posixapi.h>
    2425#include <rtems/score/todimpl.h>
    2526
     
    3334  Watchdog_Interval                        ticks;
    3435  bool                                     do_wait;
    35   TOD_Absolute_timeout_conversion_results  status;
    36   Thread_Control                          *executing;
     36  TOD_Absolute_timeout_conversion_results  timeout_status;
     37  Status_Control                           status;
    3738
    3839  /*
     
    4243   *  So we check the abstime provided, and hold on to whether it
    4344   *  is valid or not.  If it isn't correct and in the future,
    44    *  then we do a polling operation and convert the UNSATISFIED
     45   *  then we do a polling operation and convert the STATUS_UNAVAILABLE
    4546   *  status into the appropriate error.
    4647   *
    47    *  If the status is TOD_ABSOLUTE_TIMEOUT_INVALID,
     48   *  If the timeout status is TOD_ABSOLUTE_TIMEOUT_INVALID,
    4849   *  TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST, or TOD_ABSOLUTE_TIMEOUT_IS_NOW,
    4950   *  then we should not wait.
    5051   */
    51   status = _TOD_Absolute_timeout_to_ticks( abstime, &ticks );
    52   do_wait = ( status == TOD_ABSOLUTE_TIMEOUT_IS_IN_FUTURE );
     52  timeout_status = _TOD_Absolute_timeout_to_ticks( abstime, &ticks );
     53  do_wait = ( timeout_status == TOD_ABSOLUTE_TIMEOUT_IS_IN_FUTURE );
    5354
    5455  the_rwlock = _POSIX_RWLock_Get( rwlock, &queue_context );
     
    5859  }
    5960
    60   executing = _Thread_Executing;
    61   _CORE_RWLock_Seize_for_writing(
     61  status = _CORE_RWLock_Seize_for_writing(
    6262    &the_rwlock->RWLock,
    63     executing,
     63    _Thread_Executing,
    6464    do_wait,
    6565    ticks,
     
    6767  );
    6868
    69   if (
    70     !do_wait
    71       && ( executing->Wait.return_code == CORE_RWLOCK_UNAVAILABLE )
    72   ) {
    73     if ( status == TOD_ABSOLUTE_TIMEOUT_INVALID ) {
     69  if ( !do_wait && status == STATUS_UNAVAILABLE ) {
     70    if ( timeout_status == TOD_ABSOLUTE_TIMEOUT_INVALID ) {
    7471      return EINVAL;
    7572    }
    7673
    7774    if (
    78       status == TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST
    79         || status == TOD_ABSOLUTE_TIMEOUT_IS_NOW
     75      timeout_status == TOD_ABSOLUTE_TIMEOUT_IS_IN_PAST
     76        || timeout_status == TOD_ABSOLUTE_TIMEOUT_IS_NOW
    8077    ) {
    8178      return ETIMEDOUT;
     
    8380  }
    8481
    85   return _POSIX_RWLock_Translate_core_RWLock_return_code(
    86     (CORE_RWLock_Status) executing->Wait.return_code
    87   );
     82  return _POSIX_Get_error( status );
    8883}
  • cpukit/posix/src/prwlocktryrdlock.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/posix/rwlockimpl.h>
     22#include <rtems/posix/posixapi.h>
    2223
    2324int pthread_rwlock_tryrdlock(
     
    2728  POSIX_RWLock_Control *the_rwlock;
    2829  Thread_queue_Context  queue_context;
    29   Thread_Control       *executing;
     30  Status_Control        status;
    3031
    3132  the_rwlock = _POSIX_RWLock_Get( rwlock, &queue_context );
     
    3536  }
    3637
    37   executing = _Thread_Executing;
    38   _CORE_RWLock_Seize_for_reading(
     38  status = _CORE_RWLock_Seize_for_reading(
    3939    &the_rwlock->RWLock,
    40     executing,
     40    _Thread_Executing,
    4141    false,                  /* do not wait for the rwlock */
    4242    0,
    4343    &queue_context
    4444  );
    45   return _POSIX_RWLock_Translate_core_RWLock_return_code(
    46     (CORE_RWLock_Status) executing->Wait.return_code
    47   );
     45  return _POSIX_Get_error( status );
    4846}
  • cpukit/posix/src/prwlocktrywrlock.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/posix/rwlockimpl.h>
     22#include <rtems/posix/posixapi.h>
    2223
    2324int pthread_rwlock_trywrlock(
     
    2728  POSIX_RWLock_Control *the_rwlock;
    2829  Thread_queue_Context  queue_context;
    29   Thread_Control       *executing;
     30  Status_Control        status;
    3031
    3132  the_rwlock = _POSIX_RWLock_Get( rwlock, &queue_context );
     
    3536  }
    3637
    37   executing = _Thread_Executing;
    38   _CORE_RWLock_Seize_for_writing(
     38  status = _CORE_RWLock_Seize_for_writing(
    3939    &the_rwlock->RWLock,
    40     executing,
     40    _Thread_Executing,
    4141    false,                 /* we are not willing to wait */
    4242    0,
    4343    &queue_context
    4444  );
    45   return _POSIX_RWLock_Translate_core_RWLock_return_code(
    46     (CORE_RWLock_Status) executing->Wait.return_code
    47   );
     45  return _POSIX_Get_error( status );
    4846}
  • cpukit/posix/src/prwlockunlock.c

    rd887c1b rdce48791  
    2222
    2323#include <rtems/posix/rwlockimpl.h>
     24#include <rtems/posix/posixapi.h>
    2425
    2526int pthread_rwlock_unlock(
     
    2930  POSIX_RWLock_Control *the_rwlock;
    3031  Thread_queue_Context  queue_context;
    31   CORE_RWLock_Status    status;
     32  Status_Control        status;
    3233
    3334  the_rwlock = _POSIX_RWLock_Get( rwlock, &queue_context );
     
    3839
    3940  status = _CORE_RWLock_Surrender( &the_rwlock->RWLock, &queue_context );
    40   return _POSIX_RWLock_Translate_core_RWLock_return_code( status );
     41  return _POSIX_Get_error( status );
    4142}
  • cpukit/posix/src/prwlockwrlock.c

    rd887c1b rdce48791  
    2121#endif
    2222
    23 #include <pthread.h>
    24 #include <errno.h>
    25 
    2623#include <rtems/posix/rwlockimpl.h>
     24#include <rtems/posix/posixapi.h>
    2725
    2826THREAD_QUEUE_OBJECT_ASSERT( POSIX_RWLock_Control, RWLock.Wait_queue );
     
    3432  POSIX_RWLock_Control *the_rwlock;
    3533  Thread_queue_Context  queue_context;
    36   Thread_Control       *executing;
     34  Status_Control        status;
    3735
    3836  the_rwlock = _POSIX_RWLock_Get( rwlock, &queue_context );
     
    4240  }
    4341
    44   executing = _Thread_Executing;
    45   _CORE_RWLock_Seize_for_writing(
     42  status = _CORE_RWLock_Seize_for_writing(
    4643    &the_rwlock->RWLock,
    47     executing,
     44    _Thread_Executing,
    4845    true,          /* do not timeout -- wait forever */
    4946    0,
    5047    &queue_context
    5148  );
    52   return _POSIX_RWLock_Translate_core_RWLock_return_code(
    53     (CORE_RWLock_Status) executing->Wait.return_code
    54   );
     49  return _POSIX_Get_error( status );
    5550}
  • cpukit/posix/src/psignalunblockthread.c

    rd887c1b rdce48791  
    9999  POSIX_API_Control  *api;
    100100  int                 signo;
    101   int                 hold_errno;
     101  uint32_t            hold_errno;
    102102
    103103  (void) action;
     
    199199
    200200    if ( (the_thread->Wait.option & mask) || (api->signals_unblocked & mask) ) {
    201       the_thread->Wait.return_code = EINTR;
     201      the_thread->Wait.return_code = STATUS_INTERRUPTED;
    202202
    203203      the_info = (siginfo_t *) the_thread->Wait.return_argument;
     
    241241
    242242    if ( _States_Is_interruptible_by_signal( the_thread->current_state ) ) {
    243       the_thread->Wait.return_code = EINTR;
     243      the_thread->Wait.return_code = STATUS_INTERRUPTED;
    244244      _Thread_queue_Extract_with_proxy( the_thread );
    245245    }
  • cpukit/posix/src/pspinlock.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/posix/spinlockimpl.h>
    22 
    23 #include <errno.h>
     22#include <rtems/posix/posixapi.h>
    2423
    2524int pthread_spin_lock( pthread_spinlock_t *spinlock )
     
    2726  POSIX_Spinlock_Control *the_spinlock;
    2827  ISR_lock_Context        lock_context;
    29   CORE_spinlock_Status    status;
     28  Status_Control          status;
    3029
    3130  the_spinlock = _POSIX_Spinlock_Get( spinlock, &lock_context );
     
    4039    &lock_context
    4140  );
    42   return _POSIX_Spinlock_Translate_core_spinlock_return_code( status );
     41  return _POSIX_Get_error( status );
    4342}
  • cpukit/posix/src/pspintrylock.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/posix/spinlockimpl.h>
    22 
    23 #include <errno.h>
     22#include <rtems/posix/posixapi.h>
    2423
    2524int pthread_spin_trylock( pthread_spinlock_t *spinlock )
     
    2726  POSIX_Spinlock_Control *the_spinlock;
    2827  ISR_lock_Context        lock_context;
    29   CORE_spinlock_Status    status;
     28  Status_Control          status;
    3029
    3130  the_spinlock = _POSIX_Spinlock_Get( spinlock, &lock_context );
     
    4039    &lock_context
    4140  );
    42   return _POSIX_Spinlock_Translate_core_spinlock_return_code( status );
     41  return _POSIX_Get_error( status );
    4342}
  • cpukit/posix/src/pspinunlock.c

    rd887c1b rdce48791  
    2222
    2323#include <rtems/posix/spinlockimpl.h>
    24 
    25 #include <errno.h>
     24#include <rtems/posix/posixapi.h>
    2625
    2726int pthread_spin_unlock( pthread_spinlock_t *spinlock )
     
    2928  POSIX_Spinlock_Control *the_spinlock;
    3029  ISR_lock_Context        lock_context;
    31   CORE_spinlock_Status    status;
     30  Status_Control          status;
    3231
    3332  the_spinlock = _POSIX_Spinlock_Get( spinlock, &lock_context );
     
    3736
    3837  status = _CORE_spinlock_Surrender( &the_spinlock->Spinlock, &lock_context );
    39   return _POSIX_Spinlock_Translate_core_spinlock_return_code( status );
     38  return _POSIX_Get_error( status );
    4039}
  • cpukit/posix/src/pthreadjoin.c

    rd887c1b rdce48791  
    2727
    2828#include <rtems/posix/threadsup.h>
     29#include <rtems/posix/posixapi.h>
    2930#include <rtems/score/threadimpl.h>
    3031#include <rtems/score/statesimpl.h>
     
    7374    );
    7475
    75     if ( executing->Wait.return_code != 0 ) {
    76       _Assert( executing->Wait.return_code == EINTR );
     76    if ( _POSIX_Get_error_after_wait( executing ) != 0 ) {
     77      _Assert( _POSIX_Get_error_after_wait( executing ) == EINTR );
    7778      return EINTR;
    7879    }
  • cpukit/posix/src/semaphorewaitsupp.c

    rd887c1b rdce48791  
    2222
    2323#include <rtems/posix/semaphoreimpl.h>
     24#include <rtems/posix/posixapi.h>
    2425
    2526THREAD_QUEUE_OBJECT_ASSERT( POSIX_Semaphore_Control, Semaphore.Wait_queue );
     
    3233{
    3334  POSIX_Semaphore_Control *the_semaphore;
    34   Thread_Control          *executing;
    3535  Thread_queue_Context     queue_context;
     36  Status_Control           status;
    3637
    3738  the_semaphore = _POSIX_Semaphore_Get( sem, &queue_context );
     
    4142  }
    4243
    43   executing = _Thread_Executing;
    44 
    45   _CORE_semaphore_Seize(
     44  status = _CORE_semaphore_Seize(
    4645    &the_semaphore->Semaphore,
    47     executing,
     46    _Thread_Executing,
    4847    blocking,
    4948    timeout,
    5049    &queue_context
    5150  );
    52 
    53   if ( executing->Wait.return_code == CORE_SEMAPHORE_STATUS_SUCCESSFUL ) {
    54     return 0;
    55   }
    56 
    57   rtems_set_errno_and_return_minus_one(
    58     _POSIX_Semaphore_Translate_core_semaphore_return_code(
    59       executing->Wait.return_code
    60     )
    61   );
     51  return _POSIX_Zero_or_minus_one_plus_errno( status );
    6252}
  • cpukit/posix/src/sempost.c

    rd887c1b rdce48791  
    2323
    2424#include <rtems/posix/semaphoreimpl.h>
     25#include <rtems/posix/posixapi.h>
    2526
    2627int sem_post(
     
    3031  POSIX_Semaphore_Control *the_semaphore;
    3132  Thread_queue_Context     queue_context;
    32   CORE_semaphore_Status    status;
     33  Status_Control           status;
    3334
    3435  the_semaphore = _POSIX_Semaphore_Get( sem, &queue_context );
     
    4344    &queue_context
    4445  );
    45 
    46   if ( status == CORE_SEMAPHORE_STATUS_SUCCESSFUL ) {
    47     return 0;
    48   }
    49 
    50   rtems_set_errno_and_return_minus_one(
    51     _POSIX_Semaphore_Translate_core_semaphore_return_code( status )
    52   );
     46  return _POSIX_Zero_or_minus_one_plus_errno( status );
    5347}
  • cpukit/posix/src/sigtimedwait.c

    rd887c1b rdce48791  
    1919#endif
    2020
    21 #include <pthread.h>
    2221#include <signal.h>
    23 #include <errno.h>
    2422
    2523#include <rtems/posix/pthreadimpl.h>
    2624#include <rtems/posix/psignalimpl.h>
     25#include <rtems/posix/posixapi.h>
    2726#include <rtems/score/threadqimpl.h>
    28 #include <rtems/seterr.h>
    2927#include <rtems/score/isr.h>
    3028
     
    7977  int                signo;
    8078  ISR_lock_Context   lock_context;
     79  int                error;
    8180
    8281  /*
     
    151150  the_info->si_signo = -1;
    152151
    153   executing->Wait.return_code     = EINTR;
    154152  executing->Wait.option          = *set;
    155153  executing->Wait.return_argument = the_info;
     
    160158    STATES_WAITING_FOR_SIGNAL | STATES_INTERRUPTIBLE_BY_SIGNAL,
    161159    interval,
    162     EAGAIN,
    163160    &lock_context
    164161  );
     
    183180   */
    184181
    185   if ( (executing->Wait.return_code != EINTR)
    186        || !(*set & signo_to_mask( the_info->si_signo )) ) {
    187     errno = executing->Wait.return_code;
    188     return -1;
     182  error = _POSIX_Get_error_after_wait( executing );
     183
     184  if (
     185    error != EINTR
     186     || ( *set & signo_to_mask( the_info->si_signo ) ) == 0
     187  ) {
     188    if ( error == ETIMEDOUT ) {
     189      error = EAGAIN;
     190    }
     191
     192    rtems_set_errno_and_return_minus_one( error );
    189193  }
    190194
  • cpukit/rtems/Makefile.am

    rd887c1b rdce48791  
    130130librtems_a_SOURCES += src/barrierdelete.c
    131131librtems_a_SOURCES += src/barrierident.c
    132 librtems_a_SOURCES += src/barriertranslatereturncode.c
    133132librtems_a_SOURCES += src/barrierrelease.c
    134133librtems_a_SOURCES += src/barrierwait.c
     
    171170librtems_a_SOURCES += src/msgqreceive.c
    172171librtems_a_SOURCES += src/msgqsend.c
    173 librtems_a_SOURCES += src/msgqtranslatereturncode.c
    174172librtems_a_SOURCES += src/msgqurgent.c
    175173
     
    182180librtems_a_SOURCES += src/semrelease.c
    183181librtems_a_SOURCES += src/semflush.c
    184 librtems_a_SOURCES += src/semtranslatereturncode.c
    185182librtems_a_SOURCES += src/semsetpriority.c
    186183
  • cpukit/rtems/include/rtems/rtems/barrierimpl.h

    rd887c1b rdce48791  
    8080}
    8181
    82 /**
    83  * @brief Translate SuperCore Barrier Status Code to RTEMS Status Code
    84  *
    85  * This function returns a RTEMS status code based on the barrier
    86  * status code specified.
    87  *
    88  * @param[in] the_status is the SuperCore Barrier status to translate.
    89  *
    90  * @retval a status code indicating success or the reason for failure.
    91  */
    92 rtems_status_code _Barrier_Translate_core_barrier_return_code (
    93   CORE_barrier_Status  the_status
    94 );
    95 
    9682/**@}*/
    9783
  • cpukit/rtems/include/rtems/rtems/eventimpl.h

    rd887c1b rdce48791  
    4545#define EVENT_SETS_NONE_PENDING 0
    4646
    47 void _Event_Seize(
     47rtems_status_code _Event_Seize(
    4848  rtems_event_set    event_in,
    4949  rtems_option       option_set,
  • cpukit/rtems/include/rtems/rtems/messageimpl.h

    rd887c1b rdce48791  
    7575
    7676/**
    77  * @brief Message queue Translate Core Message Queue Return Code
    78  *
    79  * This function returns a RTEMS status code based on
    80  * @a the_message_queue_status.
    81  *
    82  * @param[in] the_message_queue_status is the status code to translate
    83  *
    84  * @retval translated RTEMS status code
    85  */
    86 rtems_status_code _Message_queue_Translate_core_message_queue_return_code (
    87   uint32_t   the_message_queue_status
    88 );
    89 
    90 /**
    9177 *  @brief Deallocates a message queue control block into
    9278 *  the inactive chain of free message queue control blocks.
  • cpukit/rtems/include/rtems/rtems/semimpl.h

    rd887c1b rdce48791  
    3232 */
    3333extern Objects_Information _Semaphore_Information;
    34 
    35 extern const rtems_status_code
    36   _Semaphore_Translate_core_mutex_return_code_[];
    37 
    38 extern const rtems_status_code
    39   _Semaphore_Translate_core_semaphore_return_code_[];
    40 
    41 /**
    42  * @brief Semaphore Translate Core Mutex Return Code
    43  *
    44  * This function returns a RTEMS status code based on the mutex
    45  * status code specified.
    46  *
    47  * @param[in] status is the mutex status code to translate
    48  *
    49  * @retval translated RTEMS status code
    50  */
    51 RTEMS_INLINE_ROUTINE rtems_status_code
    52 _Semaphore_Translate_core_mutex_return_code(
    53   uint32_t   status
    54 )
    55 {
    56   /*
    57    *  If this thread is blocking waiting for a result on a remote operation.
    58    */
    59   #if defined(RTEMS_MULTIPROCESSING)
    60     if ( _Thread_Is_proxy_blocking(status) )
    61       return RTEMS_PROXY_BLOCKING;
    62   #endif
    63 
    64   /*
    65    *  Internal consistency check for bad status from SuperCore
    66    */
    67   #if defined(RTEMS_DEBUG)
    68     if ( status > CORE_MUTEX_STATUS_LAST )
    69       return RTEMS_INTERNAL_ERROR;
    70   #endif
    71   return _Semaphore_Translate_core_mutex_return_code_[status];
    72 }
    73 
    74 #if defined(RTEMS_SMP)
    75 RTEMS_INLINE_ROUTINE rtems_status_code
    76 _Semaphore_Translate_MRSP_status_code( MRSP_Status mrsp_status )
    77 {
    78   return (rtems_status_code) mrsp_status;
    79 }
    80 #endif
    81 
    82 /**
    83  * @brief Semaphore Translate Core Semaphore Return Code
    84  *
    85  * This function returns a RTEMS status code based on the semaphore
    86  * status code specified.
    87  *
    88  * @param[in] status is the semaphore status code to translate
    89  *
    90  * @retval translated RTEMS status code
    91  */
    92 RTEMS_INLINE_ROUTINE rtems_status_code
    93 _Semaphore_Translate_core_semaphore_return_code(
    94   uint32_t   status
    95 )
    96 {
    97   #if defined(RTEMS_MULTIPROCESSING)
    98     if ( _Thread_Is_proxy_blocking(status) )
    99       return RTEMS_PROXY_BLOCKING;
    100   #endif
    101   /*
    102    *  Internal consistency check for bad status from SuperCore
    103    */
    104   #if defined(RTEMS_DEBUG)
    105     if ( status > CORE_SEMAPHORE_STATUS_LAST )
    106       return RTEMS_INTERNAL_ERROR;
    107   #endif
    108   return _Semaphore_Translate_core_semaphore_return_code_[status];
    109 }
    11034
    11135/**
  • cpukit/rtems/include/rtems/rtems/statusimpl.h

    rd887c1b rdce48791  
    1919
    2020#include <rtems/rtems/status.h>
     21#include <rtems/score/threadimpl.h>
    2122
    2223#ifdef __cplusplus
     
    4041extern const rtems_status_code _Status_Object_name_errors_to_status[];
    4142
     43RTEMS_INLINE_ROUTINE rtems_status_code _Status_Get(
     44  Status_Control status
     45)
     46{
     47  return (rtems_status_code) STATUS_GET_CLASSIC( status );
     48}
     49
     50RTEMS_INLINE_ROUTINE rtems_status_code _Status_Get_after_wait(
     51  const Thread_Control *executing
     52)
     53{
     54  return _Status_Get( _Thread_Wait_get_status( executing ) );
     55}
     56
    4257/**@}*/
    4358
  • cpukit/rtems/src/barrierwait.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/rtems/barrierimpl.h>
     22#include <rtems/rtems/statusimpl.h>
    2223
    2324THREAD_QUEUE_OBJECT_ASSERT( Barrier_Control, Barrier.Wait_queue );
     
    3031  Barrier_Control      *the_barrier;
    3132  Thread_queue_Context  queue_context;
    32   Thread_Control       *executing;
     33  Status_Control        status;
    3334
    3435  the_barrier = _Barrier_Get( id, &queue_context );
     
    3839  }
    3940
    40   executing = _Thread_Executing;
    41   _CORE_barrier_Seize(
     41  status = _CORE_barrier_Seize(
    4242    &the_barrier->Barrier,
    43     executing,
     43    _Thread_Executing,
    4444    true,
    4545    timeout,
    4646    &queue_context
    4747  );
    48   return _Barrier_Translate_core_barrier_return_code(
    49     executing->Wait.return_code
    50   );
     48  return _Status_Get( status );
    5149}
  • cpukit/rtems/src/eventmp.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/rtems/eventimpl.h>
     22#include <rtems/rtems/statusimpl.h>
    2223#include <rtems/score/threadimpl.h>
    2324#include <rtems/score/statesimpl.h>
     
    6263{
    6364  Event_MP_Packet *the_packet;
     65  Status_Control   status;
    6466
    6567  the_packet = _Event_MP_Get_packet( id );
     
    7577  the_packet->event_in          = event_in;
    7678
    77   return (rtems_status_code) _MPCI_Send_request_packet(
     79  status = _MPCI_Send_request_packet(
    7880    _Objects_Get_node( id ),
    7981    &the_packet->Prefix,
    80     STATES_READY,
    81     RTEMS_TIMEOUT
     82    STATES_READY
    8283  );
     84  return _Status_Get( status );
    8385}
    8486
  • cpukit/rtems/src/eventreceive.c

    rd887c1b rdce48791  
    4444
    4545    if ( !_Event_sets_Is_empty( event_in ) ) {
    46       _Event_Seize(
     46      sc = _Event_Seize(
    4747        event_in,
    4848        option_set,
     
    5555        &lock_context
    5656      );
    57 
    58       sc = executing->Wait.return_code;
    5957    } else {
    6058      *event_out = event->pending_events;
  • cpukit/rtems/src/eventseize.c

    rd887c1b rdce48791  
    2222#include <rtems/rtems/eventimpl.h>
    2323#include <rtems/rtems/optionsimpl.h>
     24#include <rtems/rtems/statusimpl.h>
    2425#include <rtems/score/threadimpl.h>
    2526#include <rtems/score/watchdogimpl.h>
    2627
    27 /*
    28  *  INTERRUPT LATENCY:
    29  *    available
    30  *    wait
    31  *    check sync
    32  */
    33 
    34 void _Event_Seize(
     28rtems_status_code _Event_Seize(
    3529  rtems_event_set    event_in,
    3630  rtems_option       option_set,
     
    5044  Per_CPU_Control   *cpu_self;
    5145
    52   executing->Wait.return_code = RTEMS_SUCCESSFUL;
    53 
    5446  pending_events = event->pending_events;
    5547  seized_events  = _Event_sets_Get( pending_events, event_in );
     
    6153    _Thread_Lock_release_default( executing, lock_context );
    6254    *event_out = seized_events;
    63     return;
     55    return RTEMS_SUCCESSFUL;
    6456  }
    6557
    6658  if ( _Options_Is_no_wait( option_set ) ) {
    6759    _Thread_Lock_release_default( executing, lock_context );
    68     executing->Wait.return_code = RTEMS_UNSATISFIED;
    6960    *event_out = seized_events;
    70     return;
     61    return RTEMS_UNSATISFIED;
    7162  }
    7263
     
    8172   *        issue but better safe than sorry.
    8273   */
     74  executing->Wait.return_code     = STATUS_SUCCESSFUL;
    8375  executing->Wait.option          = option_set;
    8476  executing->Wait.count           = event_in;
     
    9082
    9183  if ( ticks ) {
    92     _Thread_Wait_set_timeout_code( executing, RTEMS_TIMEOUT );
    9384    _Thread_Timer_insert_relative(
    9485      executing,
     
    118109
    119110  _Thread_Dispatch_enable( cpu_self );
     111  return _Status_Get_after_wait( executing );
    120112}
    121113
  • cpukit/rtems/src/msgmp.c

    rd887c1b rdce48791  
    2121#include <rtems/rtems/messageimpl.h>
    2222#include <rtems/rtems/optionsimpl.h>
     23#include <rtems/rtems/statusimpl.h>
    2324#include <rtems/score/coremsgimpl.h>
    2425#include <rtems/score/statesimpl.h>
     
    106107{
    107108  Message_queue_MP_Packet *the_packet;
     109  Status_Control           status;
    108110
    109111  if ( !_Message_queue_MP_Is_remote( message_queue_id ) ) {
     
    156158      }
    157159
    158       return (rtems_status_code) _MPCI_Send_request_packet(
     160      status = _MPCI_Send_request_packet(
    159161        _Objects_Get_node(message_queue_id),
    160162        &the_packet->Prefix,
    161         STATES_WAITING_FOR_MESSAGE,
    162         RTEMS_TIMEOUT
    163       );
    164       break;
     163        STATES_WAITING_FOR_MESSAGE
     164      );
     165      return _Status_Get( status );
    165166
    166167    case MESSAGE_QUEUE_MP_RECEIVE_REQUEST:
     
    182183      _Thread_Executing->Wait.return_argument = size_p;
    183184
    184       return (rtems_status_code) _MPCI_Send_request_packet(
     185      status = _MPCI_Send_request_packet(
    185186        _Objects_Get_node(message_queue_id),
    186187        &the_packet->Prefix,
    187         STATES_WAITING_FOR_MESSAGE,
    188         RTEMS_TIMEOUT
    189       );
    190       break;
     188        STATES_WAITING_FOR_MESSAGE
     189      );
     190      return _Status_Get( status );
    191191
    192192    case MESSAGE_QUEUE_MP_ANNOUNCE_CREATE:
  • cpukit/rtems/src/msgqbroadcast.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/rtems/messageimpl.h>
     22#include <rtems/rtems/statusimpl.h>
    2223
    2324rtems_status_code rtems_message_queue_broadcast(
     
    2829)
    2930{
    30   Message_queue_Control     *the_message_queue;
    31   Thread_queue_Context       queue_context;
    32   CORE_message_queue_Status  status;
     31  Message_queue_Control *the_message_queue;
     32  Thread_queue_Context   queue_context;
     33  Status_Control         status;
    3334
    3435  if ( buffer == NULL ) {
     
    6162    &queue_context
    6263  );
    63   return _Message_queue_Translate_core_message_queue_return_code( status );
     64  return _Status_Get( status );
    6465}
  • cpukit/rtems/src/msgqreceive.c

    rd887c1b rdce48791  
    2121#include <rtems/rtems/messageimpl.h>
    2222#include <rtems/rtems/optionsimpl.h>
     23#include <rtems/rtems/statusimpl.h>
    2324
    2425THREAD_QUEUE_OBJECT_ASSERT( Message_queue_Control, message_queue.Wait_queue );
     
    3536  Thread_queue_Context   queue_context;
    3637  Thread_Control        *executing;
     38  Status_Control         status;
    3739
    3840  if ( buffer == NULL ) {
     
    6062
    6163  executing = _Thread_Executing;
    62   _CORE_message_queue_Seize(
     64  status = _CORE_message_queue_Seize(
    6365    &the_message_queue->message_queue,
    6466    executing,
     
    6971    &queue_context
    7072  );
    71   return _Message_queue_Translate_core_message_queue_return_code(
    72     executing->Wait.return_code
    73   );
     73  return _Status_Get( status );
    7474}
  • cpukit/rtems/src/msgqsend.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/rtems/messageimpl.h>
     22#include <rtems/rtems/statusimpl.h>
    2223
    2324rtems_status_code rtems_message_queue_send(
     
    2728)
    2829{
    29   Message_queue_Control     *the_message_queue;
    30   Thread_queue_Context       queue_context;
    31   CORE_message_queue_Status  status;
     30  Message_queue_Control *the_message_queue;
     31  Thread_queue_Context   queue_context;
     32  Status_Control         status;
    3233
    3334  if ( buffer == NULL ) {
     
    6162    &queue_context
    6263  );
    63 
    64   /*
    65    *  Since this API does not allow for blocking sends, we can directly
    66    *  return the returned status.
    67    */
    68 
    69   return _Message_queue_Translate_core_message_queue_return_code( status );
     64  return _Status_Get( status );
    7065}
  • cpukit/rtems/src/msgqurgent.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/rtems/messageimpl.h>
     22#include <rtems/rtems/statusimpl.h>
    2223
    2324rtems_status_code rtems_message_queue_urgent(
     
    2728)
    2829{
    29   Message_queue_Control     *the_message_queue;
    30   Thread_queue_Context       queue_context;
    31   CORE_message_queue_Status  status;
     30  Message_queue_Control *the_message_queue;
     31  Thread_queue_Context   queue_context;
     32  Status_Control         status;
    3233
    3334  if ( buffer == NULL ) {
     
    6162    &queue_context
    6263  );
    63 
    64   /*
    65    *  Since this API does not allow for blocking sends, we can directly
    66    *  return the returned status.
    67    */
    68 
    69   return _Message_queue_Translate_core_message_queue_return_code( status );
     64  return _Status_Get( status );
    7065}
  • cpukit/rtems/src/partmp.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/rtems/partimpl.h>
     22#include <rtems/rtems/statusimpl.h>
    2223#include <rtems/score/statesimpl.h>
    2324#include <rtems/score/threadimpl.h>
     
    101102{
    102103  Partition_MP_Packet *the_packet;
     104  Status_Control       status;
    103105
    104106  if ( !_Partition_MP_Is_remote( partition_id ) ) {
     
    115117      the_packet->buffer = buffer;
    116118
    117       return
    118         _MPCI_Send_request_packet(
    119           _Objects_Get_node( partition_id ),
    120           &the_packet->Prefix,
    121           STATES_READY,     /* Not used */
    122           RTEMS_TIMEOUT
    123         );
    124 
    125       break;
     119      status = _MPCI_Send_request_packet(
     120        _Objects_Get_node( partition_id ),
     121        &the_packet->Prefix,
     122        STATES_READY /* Not used */
     123      );
     124      return _Status_Get( status );
    126125
    127126    case PARTITION_MP_ANNOUNCE_CREATE:
  • cpukit/rtems/src/regiongetsegment.c

    rd887c1b rdce48791  
    2121#include <rtems/rtems/regionimpl.h>
    2222#include <rtems/rtems/optionsimpl.h>
     23#include <rtems/rtems/statusimpl.h>
    2324#include <rtems/score/threadqimpl.h>
    2425#include <rtems/score/statesimpl.h>
     
    8687        executing,
    8788        STATES_WAITING_FOR_SEGMENT,
    88         timeout,
    89         RTEMS_TIMEOUT
     89        timeout
    9090      );
    9191
    9292      _Thread_Dispatch_enable( cpu_self );
    9393
    94       return (rtems_status_code) executing->Wait.return_code;
     94      return _Status_Get_after_wait( executing );
    9595    }
    9696  }
  • cpukit/rtems/src/regionprocessqueue.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/rtems/regionimpl.h>
     22#include <rtems/score/status.h>
    2223#include <rtems/score/threadqimpl.h>
    2324
     
    6667    *(void **)the_thread->Wait.return_argument = the_segment;
    6768    _Thread_queue_Extract( the_thread );
    68     the_thread->Wait.return_code = RTEMS_SUCCESSFUL;
     69    the_thread->Wait.return_code = STATUS_SUCCESSFUL;
    6970  }
    7071
  • cpukit/rtems/src/semcreate.c

    rd887c1b rdce48791  
    2626#include <rtems/rtems/options.h>
    2727#include <rtems/rtems/semimpl.h>
     28#include <rtems/rtems/statusimpl.h>
    2829#include <rtems/rtems/tasksimpl.h>
    2930#include <rtems/score/coremuteximpl.h>
     
    6465  CORE_mutex_Attributes       the_mutex_attr;
    6566  CORE_semaphore_Disciplines  semaphore_discipline;
    66   CORE_mutex_Status           mutex_status;
     67  Status_Control              status;
    6768
    6869  if ( !rtems_is_name_valid( name ) )
     
    156157      count
    157158    );
     159    status = STATUS_SUCCESSFUL;
    158160#if defined(RTEMS_SMP)
    159161  } else if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
    160     MRSP_Status mrsp_status = _MRSP_Initialize(
     162    status = _MRSP_Initialize(
    161163      &the_semaphore->Core_control.mrsp,
    162164      priority_ceiling,
     
    164166      count != 1
    165167    );
    166 
    167     if ( mrsp_status != MRSP_SUCCESSFUL ) {
    168       _Semaphore_Free( the_semaphore );
    169       _Objects_Allocator_unlock();
    170 
    171       return _Semaphore_Translate_MRSP_status_code( mrsp_status );
    172     }
    173168#endif
    174169  } else {
     
    203198    }
    204199
    205     mutex_status = _CORE_mutex_Initialize(
     200    status = _CORE_mutex_Initialize(
    206201      &the_semaphore->Core_control.mutex,
    207202      _Thread_Get_executing(),
     
    209204      count != 1
    210205    );
    211 
    212     if ( mutex_status == CORE_MUTEX_STATUS_CEILING_VIOLATED ) {
    213       _Semaphore_Free( the_semaphore );
    214       _Objects_Allocator_unlock();
    215       return RTEMS_INVALID_PRIORITY;
    216     }
     206  }
     207
     208  if ( status != STATUS_SUCCESSFUL ) {
     209    _Semaphore_Free( the_semaphore );
     210    _Objects_Allocator_unlock();
     211    return _Status_Get( status );
    217212  }
    218213
  • cpukit/rtems/src/semdelete.c

    rd887c1b rdce48791  
    2121#include <rtems/rtems/semimpl.h>
    2222#include <rtems/rtems/attrimpl.h>
     23#include <rtems/rtems/statusimpl.h>
    2324
    2425rtems_status_code rtems_semaphore_delete(
     
    5354#if defined(RTEMS_SMP)
    5455  if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
    55     MRSP_Status mrsp_status;
     56    Status_Control status;
    5657
    5758    _MRSP_Acquire_critical(
     
    5960      &queue_context
    6061    );
    61     mrsp_status = _MRSP_Can_destroy( &the_semaphore->Core_control.mrsp );
    62     if ( mrsp_status != MRSP_SUCCESSFUL ) {
     62    status = _MRSP_Can_destroy( &the_semaphore->Core_control.mrsp );
     63    if ( status != STATUS_SUCCESSFUL ) {
    6364      _MRSP_Release(
    6465        &the_semaphore->Core_control.mrsp,
     
    6667      );
    6768      _Objects_Allocator_unlock();
    68       return _Semaphore_Translate_MRSP_status_code( mrsp_status );
     69      return _Status_Get( status );
    6970    }
    7071  } else
     
    104105    _CORE_mutex_Flush(
    105106      &the_semaphore->Core_control.mutex,
    106       _CORE_mutex_Was_deleted,
     107      _Thread_queue_Flush_status_object_was_deleted,
    107108      &queue_context
    108109    );
  • cpukit/rtems/src/semflush.c

    rd887c1b rdce48791  
    5959    _CORE_mutex_Flush(
    6060      &the_semaphore->Core_control.mutex,
    61       _CORE_mutex_Unsatisfied_nowait,
     61      _Thread_queue_Flush_status_unavailable,
    6262      &queue_context
    6363    );
  • cpukit/rtems/src/semmp.c

    rd887c1b rdce48791  
    2121#include <rtems/rtems/semimpl.h>
    2222#include <rtems/rtems/optionsimpl.h>
     23#include <rtems/rtems/statusimpl.h>
    2324
    2425RTEMS_STATIC_ASSERT(
     
    8182{
    8283  Semaphore_MP_Packet *the_packet;
     84  Status_Control       status;
    8385
    8486  switch ( operation ) {
     
    98100      the_packet->option_set        = option_set;
    99101
    100       return _MPCI_Send_request_packet(
    101           _Objects_Get_node( semaphore_id ),
    102           &the_packet->Prefix,
    103           STATES_WAITING_FOR_SEMAPHORE,
    104           RTEMS_TIMEOUT
    105         );
    106       break;
     102      status = _MPCI_Send_request_packet(
     103        _Objects_Get_node( semaphore_id ),
     104        &the_packet->Prefix,
     105        STATES_WAITING_FOR_SEMAPHORE
     106      );
     107      return _Status_Get( status );
    107108
    108109    case SEMAPHORE_MP_ANNOUNCE_CREATE:
  • cpukit/rtems/src/semobtain.c

    rd887c1b rdce48791  
    2222#include <rtems/rtems/attrimpl.h>
    2323#include <rtems/rtems/optionsimpl.h>
     24#include <rtems/rtems/statusimpl.h>
    2425
    2526THREAD_QUEUE_OBJECT_ASSERT(
     
    4445  rtems_attribute       attribute_set;
    4546  bool                  wait;
     47  Status_Control        status;
    4648
    4749  the_semaphore = _Semaphore_Get( id, &queue_context, NULL );
     
    6062#if defined(RTEMS_SMP)
    6163  if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
    62     MRSP_Status mrsp_status;
    63 
    64     mrsp_status = _MRSP_Seize(
     64    status = _MRSP_Seize(
    6565      &the_semaphore->Core_control.mrsp,
    6666      executing,
     
    6969      &queue_context
    7070    );
    71     return _Semaphore_Translate_MRSP_status_code( mrsp_status );
    7271  } else
    7372#endif
    7473  if ( !_Attributes_Is_counting_semaphore( attribute_set ) ) {
    75     _CORE_mutex_Seize(
     74    status = _CORE_mutex_Seize(
    7675      &the_semaphore->Core_control.mutex,
    7776      executing,
     
    8079      &queue_context
    8180    );
    82     return _Semaphore_Translate_core_mutex_return_code(
    83       executing->Wait.return_code
     81  } else {
     82    /* must be a counting semaphore */
     83    status = _CORE_semaphore_Seize(
     84      &the_semaphore->Core_control.semaphore,
     85      executing,
     86      wait,
     87      timeout,
     88      &queue_context
    8489    );
    8590  }
    8691
    87   /* must be a counting semaphore */
    88   _CORE_semaphore_Seize(
    89     &the_semaphore->Core_control.semaphore,
    90     executing,
    91     wait,
    92     timeout,
    93     &queue_context
    94   );
    95   return _Semaphore_Translate_core_semaphore_return_code(
    96     executing->Wait.return_code
    97   );
     92  return _Status_Get( status );
    9893}
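
    The obtain path above now funnels the MrsP, mutex, and counting semaphore cases
    through a single Status_Control variable and one _Status_Get() call at the end.
    From the application side the directive contract looks unchanged; a minimal usage
    sketch (standard Classic API calls, example status codes only, assuming id is a
    valid semaphore identifier):

        rtems_status_code sc;

        /* Blocking obtain with a 100-tick timeout; the directive still returns
         * rtems_status_code values such as RTEMS_SUCCESSFUL, RTEMS_TIMEOUT,
         * RTEMS_UNSATISFIED or RTEMS_OBJECT_WAS_DELETED.
         */
        sc = rtems_semaphore_obtain( id, RTEMS_WAIT, 100 );
        if ( sc != RTEMS_SUCCESSFUL ) {
          /* handle the error, e.g. a timeout while waiting */
        }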
  • cpukit/rtems/src/semrelease.c

    rd887c1b rdce48791  
    2424#include <rtems/rtems/semimpl.h>
    2525#include <rtems/rtems/attrimpl.h>
     26#include <rtems/rtems/statusimpl.h>
    2627
    2728rtems_status_code rtems_semaphore_release( rtems_id id )
    2829{
    29   Semaphore_Control     *the_semaphore;
    30   CORE_mutex_Status      mutex_status;
    31   CORE_semaphore_Status  semaphore_status;
    32   rtems_attribute        attribute_set;
    33   Thread_queue_Context   queue_context;
     30  Semaphore_Control    *the_semaphore;
     31  Thread_queue_Context  queue_context;
     32  rtems_attribute       attribute_set;
     33  Status_Control        status;
    3434
    3535  the_semaphore = _Semaphore_Get(
     
    5050#if defined(RTEMS_SMP)
    5151  if ( _Attributes_Is_multiprocessor_resource_sharing( attribute_set ) ) {
    52     MRSP_Status mrsp_status;
    53 
    54     mrsp_status = _MRSP_Surrender(
     52    status = _MRSP_Surrender(
    5553      &the_semaphore->Core_control.mrsp,
    5654      _Thread_Executing,
    5755      &queue_context
    5856    );
    59     return _Semaphore_Translate_MRSP_status_code( mrsp_status );
    6057  } else
    6158#endif
    6259  if ( !_Attributes_Is_counting_semaphore( attribute_set ) ) {
    63     mutex_status = _CORE_mutex_Surrender(
     60    status = _CORE_mutex_Surrender(
    6461      &the_semaphore->Core_control.mutex,
    6562      &queue_context
    6663    );
    67     return _Semaphore_Translate_core_mutex_return_code( mutex_status );
    6864  } else {
    69     semaphore_status = _CORE_semaphore_Surrender(
     65    status = _CORE_semaphore_Surrender(
    7066      &the_semaphore->Core_control.semaphore,
    7167      UINT32_MAX,
    7268      &queue_context
    7369    );
    74     return _Semaphore_Translate_core_semaphore_return_code( semaphore_status );
    7570  }
     71
     72  return _Status_Get( status );
    7673}
  • cpukit/rtems/src/signalmp.c

    rd887c1b rdce48791  
    2121#include <rtems/rtems/signalimpl.h>
    2222#include <rtems/rtems/optionsimpl.h>
     23#include <rtems/rtems/statusimpl.h>
    2324#include <rtems/score/statesimpl.h>
    2425#include <rtems/score/threadimpl.h>
     
    7273{
    7374  Signal_MP_Packet *the_packet;
     75  Status_Control    status;
    7476
    7577  the_packet = _Signal_MP_Get_packet( id );
     
    8587  the_packet->signal_set        = signal_set;
    8688
    87   return (rtems_status_code) _MPCI_Send_request_packet(
     89  status = _MPCI_Send_request_packet(
    8890    _Objects_Get_node( id ),
    8991    &the_packet->Prefix,
    90     STATES_READY,
    91     RTEMS_TIMEOUT
     92    STATES_READY
    9293  );
     94  return _Status_Get( status );
    9395}
    9496
  • cpukit/rtems/src/systemeventreceive.c

    rd887c1b rdce48791  
    5050
    5151    if ( !_Event_sets_Is_empty( event_in ) ) {
    52       _Event_Seize(
     52      sc = _Event_Seize(
    5353        event_in,
    5454        option_set,
     
    6161        &lock_context
    6262      );
    63 
    64       sc = executing->Wait.return_code;
    6563    } else {
    6664      *event_out = event->pending_events;
  • cpukit/rtems/src/taskmp.c

    rd887c1b rdce48791  
    2121#include <rtems/rtems/tasksimpl.h>
    2222#include <rtems/rtems/optionsimpl.h>
     23#include <rtems/rtems/statusimpl.h>
    2324#include <rtems/score/statesimpl.h>
    2425#include <rtems/score/threadimpl.h>
     
    102103)
    103104{
     105  Status_Control status;
     106
    104107  the_packet->Prefix.the_class  = MP_PACKET_TASKS;
    105108  the_packet->Prefix.length     = sizeof( *the_packet );
     
    108111  the_packet->operation         = operation;
    109112
    110   return _MPCI_Send_request_packet(
     113  status = _MPCI_Send_request_packet(
    111114    _Objects_Get_node( id ),
    112115    &the_packet->Prefix,
    113     STATES_READY,    /* Not used */
    114     RTEMS_TIMEOUT
     116    STATES_READY /* Not used */
    115117  );
     118  return _Status_Get( status );
    116119}
    117120
  • cpukit/score/Makefile.am

    rd887c1b rdce48791  
    8888include_rtems_score_HEADERS += include/rtems/score/states.h
    8989include_rtems_score_HEADERS += include/rtems/score/statesimpl.h
     90include_rtems_score_HEADERS += include/rtems/score/status.h
    9091include_rtems_score_HEADERS += include/rtems/score/sysstate.h
    9192include_rtems_score_HEADERS += include/rtems/score/thread.h
  • cpukit/score/include/rtems/score/corebarrierimpl.h

    rd887c1b rdce48791  
    2121
    2222#include <rtems/score/corebarrier.h>
     23#include <rtems/score/status.h>
    2324#include <rtems/score/threadqimpl.h>
    2425
     
    3132 */
    3233/**@{**/
    33 
    34 /**
    35  *  Core Barrier handler return statuses.
    36  */
    37 typedef enum {
    38   /** This status indicates that the operation completed successfully. */
    39   CORE_BARRIER_STATUS_SUCCESSFUL,
    40   /** This status indicates that the barrier is configured for automatic
    41    *  release and the caller tripped the automatic release.  The caller
    42    *  thus did not block.
    43    */
    44   CORE_BARRIER_STATUS_AUTOMATICALLY_RELEASED,
    45   /** This status indicates that the thread was blocked waiting for an
    46    *  operation to complete and the barrier was deleted.
    47    */
    48   CORE_BARRIER_WAS_DELETED,
    49   /** This status indicates that the calling task was willing to block
    50    *  but the operation was unable to complete within the time allotted
    51    *  because the resource never became available.
    52    */
    53   CORE_BARRIER_TIMEOUT
    54 }   CORE_barrier_Status;
    55 
    56 /**
    57  *  @brief Core barrier last status value.
    58  *
    59  *  This is the last status value.
    60  */
    61 #define CORE_BARRIER_STATUS_LAST CORE_BARRIER_TIMEOUT
    6234
    6335#define CORE_BARRIER_TQ_OPERATIONS &_Thread_queue_Operations_FIFO
     
    12193 *         thread unblocked is remote
    12294 *
    123  * @note Status is returned via the thread control block.
     95 * @return The method status.
    12496 */
    125 void _CORE_barrier_Seize(
     97Status_Control _CORE_barrier_Seize(
    12698  CORE_barrier_Control *the_barrier,
    12799  Thread_Control       *executing,
     
    161133}
    162134
    163 Thread_Control *_CORE_barrier_Was_deleted(
    164   Thread_Control       *the_thread,
    165   Thread_queue_Queue   *queue,
    166   Thread_queue_Context *queue_context
    167 );
    168 
    169135RTEMS_INLINE_ROUTINE void _CORE_barrier_Flush(
    170136  CORE_barrier_Control *the_barrier,
     
    174140  _CORE_barrier_Do_flush(
    175141    the_barrier,
    176     _CORE_barrier_Was_deleted,
     142    _Thread_queue_Flush_status_object_was_deleted,
    177143    queue_context
    178144  );
  • cpukit/score/include/rtems/score/coremsgimpl.h

    rd887c1b rdce48791  
    2121
    2222#include <rtems/score/coremsg.h>
     23#include <rtems/score/status.h>
    2324#include <rtems/score/chainimpl.h>
    2425#include <rtems/score/threaddispatch.h>
     
    6465 */
    6566typedef int CORE_message_queue_Submit_types;
    66 
    67 /**
    68  *  @brief The possible set of Core Message Queue handler return statuses.
    69  *
    70  *  This enumerated type defines the possible set of Core Message
    71  *  Queue handler return statuses.
    72  */
    73 typedef enum {
    74   /** This value indicates the operation completed sucessfully. */
    75   CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL,
    76   /** This value indicates that the message was too large for this queue. */
    77   CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE,
    78   /** This value indicates that there are too many messages pending. */
    79   CORE_MESSAGE_QUEUE_STATUS_TOO_MANY,
    80   /** This value indicates that a receive was unsuccessful. */
    81   CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED,
    82   /** This value indicates that a blocking send was unsuccessful. */
    83   CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED_NOWAIT,
    84   /** This value indicates that the message queue being blocked upon
    85    *  was deleted while the thread was waiting.
    86    */
    87   CORE_MESSAGE_QUEUE_STATUS_WAS_DELETED,
    88   /** This value indicates that the thread had to timeout while waiting
    89    *  to receive a message because one did not become available.
    90    */
    91   CORE_MESSAGE_QUEUE_STATUS_TIMEOUT,
    92 }   CORE_message_queue_Status;
    93 
    94 /**
    95  *  @brief Core message queue last status value.
    96  *
    97  *  This is the last status value.
    98  */
    99 #define CORE_MESSAGE_QUEUE_STATUS_LAST CORE_MESSAGE_QUEUE_STATUS_TIMEOUT
    10067
    10168/**
     
    203170 *  @retval indication of the successful completion or reason for failure
    204171 */
    205 CORE_message_queue_Status _CORE_message_queue_Broadcast(
     172Status_Control _CORE_message_queue_Broadcast(
    206173  CORE_message_queue_Control *the_message_queue,
    207174  const void                 *buffer,
     
    234201 *  @retval indication of the successful completion or reason for failure
    235202 */
    236 CORE_message_queue_Status _CORE_message_queue_Submit(
     203Status_Control _CORE_message_queue_Submit(
    237204  CORE_message_queue_Control       *the_message_queue,
    238205  Thread_Control                   *executing,
     
    279246 *    + wait
    280247 */
    281 void _CORE_message_queue_Seize(
     248Status_Control _CORE_message_queue_Seize(
    282249  CORE_message_queue_Control *the_message_queue,
    283250  Thread_Control             *executing,
     
    310277);
    311278
    312 RTEMS_INLINE_ROUTINE CORE_message_queue_Status _CORE_message_queue_Send(
     279RTEMS_INLINE_ROUTINE Status_Control _CORE_message_queue_Send(
    313280  CORE_message_queue_Control       *the_message_queue,
    314281  const void                       *buffer,
     
    331298}
    332299
    333 RTEMS_INLINE_ROUTINE CORE_message_queue_Status _CORE_message_queue_Urgent(
     300RTEMS_INLINE_ROUTINE Status_Control _CORE_message_queue_Urgent(
    334301  CORE_message_queue_Control       *the_message_queue,
    335302  const void                       *buffer,
  • cpukit/score/include/rtems/score/coremuteximpl.h

    rd887c1b rdce48791  
    2121#include <rtems/score/coremutex.h>
    2222#include <rtems/score/chainimpl.h>
     23#include <rtems/score/status.h>
    2324#include <rtems/score/sysstate.h>
    2425#include <rtems/score/threadimpl.h>
     
    3334 */
    3435/**@{**/
    35 
    36 /**
    37  *  @brief The possible Mutex handler return statuses.
    38  *
    39  *  This enumerated type defines the possible Mutex handler return statuses.
    40  */
    41 typedef enum {
    42   /** This status indicates that the operation completed successfully. */
    43   CORE_MUTEX_STATUS_SUCCESSFUL,
    44   /** This status indicates that the calling task did not want to block
    45    *  and the operation was unable to complete immediately because the
    46    *  resource was unavailable.
    47    */
    48   CORE_MUTEX_STATUS_UNSATISFIED_NOWAIT,
    49 #if defined(RTEMS_POSIX_API)
    50   /** This status indicates that an attempt was made to relock a mutex
    51    *  for which nesting is not configured.
    52    */
    53   CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED,
    54 #endif
    55   /** This status indicates that an attempt was made to release a mutex
    56    *  by a thread other than the thread which locked it.
    57    */
    58   CORE_MUTEX_STATUS_NOT_OWNER_OF_RESOURCE,
    59   /** This status indicates that the thread was blocked waiting for an
    60    *  operation to complete and the mutex was deleted.
    61    */
    62   CORE_MUTEX_WAS_DELETED,
    63   /** This status indicates that the calling task was willing to block
    64    *  but the operation was unable to complete within the time allotted
    65    *  because the resource never became available.
    66    */
    67   CORE_MUTEX_TIMEOUT,
    68 
    69   /** This status indicates that a thread of logically greater importance
    70    *  than the ceiling priority attempted to lock this mutex.
    71    */
    72   CORE_MUTEX_STATUS_CEILING_VIOLATED
    73 
    74 }   CORE_mutex_Status;
    75 
    76 /**
    77  *  @brief The last status value.
    78  *
    79  *  This is the last status value.
    80  */
    81 #define CORE_MUTEX_STATUS_LAST CORE_MUTEX_STATUS_CEILING_VIOLATED
    8236
    8337/**
     
    9347 *  the executing thread.
    9448 *
    95  *  @retval This method returns CORE_MUTEX_STATUS_SUCCESSFUL if successful.
    96  */
    97 CORE_mutex_Status _CORE_mutex_Initialize(
     49 *  @retval This method returns STATUS_SUCCESSFUL if successful.
     50 */
     51Status_Control _CORE_mutex_Initialize(
    9852  CORE_mutex_Control           *the_mutex,
    9953  Thread_Control               *executing,
     
    14195 *  @param[in] lock_context is the interrupt level
    14296 */
    143 void _CORE_mutex_Seize_interrupt_blocking(
     97Status_Control _CORE_mutex_Seize_interrupt_blocking(
    14498  CORE_mutex_Control  *the_mutex,
    14599  Thread_Control      *executing,
     
    228182 *  @param[in] queue_context is the interrupt level
    229183 *
    230  *  @retval This routine returns 0 if "trylock" can resolve whether or not
    231  *  the mutex is immediately obtained or there was an error attempting to
    232  *  get it.  It returns 1 to indicate that the caller cannot obtain
    233  *  the mutex and will have to block to do so.
    234  */
    235 RTEMS_INLINE_ROUTINE int _CORE_mutex_Seize_interrupt_trylock(
     184 *  @retval STATUS_UNAVAILABLE The mutex is already locked.
     185 *  @retval other Otherwise.
     186 */
     187RTEMS_INLINE_ROUTINE Status_Control _CORE_mutex_Seize_interrupt_trylock(
    236188  CORE_mutex_Control   *the_mutex,
    237189  Thread_Control       *executing,
     
    241193  /* disabled when you get here */
    242194
    243   executing->Wait.return_code = CORE_MUTEX_STATUS_SUCCESSFUL;
    244195  if ( !_CORE_mutex_Is_locked( the_mutex ) ) {
    245196    the_mutex->holder     = executing;
     
    252203    if ( !_CORE_mutex_Is_priority_ceiling( &the_mutex->Attributes ) ) {
    253204      _CORE_mutex_Release( the_mutex, queue_context );
    254       return 0;
    255     } /* else must be CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING
     205    } else {
     206      /*
     207       * must be CORE_MUTEX_DISCIPLINES_PRIORITY_CEILING
    256208       *
    257209       * we possibly bump the priority of the current holder -- which
    258210       * happens to be _Thread_Executing.
    259211       */
    260     {
    261212      Priority_Control  ceiling;
    262213      Priority_Control  current;
     
    266217      if ( current == ceiling ) {
    267218        _CORE_mutex_Release( the_mutex, queue_context );
    268         return 0;
    269       }
    270 
    271       if ( current > ceiling ) {
     219      } else if ( current > ceiling ) {
    272220        Per_CPU_Control *cpu_self;
    273221
     
    278226        _Thread_Raise_priority( executing, ceiling );
    279227        _Thread_Dispatch_enable( cpu_self );
    280         return 0;
    281       }
    282       /* if ( current < ceiling ) */ {
    283         executing->Wait.return_code = CORE_MUTEX_STATUS_CEILING_VIOLATED;
     228      } else /* if ( current < ceiling ) */ {
    284229        the_mutex->holder = NULL;
    285230        the_mutex->nest_count = 0;     /* undo locking above */
    286231        executing->resource_count--;   /* undo locking above */
    287232        _CORE_mutex_Release( the_mutex, queue_context );
    288         return 0;
     233        return STATUS_MUTEX_CEILING_VIOLATED;
    289234      }
    290235    }
    291     return 0;
     236
     237    return STATUS_SUCCESSFUL;
    292238  }
    293239
     
    302248        the_mutex->nest_count++;
    303249        _CORE_mutex_Release( the_mutex, queue_context );
    304         return 0;
     250        return STATUS_SUCCESSFUL;
    305251      #if defined(RTEMS_POSIX_API)
    306252        case CORE_MUTEX_NESTING_IS_ERROR:
    307           executing->Wait.return_code = CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED;
    308253          _CORE_mutex_Release( the_mutex, queue_context );
    309           return 0;
     254          return STATUS_NESTING_NOT_ALLOWED;
    310255      #endif
    311256      case CORE_MUTEX_NESTING_BLOCKS:
     
    318263   *  of blocking.
    319264   */
    320   return 1;
     265  return STATUS_UNAVAILABLE;
    321266}
    322267
     
    347292 *      then they are blocked.
    348293 */
    349 RTEMS_INLINE_ROUTINE void _CORE_mutex_Seize(
     294RTEMS_INLINE_ROUTINE Status_Control _CORE_mutex_Seize(
    350295  CORE_mutex_Control   *the_mutex,
    351296  Thread_Control       *executing,
     
    355300)
    356301{
     302  Status_Control status;
     303
    357304  if ( _CORE_mutex_Check_dispatch_for_seize( wait ) ) {
    358305    _Terminate(
     
    362309    );
    363310  }
     311
    364312  _CORE_mutex_Acquire_critical( the_mutex, queue_context );
    365   if (
    366     _CORE_mutex_Seize_interrupt_trylock( the_mutex, executing, queue_context )
    367   ) {
    368     if ( !wait ) {
    369       _CORE_mutex_Release( the_mutex, queue_context );
    370       executing->Wait.return_code =
    371         CORE_MUTEX_STATUS_UNSATISFIED_NOWAIT;
    372     } else {
    373       _CORE_mutex_Seize_interrupt_blocking(
    374         the_mutex,
    375         executing,
    376         timeout,
    377         &queue_context->Lock_context
    378       );
    379     }
    380   }
    381 }
    382 
    383 CORE_mutex_Status _CORE_mutex_Surrender(
    384   CORE_mutex_Control   *the_mutex,
    385   Thread_queue_Context *queue_context
    386 );
    387 
    388 Thread_Control *_CORE_mutex_Was_deleted(
    389   Thread_Control       *the_thread,
    390   Thread_queue_Queue   *queue,
    391   Thread_queue_Context *queue_context
    392 );
    393 
    394 Thread_Control *_CORE_mutex_Unsatisfied_nowait(
    395   Thread_Control       *the_thread,
    396   Thread_queue_Queue   *queue,
     313
     314  status = _CORE_mutex_Seize_interrupt_trylock(
     315    the_mutex,
     316    executing,
     317    queue_context
     318  );
     319
     320  if ( status != STATUS_UNAVAILABLE ) {
     321    return status;
     322  }
     323
     324  if ( !wait ) {
     325    _CORE_mutex_Release( the_mutex, queue_context );
     326    return status;
     327  }
     328
     329  return _CORE_mutex_Seize_interrupt_blocking(
     330    the_mutex,
     331    executing,
     332    timeout,
     333    &queue_context->Lock_context
     334  );
     335}
     336
     337Status_Control _CORE_mutex_Surrender(
     338  CORE_mutex_Control   *the_mutex,
    397339  Thread_queue_Context *queue_context
    398340);
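
    The inline trylock above now reports its outcome as a Status_Control, with
    STATUS_UNAVAILABLE reserved for "the caller must block or give up" and every other
    value final.  A hypothetical non-blocking helper built on that contract; the name
    example_mutex_try_seize is not part of the change, and the sketch assumes the ISR
    lock context in queue_context was set up by the caller as in the directives above:

        /* Hypothetical helper: try to obtain the mutex without ever blocking. */
        RTEMS_INLINE_ROUTINE Status_Control example_mutex_try_seize(
          CORE_mutex_Control   *the_mutex,
          Thread_Control       *executing,
          Thread_queue_Context *queue_context
        )
        {
          Status_Control status;

          _CORE_mutex_Acquire_critical( the_mutex, queue_context );
          status = _CORE_mutex_Seize_interrupt_trylock(
            the_mutex,
            executing,
            queue_context
          );

          if ( status == STATUS_UNAVAILABLE ) {
            /* The trylock leaves the queue lock held only in this case. */
            _CORE_mutex_Release( the_mutex, queue_context );
          }

          return status;
        }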
  • cpukit/score/include/rtems/score/corerwlockimpl.h

    rd887c1b rdce48791  
    2323#include <rtems/score/thread.h>
    2424#include <rtems/score/threadqimpl.h>
     25#include <rtems/score/status.h>
    2526#include <rtems/score/watchdog.h>
    2627
     
    3536
    3637#define CORE_RWLOCK_TQ_OPERATIONS &_Thread_queue_Operations_FIFO
    37 
    38 /**
    39  *  Core RWLock handler return statuses.
    40  */
    41 typedef enum {
    42   /** This status indicates that the operation completed successfully. */
    43   CORE_RWLOCK_SUCCESSFUL,
    44   /** This status indicates that the thread was blocked waiting for an */
    45   CORE_RWLOCK_WAS_DELETED,
    46   /** This status indicates that the rwlock was not immediately available. */
    47   CORE_RWLOCK_UNAVAILABLE,
    48   /** This status indicates that the calling task was willing to block
    49    *  but the operation was unable to complete within the time allotted
    50    *  because the resource never became available.
    51    */
    52   CORE_RWLOCK_TIMEOUT
    53 }   CORE_RWLock_Status;
    54 
    55 /** This is the last status value.
    56  */
    57 #define CORE_RWLOCK_STATUS_LAST CORE_RWLOCK_TIMEOUT
    5838
    5939/**
     
    11898 *  @param[in] timeout is the number of ticks the calling thread is willing
    11999 *         to wait if @a wait is true.
    120  *
    121  * @note Status is returned via the thread control block.
    122100 */
    123101
    124 void _CORE_RWLock_Seize_for_reading(
     102Status_Control _CORE_RWLock_Seize_for_reading(
    125103  CORE_RWLock_Control  *the_rwlock,
    126104  Thread_Control       *executing,
     
    139117 *  @param[in] timeout is the number of ticks the calling thread is willing
    140118 *         to wait if @a wait is true.
    141  *
    142  * @note Status is returned via the thread control block.
    143119 */
    144 void _CORE_RWLock_Seize_for_writing(
     120Status_Control _CORE_RWLock_Seize_for_writing(
    145121  CORE_RWLock_Control  *the_rwlock,
    146122  Thread_Control       *executing,
     
    160136 *  @retval Status is returned to indicate successful or failure.
    161137 */
    162 CORE_RWLock_Status _CORE_RWLock_Surrender(
     138Status_Control _CORE_RWLock_Surrender(
    163139  CORE_RWLock_Control  *the_rwlock,
    164140  Thread_queue_Context *queue_context
  • cpukit/score/include/rtems/score/coresemimpl.h

    rd887c1b rdce48791  
    2323#include <rtems/score/objectimpl.h>
    2424#include <rtems/score/threaddispatch.h>
     25#include <rtems/score/threadimpl.h>
    2526#include <rtems/score/threadqimpl.h>
    2627#include <rtems/score/statesimpl.h>
     28#include <rtems/score/status.h>
    2729
    2830#ifdef __cplusplus
     
    3436 */
    3537/**@{**/
    36 
    37 /**
    38  *  Core Semaphore handler return statuses.
    39  */
    40 typedef enum {
    41   /** This status indicates that the operation completed successfully. */
    42   CORE_SEMAPHORE_STATUS_SUCCESSFUL,
    43   /** This status indicates that the calling task did not want to block
    44    *  and the operation was unable to complete immediately because the
    45    *  resource was unavailable.
    46    */
    47   CORE_SEMAPHORE_STATUS_UNSATISFIED_NOWAIT,
    48   /** This status indicates that the thread was blocked waiting for an
    49    *  operation to complete and the semaphore was deleted.
    50    */
    51   CORE_SEMAPHORE_WAS_DELETED,
    52   /** This status indicates that the calling task was willing to block
    53    *  but the operation was unable to complete within the time allotted
    54    *  because the resource never became available.
    55    */
    56   CORE_SEMAPHORE_TIMEOUT,
    57   /** This status indicates that an attempt was made to unlock the semaphore
    58    *  and this would have made its count greater than that allowed.
    59    */
    60   CORE_SEMAPHORE_MAXIMUM_COUNT_EXCEEDED
    61 }   CORE_semaphore_Status;
    62 
    63 /**
    64  *  @brief Core semaphore last status value.
    65  *
    66  *  This is the last status value.
    67  */
    68 #define CORE_SEMAPHORE_STATUS_LAST CORE_SEMAPHORE_MAXIMUM_COUNT_EXCEEDED
    6938
    7039/**
     
    10978}
    11079
    111 Thread_Control *_CORE_semaphore_Was_deleted(
    112   Thread_Control       *the_thread,
    113   Thread_queue_Queue   *queue,
    114   Thread_queue_Context *queue_context
    115 );
    116 
    117 Thread_Control *_CORE_semaphore_Unsatisfied_nowait(
    118   Thread_Control       *the_thread,
    119   Thread_queue_Queue   *queue,
    120   Thread_queue_Context *queue_context
    121 );
    122 
    12380RTEMS_INLINE_ROUTINE void _CORE_semaphore_Destroy(
    12481  CORE_semaphore_Control *the_semaphore,
     
    12986    &the_semaphore->Wait_queue.Queue,
    13087    the_semaphore->operations,
    131     _CORE_semaphore_Was_deleted,
     88    _Thread_queue_Flush_status_object_was_deleted,
    13289    queue_context
    13390  );
     
    148105 *  @retval an indication of whether the routine succeeded or failed
    149106 */
    150 RTEMS_INLINE_ROUTINE CORE_semaphore_Status _CORE_semaphore_Surrender(
     107RTEMS_INLINE_ROUTINE Status_Control _CORE_semaphore_Surrender(
    151108  CORE_semaphore_Control  *the_semaphore,
    152109  uint32_t                 maximum_count,
     
    155112{
    156113  Thread_Control *the_thread;
    157   CORE_semaphore_Status status;
    158 
    159   status = CORE_SEMAPHORE_STATUS_SUCCESSFUL;
     114  Status_Control status;
     115
     116  status = STATUS_SUCCESSFUL;
    160117
    161118  _CORE_semaphore_Acquire_critical( the_semaphore, queue_context );
     
    176133      the_semaphore->count += 1;
    177134    else
    178       status = CORE_SEMAPHORE_MAXIMUM_COUNT_EXCEEDED;
     135      status = STATUS_MAXIMUM_COUNT_EXCEEDED;
    179136
    180137    _CORE_semaphore_Release( the_semaphore, queue_context );
     
    192149    &the_semaphore->Wait_queue.Queue,
    193150    the_semaphore->operations,
    194     _CORE_semaphore_Unsatisfied_nowait,
     151    _Thread_queue_Flush_status_unavailable,
    195152    queue_context
    196153  );
     
    226183 * @note There is currently no MACRO version of this routine.
    227184 */
    228 RTEMS_INLINE_ROUTINE void _CORE_semaphore_Seize(
     185RTEMS_INLINE_ROUTINE Status_Control _CORE_semaphore_Seize(
    229186  CORE_semaphore_Control *the_semaphore,
    230187  Thread_Control         *executing,
     
    236193  /* disabled when you get here */
    237194
    238   executing->Wait.return_code = CORE_SEMAPHORE_STATUS_SUCCESSFUL;
    239195  _CORE_semaphore_Acquire_critical( the_semaphore, queue_context );
    240196  if ( the_semaphore->count != 0 ) {
    241197    the_semaphore->count -= 1;
    242198    _CORE_semaphore_Release( the_semaphore, queue_context );
    243     return;
     199    return STATUS_SUCCESSFUL;
    244200  }
    245201
    246202  if ( !wait ) {
    247203    _CORE_semaphore_Release( the_semaphore, queue_context );
    248     executing->Wait.return_code = CORE_SEMAPHORE_STATUS_UNSATISFIED_NOWAIT;
    249     return;
     204    return STATUS_UNSATISFIED;
    250205  }
    251206
     
    256211    STATES_WAITING_FOR_SEMAPHORE,
    257212    timeout,
    258     CORE_SEMAPHORE_TIMEOUT,
    259213    &queue_context->Lock_context
    260214  );
     215  return _Thread_Wait_get_status( executing );
    261216}
    262217
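
    The seize above now ends with _Thread_Wait_get_status( executing ) and the enqueue
    no longer receives a timeout status argument.  A compact sketch of the blocked-path
    shape shared by the semaphore, barrier, message queue and RWLock hunks in this
    changeset; example_object and EXAMPLE_WAIT_STATE are placeholders, not real names:

        /* Enqueue the executing thread; note there is no longer a per-call
         * timeout status code parameter.
         */
        _Thread_queue_Enqueue_critical(
          &example_object->Wait_queue.Queue,
          example_object->operations,
          executing,
          EXAMPLE_WAIT_STATE,
          timeout,
          &queue_context->Lock_context
        );

        /* Whatever unblocked the thread (a surrender, a flush filter, or
         * presumably the general purpose timeout handler) left the outcome in
         * Wait.return_code; read it back uniformly.
         */
        return _Thread_Wait_get_status( executing );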
  • cpukit/score/include/rtems/score/corespinlockimpl.h

    rd887c1b rdce48791  
    2121
    2222#include <rtems/score/corespinlock.h>
     23#include <rtems/score/status.h>
    2324#include <rtems/score/watchdog.h>
    2425
     
    3334 */
    3435/**@{**/
    35 
    36 /**
    37  *  Core Spinlock handler return statuses.
    38  */
    39 typedef enum {
    40   /** This status indicates that the operation completed successfully. */
    41   CORE_SPINLOCK_SUCCESSFUL,
    42   /** This status indicates that the current thread already holds the spinlock.
    43    *  An attempt to relock it will result in deadlock.
    44    */
    45   CORE_SPINLOCK_HOLDER_RELOCKING,
    46   /** This status indicates that the current thread is attempting to unlock a
    47    *  spinlock that is held by another thread.
    48    */
    49   CORE_SPINLOCK_NOT_HOLDER,
    50   /** This status indicates that a thread reached the limit of time it
    51    *  was willing to wait on the spin lock.
    52    */
    53   CORE_SPINLOCK_TIMEOUT,
    54   /** This status indicates that a thread is currently waiting for this
    55    *  spin lock.
    56    */
    57   CORE_SPINLOCK_IS_BUSY,
    58   /** This status indicates that the spinlock is currently locked and thus
    59    *  unavailable.
    60    */
    61   CORE_SPINLOCK_UNAVAILABLE
    62 }   CORE_spinlock_Status;
    63 
    64 /** This is a shorthand for the last status code. */
    65 #define CORE_SPINLOCK_STATUS_LAST CORE_SPINLOCK_UNAVAILABLE
    6636
    6737/** This indicates the lock is available. */
     
    11686 *         this operation.
    11787 */
    118 CORE_spinlock_Status _CORE_spinlock_Seize(
     88Status_Control _CORE_spinlock_Seize(
    11989  CORE_spinlock_Control *the_spinlock,
    12090  bool                   wait,
     
    131101 *  @param[in] the_spinlock is the spinlock to surrender
    132102 */
    133 CORE_spinlock_Status _CORE_spinlock_Surrender(
     103Status_Control _CORE_spinlock_Surrender(
    134104  CORE_spinlock_Control *the_spinlock,
    135105  ISR_lock_Context      *lock_context
  • cpukit/score/include/rtems/score/mpciimpl.h

    rd887c1b rdce48791  
    2020
    2121#include <rtems/score/mpci.h>
     22#include <rtems/score/status.h>
    2223
    2324#ifdef __cplusplus
     
    163164 *             may indicate the caller is blocking on a message queue
    164165 *             operation.
    165  *  @param[in] timeout_code is the timeout code
    166166 *
    167167 *  @retval This method returns the operation status from the remote node.
    168168 */
    169 uint32_t _MPCI_Send_request_packet (
    170   uint32_t           destination,
    171   MP_packet_Prefix  *the_packet,
    172   States_Control     extra_state,
    173   uint32_t           timeout_code
     169Status_Control _MPCI_Send_request_packet(
     170  uint32_t          destination,
     171  MP_packet_Prefix *the_packet,
     172  States_Control    extra_state
    174173);
    175174
  • cpukit/score/include/rtems/score/mrsp.h

    rd887c1b rdce48791  
    5252 */
    5353
    54 /**
    55  * @brief MrsP status code.
    56  *
    57  * The values are chosen to directly map to RTEMS status codes.  In case this
    58  * implementation is used for other APIs, then for example the errno values can
    59  * be added with a bit shift.
    60  */
    61 typedef enum {
    62   MRSP_SUCCESSFUL = 0,
    63   MRSP_TIMEOUT = 6,
    64   MRSP_INVALID_NUMBER = 10,
    65   MRSP_RESOUCE_IN_USE = 12,
    66   MRSP_UNSATISFIED = 13,
    67   MRSP_INCORRECT_STATE = 14,
    68   MRSP_INVALID_PRIORITY = 19,
    69   MRSP_NOT_OWNER_OF_RESOURCE = 23,
    70   MRSP_NO_MEMORY = 26,
    71 
    72   /**
    73    * @brief Internal state used for MRSP_Rival::status to indicate that this
    74    * rival waits for resource ownership.
    75    */
    76   MRSP_WAIT_FOR_OWNERSHIP = 255
    77 } MRSP_Status;
    78 
    7954typedef struct MRSP_Control MRSP_Control;
    8055
     
    125100   *
    126101   * Initially the status is set to MRSP_WAIT_FOR_OWNERSHIP.  The rival will
    127    * busy wait until a status change happens.  This can be MRSP_SUCCESSFUL or
    128    * MRSP_TIMEOUT.  State changes are protected by the MrsP control lock.
     102   * busy wait until a status change happens.  This can be STATUS_SUCCESSFUL or
     103   * STATUS_TIMEOUT.  State changes are protected by the MrsP control lock.
    129104   */
    130   volatile MRSP_Status status;
     105  volatile int status;
    131106
    132107  /**
  • cpukit/score/include/rtems/score/mrspimpl.h

    rd887c1b rdce48791  
    2424#include <rtems/score/resourceimpl.h>
    2525#include <rtems/score/schedulerimpl.h>
     26#include <rtems/score/status.h>
    2627#include <rtems/score/watchdogimpl.h>
    2728#include <rtems/score/wkspace.h>
     
    3637 * @{
    3738 */
     39
     40/**
     41 * @brief Internal state used for MRSP_Rival::status to indicate that this
     42 * rival waits for resource ownership.
     43 */
     44#define MRSP_WAIT_FOR_OWNERSHIP STATUS_MINUS_ONE
    3845
    3946/*
     
    127134}
    128135
    129 RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Initialize(
     136RTEMS_INLINE_ROUTINE Status_Control _MRSP_Initialize(
    130137  MRSP_Control     *mrsp,
    131138  Priority_Control  ceiling_priority,
     
    138145
    139146  if ( initially_locked ) {
    140     return MRSP_INVALID_NUMBER;
     147    return STATUS_INVALID_NUMBER;
    141148  }
    142149
     
    145152  );
    146153  if ( mrsp->ceiling_priorities == NULL ) {
    147     return MRSP_NO_MEMORY;
     154    return STATUS_NO_MEMORY;
    148155  }
    149156
     
    156163  _ISR_lock_Initialize( &mrsp->Lock, "MrsP" );
    157164
    158   return MRSP_SUCCESSFUL;
     165  return STATUS_SUCCESSFUL;
    159166}
    160167
     
    200207    _MRSP_Giant_release( &giant_lock_context );
    201208
    202     rival->status = MRSP_TIMEOUT;
     209    rival->status = STATUS_TIMEOUT;
    203210
    204211    _MRSP_Release( mrsp, &queue_context );
     
    208215}
    209216
    210 RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Wait_for_ownership(
     217RTEMS_INLINE_ROUTINE Status_Control _MRSP_Wait_for_ownership(
    211218  MRSP_Control         *mrsp,
    212219  Resource_Node        *owner,
     
    218225)
    219226{
    220   MRSP_Status status;
     227  Status_Control status;
    221228  MRSP_Rival rival;
    222229  Thread_Life_state life_state;
     
    279286    _ISR_Local_enable( level );
    280287
    281     if ( status == MRSP_TIMEOUT ) {
     288    if ( status == STATUS_TIMEOUT ) {
    282289      _MRSP_Restore_priority( executing, initial_priority );
    283290    }
     
    287294}
    288295
    289 RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Seize(
     296RTEMS_INLINE_ROUTINE Status_Control _MRSP_Seize(
    290297  MRSP_Control         *mrsp,
    291298  Thread_Control       *executing,
     
    295302)
    296303{
    297   MRSP_Status status;
     304  Status_Control status;
    298305  const Scheduler_Control *scheduler = _Scheduler_Get_own( executing );
    299306  uint32_t scheduler_index = _Scheduler_Get_index( scheduler );
     
    309316  if ( !priority_ok) {
    310317    _ISR_lock_ISR_enable( &queue_context->Lock_context );
    311     return MRSP_INVALID_PRIORITY;
     318    return STATUS_MUTEX_CEILING_VIOLATED;
    312319  }
    313320
     
    322329      queue_context
    323330    );
    324     status = MRSP_SUCCESSFUL;
     331    status = STATUS_SUCCESSFUL;
    325332  } else if (
    326333    wait
     
    339346    _MRSP_Release( mrsp, queue_context );
    340347    /* Not available, nested access or deadlock */
    341     status = MRSP_UNSATISFIED;
     348    status = STATUS_UNAVAILABLE;
    342349  }
    343350
     
    345352}
    346353
    347 RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Surrender(
     354RTEMS_INLINE_ROUTINE Status_Control _MRSP_Surrender(
    348355  MRSP_Control         *mrsp,
    349356  Thread_Control       *executing,
     
    357364  if ( _Resource_Get_owner( &mrsp->Resource ) != &executing->Resource_node ) {
    358365    _ISR_lock_ISR_enable( &queue_context->Lock_context );
    359     return MRSP_NOT_OWNER_OF_RESOURCE;
     366    return STATUS_NOT_OWNER;
    360367  }
    361368
     
    367374  ) {
    368375    _ISR_lock_ISR_enable( &queue_context->Lock_context );
    369     return MRSP_INCORRECT_STATE;
     376    return STATUS_RELEASE_ORDER_VIOLATION;
    370377  }
    371378
     
    389396     * potential double extraction in _MRSP_Timeout().
    390397     */
    391     rival->status = MRSP_SUCCESSFUL;
     398    rival->status = STATUS_SUCCESSFUL;
    392399
    393400    new_owner = rival->thread;
     
    414421  _Thread_Dispatch_enable( cpu_self );
    415422
    416   return MRSP_SUCCESSFUL;
    417 }
    418 
    419 RTEMS_INLINE_ROUTINE MRSP_Status _MRSP_Can_destroy( MRSP_Control *mrsp )
     423  return STATUS_SUCCESSFUL;
     424}
     425
     426RTEMS_INLINE_ROUTINE Status_Control _MRSP_Can_destroy( MRSP_Control *mrsp )
    420427{
    421428  if ( _Resource_Get_owner( &mrsp->Resource ) != NULL ) {
    422     return MRSP_RESOUCE_IN_USE;
    423   }
    424 
    425   return MRSP_SUCCESSFUL;
     429    return STATUS_RESOURCE_IN_USE;
     430  }
     431
     432  return STATUS_SUCCESSFUL;
    426433}
    427434
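
    With MRSP_Status gone, MRSP_Rival::status is a plain int holding Status_Control
    values plus the internal MRSP_WAIT_FOR_OWNERSHIP marker defined above.  A heavily
    hedged sketch of the waiter side implied by the documentation in mrsp.h; the real
    _MRSP_Wait_for_ownership also handles priorities, watchdogs and the giant lock,
    none of which is shown here:

        MRSP_Rival     rival;   /* registered with the resource beforehand */
        Status_Control status;

        /* Busy wait until the owner or the timeout handler replaces the
         * MRSP_WAIT_FOR_OWNERSHIP marker with a final status.
         */
        do {
          status = (Status_Control) rival.status;
        } while ( status == MRSP_WAIT_FOR_OWNERSHIP );

        /* status is now STATUS_SUCCESSFUL or STATUS_TIMEOUT. */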
  • cpukit/score/include/rtems/score/thread.h

    rd887c1b rdce48791  
    289289
    290290  /**
    291    * @brief Code to set the timeout return code in _Thread_Timeout().
    292    */
    293   uint32_t timeout_code;
    294 
    295   /**
    296291   * @brief The current thread queue.
    297292   *
  • cpukit/score/include/rtems/score/threadimpl.h

    rd887c1b rdce48791  
    3030#include <rtems/score/resourceimpl.h>
    3131#include <rtems/score/statesimpl.h>
     32#include <rtems/score/status.h>
    3233#include <rtems/score/sysstate.h>
    3334#include <rtems/score/threadqimpl.h>
     
    14511452
    14521453/**
    1453  * @brief Sets the thread wait timeout code.
    1454  *
    1455  * @param[in] the_thread The thread.
    1456  * @param[in] timeout_code The new thread wait timeout code.
    1457  */
    1458 RTEMS_INLINE_ROUTINE void _Thread_Wait_set_timeout_code(
    1459   Thread_Control *the_thread,
    1460   uint32_t        timeout_code
    1461 )
    1462 {
    1463   the_thread->Wait.timeout_code = timeout_code;
    1464 }
    1465 
    1466 /**
    14671454 * @brief Returns the object identifier of the object containing the current
    14681455 * thread wait queue.
     
    14801467Objects_Id _Thread_Wait_get_id( const Thread_Control *the_thread );
    14811468
     1469RTEMS_INLINE_ROUTINE Status_Control _Thread_Wait_get_status(
     1470  const Thread_Control *the_thread
     1471)
     1472{
     1473  return (Status_Control) the_thread->Wait.return_code;
     1474}
     1475
    14821476/**
    14831477 * @brief General purpose thread wait timeout.
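
    The new _Thread_Wait_get_status() above is the single read-back point for a
    thread's wait outcome.  A short fragment consolidating the two conversion styles
    that appear in this changeset (regiongetsegment.c for the Classic API, condition.c
    for the POSIX/newlib view); executing is assumed to be the thread that just
    returned from a blocking wait:

        Status_Control status = _Thread_Wait_get_status( executing );

        rtems_status_code sc  = _Status_Get( status );       /* Classic directive  */
        int               eno = STATUS_GET_POSIX( status );  /* POSIX errno value  */

        /* _Status_Get_after_wait( executing ), used in regiongetsegment.c, appears
         * to combine the first two steps for the Classic case.
         */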
  • cpukit/score/include/rtems/score/threadqimpl.h

    rd887c1b rdce48791  
    342342 * @param[in] timeout Interval to wait.  Use WATCHDOG_NO_TIMEOUT to block
    343343 * potentially forever.
    344  * @param[in] timeout_code The return code in case a timeout occurs.
    345344 * @param[in] lock_context The lock context of the lock acquire.
    346345 */
     
    351350  States_Control                 state,
    352351  Watchdog_Interval              timeout,
    353   uint32_t                       timeout_code,
    354352  ISR_lock_Context              *lock_context
    355353);
     
    364362  Thread_Control                *the_thread,
    365363  States_Control                 state,
    366   Watchdog_Interval              timeout,
    367   uint32_t                       timeout_code
     364  Watchdog_Interval              timeout
    368365)
    369366{
     
    377374    state,
    378375    timeout,
    379     timeout_code,
    380376    &lock_context
    381377  );
     
    623619 */
    624620Thread_Control *_Thread_queue_Flush_default_filter(
     621  Thread_Control       *the_thread,
     622  Thread_queue_Queue   *queue,
     623  Thread_queue_Context *queue_context
     624);
     625
     626/**
     627 * @brief Status unavailable thread queue flush filter function.
     628 *
     629 * Sets the thread wait return code of the thread to STATUS_UNAVAILABLE.
     630 *
     631 * @param the_thread The thread to extract.
     632 * @param queue Unused.
     633 * @param queue_context Unused.
     634 *
     635 * @retval the_thread Extract this thread.
     636 */
     637Thread_Control *_Thread_queue_Flush_status_unavailable(
     638  Thread_Control       *the_thread,
     639  Thread_queue_Queue   *queue,
     640  Thread_queue_Context *queue_context
     641);
     642
     643/**
     644 * @brief Status object was deleted thread queue flush filter function.
     645 *
     646 * Sets the thread wait return code of the thread to STATUS_OBJECT_WAS_DELETED
     647 *
     648 * @param the_thread The thread to extract.
     649 * @param queue Unused.
     650 * @param queue_context Unused.
     651 *
     652 * @retval the_thread Extract this thread.
     653 */
     654Thread_Control *_Thread_queue_Flush_status_object_was_deleted(
    625655  Thread_Control       *the_thread,
    626656  Thread_queue_Queue   *queue,
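
    The two filter declarations above replace the per-handler "was deleted" and
    "unsatisfied nowait" filters removed elsewhere in this changeset.  Based on their
    documentation and on the analogous filter kept in coremsgclose.c further down, a
    sketch of what such a filter boils down to (shown for the unavailable case; the
    deleted-object variant would store STATUS_OBJECT_WAS_DELETED instead):

        Thread_Control *_Thread_queue_Flush_status_unavailable(
          Thread_Control       *the_thread,
          Thread_queue_Queue   *queue,
          Thread_queue_Context *queue_context
        )
        {
          /* Record why the wait ended, then tell the flush loop to extract
           * this thread.
           */
          the_thread->Wait.return_code = STATUS_UNAVAILABLE;

          return the_thread;
        }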
  • cpukit/score/preinstall.am

    rd887c1b rdce48791  
    316316        $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/statesimpl.h
    317317PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/statesimpl.h
     318
     319$(PROJECT_INCLUDE)/rtems/score/status.h: include/rtems/score/status.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
     320        $(INSTALL_DATA) $< $(PROJECT_INCLUDE)/rtems/score/status.h
     321PREINSTALL_FILES += $(PROJECT_INCLUDE)/rtems/score/status.h
    318322
    319323$(PROJECT_INCLUDE)/rtems/score/sysstate.h: include/rtems/score/sysstate.h $(PROJECT_INCLUDE)/rtems/score/$(dirstamp)
  • cpukit/score/src/condition.c

    rd887c1b rdce48791  
    9292  cpu_self = _Thread_Dispatch_disable_critical( lock_context );
    9393
    94   executing->Wait.return_code = 0;
    9594  _Thread_queue_Enqueue_critical(
    9695    &condition->Queue.Queue,
     
    9998    STATES_WAITING_FOR_SYS_LOCK_CONDITION,
    10099    timeout,
    101     ETIMEDOUT,
    102100    lock_context
    103101  );
     
    153151  executing = cpu_self->executing;
    154152  _Thread_Dispatch_enable( cpu_self );
    155   eno = (int) executing->Wait.return_code;
     153  eno = STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
    156154  _Mutex_Acquire( _mutex );
    157155
     
    213211  executing = cpu_self->executing;
    214212  _Thread_Dispatch_enable( cpu_self );
    215   eno = (int) executing->Wait.return_code;
     213  eno = STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
    216214  _Mutex_recursive_Acquire( _mutex );
    217215  _mutex->_nest_level = nest_level;
  • cpukit/score/src/corebarrier.c

    rd887c1b rdce48791  
    3232  _Thread_queue_Initialize( &the_barrier->Wait_queue );
    3333}
    34 
    35 Thread_Control *_CORE_barrier_Was_deleted(
    36   Thread_Control       *the_thread,
    37   Thread_queue_Queue   *queue,
    38   Thread_queue_Context *queue_context
    39 )
    40 {
    41   the_thread->Wait.return_code = CORE_BARRIER_WAS_DELETED;
    42 
    43   return the_thread;
    44 }
  • cpukit/score/src/corebarrierwait.c

    rd887c1b rdce48791  
    2121#include <rtems/score/corebarrierimpl.h>
    2222#include <rtems/score/statesimpl.h>
     23#include <rtems/score/threadimpl.h>
    2324
    24 void _CORE_barrier_Seize(
     25Status_Control _CORE_barrier_Seize(
    2526  CORE_barrier_Control *the_barrier,
    2627  Thread_Control       *executing,
     
    3132{
    3233  uint32_t number_of_waiting_threads;
    33 
    34   executing->Wait.return_code = CORE_BARRIER_STATUS_SUCCESSFUL;
    3534
    3635  _CORE_barrier_Acquire_critical( the_barrier, queue_context );
     
    4342      && number_of_waiting_threads == the_barrier->Attributes.maximum_count
    4443  ) {
    45     executing->Wait.return_code = CORE_BARRIER_STATUS_AUTOMATICALLY_RELEASED;
    4644    _CORE_barrier_Surrender( the_barrier, queue_context );
     45    return STATUS_BARRIER_AUTOMATICALLY_RELEASED;
    4746  } else {
    4847    the_barrier->number_of_waiting_threads = number_of_waiting_threads;
     
    5352      STATES_WAITING_FOR_BARRIER,
    5453      timeout,
    55       CORE_BARRIER_TIMEOUT,
    5654      &queue_context->Lock_context
    5755    );
     56    return _Thread_Wait_get_status( executing );
    5857  }
    5958}
  • cpukit/score/src/coremsgbroadcast.c

    rd887c1b rdce48791  
    2222#include <rtems/score/objectimpl.h>
    2323
    24 CORE_message_queue_Status _CORE_message_queue_Broadcast(
     24Status_Control _CORE_message_queue_Broadcast(
    2525  CORE_message_queue_Control *the_message_queue,
    2626  const void                 *buffer,
     
    3535  if ( size > the_message_queue->maximum_message_size ) {
    3636    _ISR_lock_ISR_enable( &queue_context->Lock_context );
    37     return CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE;
     37    return STATUS_MESSAGE_INVALID_SIZE;
    3838  }
    3939
     
    6161
    6262  *count = number_broadcasted;
    63   return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
     63  return STATUS_SUCCESSFUL;
    6464}
  • cpukit/score/src/coremsgclose.c

    rd887c1b rdce48791  
    2828)
    2929{
    30   the_thread->Wait.return_code = CORE_MESSAGE_QUEUE_STATUS_WAS_DELETED;
     30  the_thread->Wait.return_code = STATUS_MESSAGE_QUEUE_WAS_DELETED;
    3131
    3232  return the_thread;
  • cpukit/score/src/coremsgseize.c

    rd887c1b rdce48791  
    2323#include <rtems/score/isr.h>
    2424#include <rtems/score/coremsgimpl.h>
    25 #include <rtems/score/thread.h>
     25#include <rtems/score/threadimpl.h>
    2626#include <rtems/score/statesimpl.h>
    2727
    28 void _CORE_message_queue_Seize(
     28Status_Control _CORE_message_queue_Seize(
    2929  CORE_message_queue_Control *the_message_queue,
    3030  Thread_Control             *executing,
     
    3838  CORE_message_queue_Buffer_control *the_message;
    3939
    40   executing->Wait.return_code = CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
    4140  the_message = _CORE_message_queue_Get_pending_message( the_message_queue );
    4241  if ( the_message != NULL ) {
     
    5958      _CORE_message_queue_Free_message_buffer(the_message_queue, the_message);
    6059      _CORE_message_queue_Release( the_message_queue, queue_context );
    61       return;
     60      return STATUS_SUCCESSFUL;
    6261    #else
    6362    {
     
    8180        );
    8281        _CORE_message_queue_Release( the_message_queue, queue_context );
    83         return;
     82        return STATUS_SUCCESSFUL;
    8483      }
    8584
     
    102101        queue_context
    103102      );
    104       return;
     103      return STATUS_SUCCESSFUL;
    105104    }
    106105    #endif
     
    109108  if ( !wait ) {
    110109    _CORE_message_queue_Release( the_message_queue, queue_context );
    111     executing->Wait.return_code = CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED_NOWAIT;
    112     return;
     110    return STATUS_UNSATISFIED;
    113111  }
    114112
     
    123121    STATES_WAITING_FOR_MESSAGE,
    124122    timeout,
    125     CORE_MESSAGE_QUEUE_STATUS_TIMEOUT,
    126123    &queue_context->Lock_context
    127124  );
     125  return _Thread_Wait_get_status( executing );
    128126}
  • cpukit/score/src/coremsgsubmit.c

    rd887c1b rdce48791  
    2323#include <rtems/score/objectimpl.h>
    2424#include <rtems/score/isr.h>
     25#include <rtems/score/threadimpl.h>
    2526#include <rtems/score/statesimpl.h>
    2627#include <rtems/score/wkspace.h>
    2728
    28 CORE_message_queue_Status _CORE_message_queue_Submit(
     29Status_Control _CORE_message_queue_Submit(
    2930  CORE_message_queue_Control       *the_message_queue,
    3031  Thread_Control                   *executing,
     
    4243  if ( size > the_message_queue->maximum_message_size ) {
    4344    _CORE_message_queue_Release( the_message_queue, queue_context );
    44     return CORE_MESSAGE_QUEUE_STATUS_INVALID_SIZE;
     45    return STATUS_MESSAGE_INVALID_SIZE;
    4546  }
    4647
     
    5758  );
    5859  if ( the_thread != NULL ) {
    59     return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
     60    return STATUS_SUCCESSFUL;
    6061  }
    6162
     
    9697#endif
    9798
    98     return CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
     99    return STATUS_SUCCESSFUL;
    99100  }
    100101
    101102  #if !defined(RTEMS_SCORE_COREMSG_ENABLE_BLOCKING_SEND)
    102103    _CORE_message_queue_Release( the_message_queue, queue_context );
    103     return CORE_MESSAGE_QUEUE_STATUS_TOO_MANY;
     104    return STATUS_TOO_MANY;
    104105  #else
    105106    /*
     
    110111    if ( !wait ) {
    111112      _CORE_message_queue_Release( the_message_queue, queue_context );
    112       return CORE_MESSAGE_QUEUE_STATUS_TOO_MANY;
     113      return STATUS_TOO_MANY;
    113114    }
    114115
     
    119120    if ( _ISR_Is_in_progress() ) {
    120121      _CORE_message_queue_Release( the_message_queue, queue_context );
    121       return CORE_MESSAGE_QUEUE_STATUS_UNSATISFIED;
     122      return STATUS_MESSAGE_QUEUE_WAIT_IN_ISR;
    122123    }
    123124
     
    128129     *  would be to use this variable prior to here.
    129130     */
    130     executing->Wait.return_code = CORE_MESSAGE_QUEUE_STATUS_SUCCESSFUL;
    131131    executing->Wait.return_argument_second.immutable_object = buffer;
    132132    executing->Wait.option = (uint32_t) size;
     
    139139      STATES_WAITING_FOR_MESSAGE,
    140140      timeout,
    141       CORE_MESSAGE_QUEUE_STATUS_TIMEOUT,
    142141      &queue_context->Lock_context
    143142    );
    144     return executing->Wait.return_code;
     143    return _Thread_Wait_get_status( executing );
    145144  #endif
    146145}
  • cpukit/score/src/coremutex.c

    rd887c1b rdce48791  
    2424#include <rtems/score/thread.h>
    2525
    26 CORE_mutex_Status _CORE_mutex_Initialize(
     26Status_Control _CORE_mutex_Initialize(
    2727  CORE_mutex_Control           *the_mutex,
    2828  Thread_Control               *executing,
     
    6565         */
    6666        _Thread_Dispatch_enable( cpu_self );
    67         return CORE_MUTEX_STATUS_CEILING_VIOLATED;
     67        return STATUS_MUTEX_CEILING_VIOLATED;
    6868      }
    6969
     
    8989  }
    9090
    91   return CORE_MUTEX_STATUS_SUCCESSFUL;
     91  return STATUS_SUCCESSFUL;
    9292}
    93 
    94 Thread_Control *_CORE_mutex_Was_deleted(
    95   Thread_Control       *the_thread,
    96   Thread_queue_Queue   *queue,
    97   Thread_queue_Context *queue_context
    98 )
    99 {
    100   the_thread->Wait.return_code = CORE_MUTEX_WAS_DELETED;
    101 
    102   return the_thread;
    103 }
    104 
    105 Thread_Control *_CORE_mutex_Unsatisfied_nowait(
    106   Thread_Control       *the_thread,
    107   Thread_queue_Queue   *queue,
    108   Thread_queue_Context *queue_context
    109 )
    110 {
    111   the_thread->Wait.return_code = CORE_MUTEX_STATUS_UNSATISFIED_NOWAIT;
    112 
    113   return the_thread;
    114 }
  • cpukit/score/src/coremutexseize.c

    rd887c1b rdce48791  
    2525#include <rtems/score/thread.h>
    2626
    27 void _CORE_mutex_Seize_interrupt_blocking(
     27Status_Control _CORE_mutex_Seize_interrupt_blocking(
    2828  CORE_mutex_Control  *the_mutex,
    2929  Thread_Control      *executing,
     
    6868    STATES_WAITING_FOR_MUTEX,
    6969    timeout,
    70     CORE_MUTEX_TIMEOUT,
    7170    lock_context
    7271  );
     
    7574  _Thread_Dispatch_enable( _Per_CPU_Get() );
    7675#endif
     76
     77  return _Thread_Wait_get_status( executing );
    7778}
    7879
  • cpukit/score/src/coremutexsurrender.c

    rd887c1b rdce48791  
    2424#include <rtems/score/thread.h>
    2525
    26 CORE_mutex_Status _CORE_mutex_Surrender(
     26Status_Control _CORE_mutex_Surrender(
    2727  CORE_mutex_Control   *the_mutex,
    2828  Thread_queue_Context *queue_context
     
    4545    if ( !_Thread_Is_executing( holder ) ) {
    4646      _ISR_lock_ISR_enable( &queue_context->Lock_context );
    47       return CORE_MUTEX_STATUS_NOT_OWNER_OF_RESOURCE;
     47      return STATUS_NOT_OWNER;
    4848    }
    4949  }
     
    5555  if ( !the_mutex->nest_count ) {
    5656    _CORE_mutex_Release( the_mutex, queue_context );
    57     return CORE_MUTEX_STATUS_SUCCESSFUL;
     57    return STATUS_SUCCESSFUL;
    5858  }
    5959
     
    7070        case CORE_MUTEX_NESTING_ACQUIRES:
    7171          _CORE_mutex_Release( the_mutex, queue_context );
    72           return CORE_MUTEX_STATUS_SUCCESSFUL;
     72          return STATUS_SUCCESSFUL;
    7373        #if defined(RTEMS_POSIX_API)
    7474          case CORE_MUTEX_NESTING_IS_ERROR:
    7575            /* should never occur */
    7676            _CORE_mutex_Release( the_mutex, queue_context );
    77             return CORE_MUTEX_STATUS_NESTING_NOT_ALLOWED;
     77            return STATUS_NESTING_NOT_ALLOWED;
    7878        #endif
    7979        case CORE_MUTEX_NESTING_BLOCKS:
     
    8484      _CORE_mutex_Release( the_mutex, queue_context );
    8585      /* must be CORE_MUTEX_NESTING_ACQUIRES or we wouldn't be here */
    86       return CORE_MUTEX_STATUS_SUCCESSFUL;
     86      return STATUS_SUCCESSFUL;
    8787    #endif
    8888  }
     
    180180  }
    181181
    182   return CORE_MUTEX_STATUS_SUCCESSFUL;
     182  return STATUS_SUCCESSFUL;
    183183}
  • cpukit/score/src/corerwlockobtainread.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/score/corerwlockimpl.h>
     22#include <rtems/score/threadimpl.h>
    2223#include <rtems/score/threadqimpl.h>
    2324#include <rtems/score/statesimpl.h>
    2425#include <rtems/score/watchdog.h>
    2526
    26 void _CORE_RWLock_Seize_for_reading(
     27Status_Control _CORE_RWLock_Seize_for_reading(
    2728  CORE_RWLock_Control  *the_rwlock,
    2829  Thread_Control       *executing,
     
    4546      the_rwlock->number_of_readers += 1;
    4647      _CORE_RWLock_Release( the_rwlock, queue_context );
    47       executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL;
    48       return;
     48      return STATUS_SUCCESSFUL;
    4949
    5050    case CORE_RWLOCK_LOCKED_FOR_READING: {
     
    5757        the_rwlock->number_of_readers += 1;
    5858        _CORE_RWLock_Release( the_rwlock, queue_context );
    59         executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL;
    60         return;
     59        return STATUS_SUCCESSFUL;
    6160      }
    6261      break;
     
    7271  if ( !wait ) {
    7372    _CORE_RWLock_Release( the_rwlock, queue_context );
    74     executing->Wait.return_code = CORE_RWLOCK_UNAVAILABLE;
    75     return;
     73    return STATUS_UNAVAILABLE;
    7674  }
    7775
     
    8078   */
    8179
    82   executing->Wait.option      = CORE_RWLOCK_THREAD_WAITING_FOR_READ;
    83   executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL;
     80  executing->Wait.option = CORE_RWLOCK_THREAD_WAITING_FOR_READ;
    8481
    8582  _Thread_queue_Enqueue_critical(
     
    8986     STATES_WAITING_FOR_RWLOCK,
    9087     timeout,
    91      CORE_RWLOCK_TIMEOUT,
    9288     &queue_context->Lock_context
    9389  );
     90  return _Thread_Wait_get_status( executing );
    9491}
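
    Both RW lock seize paths now report their outcome through the return value instead of
    recording a CORE_RWLOCK_* code in the executing thread, so the immediate (non-waiting)
    cases no longer touch Wait.return_code at all. A minimal stand-alone before/after sketch
    of that calling convention, using simplified stand-in types rather than the real score
    definitions:

    #include <stdio.h>

    /* Simplified stand-ins; not the real score types. */
    typedef enum { STATUS_SUCCESSFUL, STATUS_UNAVAILABLE } Status_Control;

    typedef struct {
      int wait_return_code;   /* stand-in for Thread_Control::Wait::return_code */
    } Thread_Control;

    /* Old shape: a void seize that communicates through the executing thread. */
    static void seize_old( Thread_Control *executing, int available )
    {
      executing->wait_return_code = available ? 0 /* successful */ : 1 /* unavailable */;
    }

    /* New shape: the outcome is the return value; no thread field is written
       on the immediate (non-blocking) paths. */
    static Status_Control seize_new( int available )
    {
      return available ? STATUS_SUCCESSFUL : STATUS_UNAVAILABLE;
    }

    int main( void )
    {
      Thread_Control executing = { 0 };

      /* Before: call, then inspect the executing thread afterwards. */
      seize_old( &executing, 0 );
      printf( "old: %d\n", executing.wait_return_code );

      /* After: the call site receives the status directly. */
      printf( "new: %d\n", (int) seize_new( 0 ) );
      return 0;
    }

    The write-lock path in the next file follows the same shape.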
  • cpukit/score/src/corerwlockobtainwrite.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/score/corerwlockimpl.h>
     22#include <rtems/score/threadimpl.h>
    2223#include <rtems/score/threadqimpl.h>
    2324#include <rtems/score/statesimpl.h>
    2425#include <rtems/score/watchdog.h>
    2526
    26 void _CORE_RWLock_Seize_for_writing(
     27Status_Control _CORE_RWLock_Seize_for_writing(
    2728  CORE_RWLock_Control  *the_rwlock,
    2829  Thread_Control       *executing,
     
    4546      the_rwlock->current_state = CORE_RWLOCK_LOCKED_FOR_WRITING;
    4647      _CORE_RWLock_Release( the_rwlock, queue_context );
    47       executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL;
    48       return;
     48      return STATUS_SUCCESSFUL;
    4949
    5050    case CORE_RWLOCK_LOCKED_FOR_READING:
     
    5959  if ( !wait ) {
    6060    _CORE_RWLock_Release( the_rwlock, queue_context );
    61     executing->Wait.return_code = CORE_RWLOCK_UNAVAILABLE;
    62     return;
     61    return STATUS_UNAVAILABLE;
    6362  }
    6463
     
    6766   */
    6867
    69   executing->Wait.option      = CORE_RWLOCK_THREAD_WAITING_FOR_WRITE;
    70   executing->Wait.return_code = CORE_RWLOCK_SUCCESSFUL;
     68  executing->Wait.option = CORE_RWLOCK_THREAD_WAITING_FOR_WRITE;
    7169
    7270  _Thread_queue_Enqueue_critical(
     
    7674     STATES_WAITING_FOR_RWLOCK,
    7775     timeout,
    78      CORE_RWLOCK_TIMEOUT,
    7976     &queue_context->Lock_context
    8077  );
     78  return _Thread_Wait_get_status( executing );
    8179}
  • cpukit/score/src/corerwlockrelease.c

    rd887c1b rdce48791  
    7474}
    7575
    76 CORE_RWLock_Status _CORE_RWLock_Surrender(
     76Status_Control _CORE_RWLock_Surrender(
    7777  CORE_RWLock_Control  *the_rwlock,
    7878  Thread_queue_Context *queue_context
     
    9191    /* This is an error at the caller site */
    9292    _CORE_RWLock_Release( the_rwlock, queue_context );
    93     return CORE_RWLOCK_SUCCESSFUL;
     93    return STATUS_SUCCESSFUL;
    9494  }
    9595
     
    100100      /* must be unlocked again */
    101101      _CORE_RWLock_Release( the_rwlock, queue_context );
    102       return CORE_RWLOCK_SUCCESSFUL;
     102      return STATUS_SUCCESSFUL;
    103103    }
    104104  }
     
    122122    queue_context
    123123  );
    124   return CORE_RWLOCK_SUCCESSFUL;
     124  return STATUS_SUCCESSFUL;
    125125}
  • cpukit/score/src/coresem.c

    rd887c1b rdce48791  
    3737  }
    3838}
    39 
    40 Thread_Control *_CORE_semaphore_Was_deleted(
    41   Thread_Control       *the_thread,
    42   Thread_queue_Queue   *queue,
    43   Thread_queue_Context *queue_context
    44 )
    45 {
    46   the_thread->Wait.return_code = CORE_SEMAPHORE_WAS_DELETED;
    47 
    48   return the_thread;
    49 }
    50 
    51 Thread_Control *_CORE_semaphore_Unsatisfied_nowait(
    52   Thread_Control       *the_thread,
    53   Thread_queue_Queue   *queue,
    54   Thread_queue_Context *queue_context
    55 )
    56 {
    57   the_thread->Wait.return_code = CORE_SEMAPHORE_STATUS_UNSATISFIED_NOWAIT;
    58 
    59   return the_thread;
    60 }
  • cpukit/score/src/corespinlockrelease.c

    rd887c1b rdce48791  
    2222#include <rtems/score/percpu.h>
    2323
    24 CORE_spinlock_Status _CORE_spinlock_Surrender(
     24Status_Control _CORE_spinlock_Surrender(
    2525  CORE_spinlock_Control *the_spinlock,
    2626  ISR_lock_Context      *lock_context
     
    3737    ) {
    3838      _CORE_spinlock_Release( the_spinlock, lock_context );
    39       return CORE_SPINLOCK_NOT_HOLDER;
     39      return STATUS_NOT_OWNER;
    4040    }
    4141
     
    4848
    4949  _CORE_spinlock_Release( the_spinlock, lock_context );
    50   return CORE_SPINLOCK_SUCCESSFUL;
     50  return STATUS_SUCCESSFUL;
    5151}
  • cpukit/score/src/corespinlockwait.c

    rd887c1b rdce48791  
    2222#include <rtems/score/percpu.h>
    2323
    24 CORE_spinlock_Status _CORE_spinlock_Seize(
     24Status_Control _CORE_spinlock_Seize(
    2525  CORE_spinlock_Control *the_spinlock,
    2626  bool                   wait,
     
    4141         the_spinlock->holder == executing ) {
    4242      _CORE_spinlock_Release( the_spinlock, lock_context );
    43       return CORE_SPINLOCK_HOLDER_RELOCKING;
     43      return STATUS_NESTING_NOT_ALLOWED;
    4444    }
    4545    the_spinlock->users += 1;
     
    4949        the_spinlock->holder = executing;
    5050        _CORE_spinlock_Release( the_spinlock, lock_context );
    51         return CORE_SPINLOCK_SUCCESSFUL;
     51        return STATUS_SUCCESSFUL;
    5252      }
    5353
     
    5858        the_spinlock->users -= 1;
    5959        _CORE_spinlock_Release( the_spinlock, lock_context );
    60         return CORE_SPINLOCK_UNAVAILABLE;
     60        return STATUS_UNAVAILABLE;
    6161      }
    6262
     
    6868          the_spinlock->users -= 1;
    6969          _CORE_spinlock_Release( the_spinlock, lock_context );
    70           return CORE_SPINLOCK_TIMEOUT;
     70          return STATUS_TIMEOUT;
    7171        }
    7272      #endif
  • cpukit/score/src/futex.c

    rd887c1b rdce48791  
    9191      executing,
    9292      STATES_WAITING_FOR_SYS_LOCK_FUTEX,
    93       0,
    94       0,
     93      WATCHDOG_NO_TIMEOUT,
    9594      &lock_context
    9695    );
  • cpukit/score/src/mpci.c

    rd887c1b rdce48791  
    226226}
    227227
    228 uint32_t   _MPCI_Send_request_packet (
    229   uint32_t            destination,
    230   MP_packet_Prefix   *the_packet,
    231   States_Control      extra_state,
    232   uint32_t            timeout_code
     228Status_Control _MPCI_Send_request_packet(
     229  uint32_t          destination,
     230  MP_packet_Prefix *the_packet,
     231  States_Control    extra_state
    233232)
    234233{
     
    261260      executing,
    262261      STATES_WAITING_FOR_RPC_REPLY | extra_state,
    263       the_packet->timeout,
    264       timeout_code
     262      the_packet->timeout
    265263    );
    266264
    267265  _Thread_Dispatch_enable( cpu_self );
    268266
    269   return executing->Wait.return_code;
     267  return _Thread_Wait_get_status( executing );
    270268}
    271269
  • cpukit/score/src/mutex.c

    rd887c1b rdce48791  
    120120    STATES_WAITING_FOR_SYS_LOCK_MUTEX,
    121121    timeout,
    122     ETIMEDOUT,
    123122    lock_context
    124123  );
     
    263262    }
    264263
    265     executing->Wait.return_code = 0;
    266264    _Mutex_Acquire_slow( mutex, owner, executing, ticks, &lock_context );
    267265
    268     return (int) executing->Wait.return_code;
     266    return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
    269267  }
    270268}
     
    383381    }
    384382
    385     executing->Wait.return_code = 0;
    386383    _Mutex_Acquire_slow(
    387384      &mutex->Mutex,
     
    392389    );
    393390
    394     return (int) executing->Wait.return_code;
     391    return STATUS_GET_POSIX( _Thread_Wait_get_status( executing ) );
    395392  }
    396393}
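
    The self-contained mutex path above turns the generic status into a POSIX error number
    with STATUS_GET_POSIX. One way to make that a pure bit operation, with no translation
    table, is to encode the per-API values inside each status constant. The stand-alone
    sketch below illustrates that idea; STATUS_GET_POSIX is taken from the diff above, while
    STATUS_BUILD, STATUS_GET_CLASSIC, the stand-in Classic codes and the concrete bit layout
    are assumptions for illustration only and need not match the actual rtems/score/status.h.

    #include <errno.h>
    #include <stdio.h>

    /* Illustrative encoding: the low byte holds the POSIX error number and the
       next byte a Classic API code.  The actual header may use a different layout. */
    #define STATUS_BUILD( classic, posix ) ( ( ( classic ) << 8 ) | ( posix ) )
    #define STATUS_GET_POSIX( status )     ( ( status ) & 0xff )
    #define STATUS_GET_CLASSIC( status )   ( ( ( status ) >> 8 ) & 0xff )

    /* Stand-in Classic codes for the sketch. */
    enum { CLASSIC_SUCCESSFUL = 0, CLASSIC_TIMEOUT = 6, CLASSIC_UNSATISFIED = 13 };

    typedef enum {
      STATUS_SUCCESSFUL  = STATUS_BUILD( CLASSIC_SUCCESSFUL, 0 ),
      STATUS_TIMEOUT     = STATUS_BUILD( CLASSIC_TIMEOUT, ETIMEDOUT ),
      STATUS_UNAVAILABLE = STATUS_BUILD( CLASSIC_UNSATISFIED, EBUSY )
    } Status_Control;

    int main( void )
    {
      Status_Control status = STATUS_TIMEOUT;

      /* Both extractions are constant shift/mask operations the compiler can fold. */
      printf( "posix:   %d\n", STATUS_GET_POSIX( status ) );   /* ETIMEDOUT */
      printf( "classic: %d\n", STATUS_GET_CLASSIC( status ) ); /* CLASSIC_TIMEOUT */
      return 0;
    }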
  • cpukit/score/src/semaphore.c

    rd887c1b rdce48791  
    102102      executing,
    103103      STATES_WAITING_FOR_SYS_LOCK_SEMAPHORE,
    104       0,
    105       0,
     104      WATCHDOG_NO_TIMEOUT,
    106105      &lock_context
    107106    );
  • cpukit/score/src/threadmp.c

    rd887c1b rdce48791  
    147147    the_proxy->Wait.option                  = executing->Wait.option;
    148148    the_proxy->Wait.return_code             = executing->Wait.return_code;
    149     the_proxy->Wait.timeout_code            = executing->Wait.timeout_code;
    150149
    151150    the_proxy->thread_queue_callout = _Thread_queue_MP_callout_do_nothing;
  • cpukit/score/src/threadqenqueue.c

    rd887c1b rdce48791  
    2323#include <rtems/score/threaddispatch.h>
    2424#include <rtems/score/threadimpl.h>
     25#include <rtems/score/status.h>
    2526#include <rtems/score/watchdogimpl.h>
    2627
     
    4041  States_Control                 state,
    4142  Watchdog_Interval              timeout,
    42   uint32_t                       timeout_code,
    4343  ISR_lock_Context              *lock_context
    4444)
     
    5555  _Thread_Lock_set( the_thread, &queue->Lock );
    5656
     57  the_thread->Wait.return_code = STATUS_SUCCESSFUL;
    5758  _Thread_Wait_set_queue( the_thread, queue );
    5859  _Thread_Wait_set_operations( the_thread, operations );
     
    7374   */
    7475  if ( timeout != WATCHDOG_NO_TIMEOUT ) {
    75     _Thread_Wait_set_timeout_code( the_thread, timeout_code );
    7676    _Thread_Timer_insert_relative(
    7777      the_thread,
  • cpukit/score/src/threadqflush.c

    rd887c1b rdce48791  
    2020
    2121#include <rtems/score/threadimpl.h>
     22#include <rtems/score/status.h>
    2223
    2324Thread_Control *_Thread_queue_Flush_default_filter(
     
    2728)
    2829{
     30  (void) queue;
     31  (void) queue_context;
     32  return the_thread;
     33}
     34
     35Thread_Control *_Thread_queue_Flush_status_object_was_deleted(
     36  Thread_Control       *the_thread,
     37  Thread_queue_Queue   *queue,
     38  Thread_queue_Context *queue_context
     39)
     40{
     41  the_thread->Wait.return_code = STATUS_OBJECT_WAS_DELETED;
     42
     43  (void) queue;
     44  (void) queue_context;
     45  return the_thread;
     46}
     47
     48Thread_Control *_Thread_queue_Flush_status_unavailable(
     49  Thread_Control       *the_thread,
     50  Thread_queue_Queue   *queue,
     51  Thread_queue_Context *queue_context
     52)
     53{
     54  the_thread->Wait.return_code = STATUS_UNAVAILABLE;
     55
    2956  (void) queue;
    3057  (void) queue_context;
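
    The flush filters above only stamp a status into each waiting thread and hand the thread
    back; walking the queue and unblocking the waiters is left to the generic flush code. A
    stand-alone sketch of that division of labour follows, with simplified stand-in types
    (the real filters also receive the queue and a queue context, and the real flush routine
    in the tree is more involved than this hypothetical loop):

    #include <stdio.h>

    typedef enum {
      STATUS_SUCCESSFUL,
      STATUS_OBJECT_WAS_DELETED,
      STATUS_UNAVAILABLE
    } Status_Control;

    typedef struct Thread_Control {
      Status_Control         return_code;   /* stand-in for Wait.return_code */
      struct Thread_Control *next;          /* next waiter in the queue */
    } Thread_Control;

    /* A filter decides what each flushed waiter will observe as its wait status. */
    typedef Thread_Control *( *Flush_filter )( Thread_Control * );

    static Thread_Control *filter_object_was_deleted( Thread_Control *the_thread )
    {
      the_thread->return_code = STATUS_OBJECT_WAS_DELETED;
      return the_thread;
    }

    /* Hypothetical flush loop: apply the filter to every waiter, then wake it. */
    static void flush_queue( Thread_Control *head, Flush_filter filter )
    {
      Thread_Control *the_thread;

      for ( the_thread = head; the_thread != NULL; the_thread = the_thread->next ) {
        if ( ( *filter )( the_thread ) != NULL ) {
          /* unblock the_thread here */
        }
      }
    }

    int main( void )
    {
      Thread_Control b = { STATUS_SUCCESSFUL, NULL };
      Thread_Control a = { STATUS_SUCCESSFUL, &b };

      flush_queue( &a, filter_object_was_deleted );
      printf( "%d %d\n", (int) a.return_code, (int) b.return_code );
      return 0;
    }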
  • cpukit/score/src/threadrestart.c

    rd887c1b rdce48791  
    444444
    445445void _Thread_Join(
    446   Thread_Control    *the_thread,
    447   States_Control     waiting_for_join,
    448   Thread_Control    *executing,
    449   ISR_lock_Context  *lock_context
     446  Thread_Control   *the_thread,
     447  States_Control    waiting_for_join,
     448  Thread_Control   *executing,
     449  ISR_lock_Context *lock_context
    450450)
    451451{
     
    454454
    455455#if defined(RTEMS_POSIX_API)
    456   executing->Wait.return_code = 0;
    457456  executing->Wait.return_argument = NULL;
    458457#endif
     
    464463    waiting_for_join,
    465464    WATCHDOG_NO_TIMEOUT,
    466     0,
    467465    lock_context
    468466  );
  • cpukit/score/src/threadtimeout.c

    rd887c1b rdce48791  
    2121
    2222#include <rtems/score/threadimpl.h>
     23#include <rtems/score/status.h>
    2324
    2425static void _Thread_Do_timeout( Thread_Control *the_thread )
    2526{
    26   the_thread->Wait.return_code = the_thread->Wait.timeout_code;
     27  the_thread->Wait.return_code = STATUS_TIMEOUT;
    2728  ( *the_thread->Wait.operations->extract )(
    2829    the_thread->Wait.queue,
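
    For blocking waits this pairs with the enqueue change further up: the wait result is
    seeded with STATUS_SUCCESSFUL when the thread is enqueued, a timeout overwrites it with
    STATUS_TIMEOUT, and the resumed thread reads the result back through
    _Thread_Wait_get_status(). A stand-alone sketch of that seed/overwrite/read-back
    sequence, again with simplified stand-ins rather than the real score types:

    #include <stdio.h>

    /* Simplified stand-ins for the unified status and the wait state. */
    typedef enum { STATUS_SUCCESSFUL, STATUS_TIMEOUT } Status_Control;

    typedef struct {
      struct {
        Status_Control return_code;
      } Wait;
    } Thread_Control;

    /* Enqueue seeds the result; no per-wait timeout code is stored any more. */
    static void enqueue( Thread_Control *the_thread )
    {
      the_thread->Wait.return_code = STATUS_SUCCESSFUL;
    }

    /* The timeout handler stores the generic timeout status directly. */
    static void do_timeout( Thread_Control *the_thread )
    {
      the_thread->Wait.return_code = STATUS_TIMEOUT;
    }

    /* Stand-in for _Thread_Wait_get_status(). */
    static Status_Control wait_get_status( const Thread_Control *the_thread )
    {
      return the_thread->Wait.return_code;
    }

    int main( void )
    {
      Thread_Control t;

      enqueue( &t );
      printf( "woken normally: %d\n", (int) wait_get_status( &t ) );

      enqueue( &t );
      do_timeout( &t );
      printf( "timed out:      %d\n", (int) wait_get_status( &t ) );
      return 0;
    }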
  • testsuites/sptests/spintrcritical10/init.c

    rd887c1b rdce48791  
    6161      *(rtems_event_set *) thread->Wait.return_argument == DEADBEEF
    6262    );
    63     rtems_test_assert(thread->Wait.return_code == RTEMS_SUCCESSFUL);
     63    rtems_test_assert(_Thread_Wait_get_status(thread) == STATUS_SUCCESSFUL);
    6464
    6565    sc = rtems_event_send(thread->Object.id, GREEN);
     
    6969      *(rtems_event_set *) thread->Wait.return_argument == GREEN
    7070    );
    71     rtems_test_assert(thread->Wait.return_code == RTEMS_SUCCESSFUL);
     71    rtems_test_assert(_Thread_Wait_get_status(thread) == STATUS_SUCCESSFUL);
    7272
    7373    sc = rtems_event_send(thread->Object.id, RED);
     
    7777      *(rtems_event_set *) thread->Wait.return_argument == GREEN
    7878    );
    79     rtems_test_assert(thread->Wait.return_code == RTEMS_SUCCESSFUL);
     79    rtems_test_assert(_Thread_Wait_get_status(thread) == STATUS_SUCCESSFUL);
    8080
    8181    _Thread_Timeout(&thread->Timer.Watchdog);
     
    8484      *(rtems_event_set *) thread->Wait.return_argument == GREEN
    8585    );
    86     rtems_test_assert(thread->Wait.return_code == RTEMS_SUCCESSFUL);
     86    rtems_test_assert(_Thread_Wait_get_status(thread) == STATUS_SUCCESSFUL);
    8787
    8888    if (ctx->hit) {
     
    158158      *(rtems_event_set *) thread->Wait.return_argument == DEADBEEF
    159159    );
    160     rtems_test_assert(thread->Wait.return_code == RTEMS_SUCCESSFUL);
     160    rtems_test_assert(_Thread_Wait_get_status(thread) == STATUS_SUCCESSFUL);
    161161
    162162    sc = rtems_event_send(thread->Object.id, GREEN);
     
    166166      *(rtems_event_set *) thread->Wait.return_argument == DEADBEEF
    167167    );
    168     rtems_test_assert(thread->Wait.return_code == RTEMS_SUCCESSFUL);
     168    rtems_test_assert(_Thread_Wait_get_status(thread) == STATUS_SUCCESSFUL);
    169169
    170170    sc = rtems_event_send(thread->Object.id, RED);
     
    174174      *(rtems_event_set *) thread->Wait.return_argument == EVENTS
    175175    );
    176     rtems_test_assert(thread->Wait.return_code == RTEMS_SUCCESSFUL);
     176    rtems_test_assert(_Thread_Wait_get_status(thread) == STATUS_SUCCESSFUL);
    177177
    178178    _Thread_Timeout(&thread->Timer.Watchdog);
     
    181181      *(rtems_event_set *) thread->Wait.return_argument == EVENTS
    182182    );
    183     rtems_test_assert(thread->Wait.return_code == RTEMS_SUCCESSFUL);
     183    rtems_test_assert(_Thread_Wait_get_status(thread) == STATUS_SUCCESSFUL);
    184184
    185185    if (ctx->hit) {
     
    250250      *(rtems_event_set *) thread->Wait.return_argument == DEADBEEF
    251251    );
    252     rtems_test_assert(thread->Wait.return_code == RTEMS_SUCCESSFUL);
     252    rtems_test_assert(_Thread_Wait_get_status(thread) == STATUS_SUCCESSFUL);
    253253
    254254    _Thread_Timeout(&thread->Timer.Watchdog);
     
    257257      *(rtems_event_set *) thread->Wait.return_argument == DEADBEEF
    258258    );
    259     rtems_test_assert(thread->Wait.return_code == RTEMS_TIMEOUT);
     259    rtems_test_assert(_Thread_Wait_get_status(thread) == STATUS_TIMEOUT);
    260260
    261261    sc = rtems_event_send(thread->Object.id, EVENTS);
     
    265265      *(rtems_event_set *) thread->Wait.return_argument == DEADBEEF
    266266    );
    267     rtems_test_assert(thread->Wait.return_code == RTEMS_TIMEOUT);
     267    rtems_test_assert(_Thread_Wait_get_status(thread) == STATUS_TIMEOUT);
    268268
    269269    if (ctx->hit) {
  • testsuites/sptests/spintrcritical20/init.c

    rd887c1b rdce48791  
    1919#include <tmacros.h>
    2020#include <intrcritical.h>
     21#include <rtems/score/threadimpl.h>
    2122#include <rtems/score/threadqimpl.h>
    2223#include <rtems/rtems/semimpl.h>
     
    4546
    4647  ctx->semaphore_task_tcb = _Thread_Get_executing();
    47   _Thread_Wait_set_timeout_code(
    48     ctx->semaphore_task_tcb,
    49     CORE_SEMAPHORE_TIMEOUT
    50   );
    5148
    5249  while (true) {
     
    7774
    7875  rtems_test_assert(
    79     ctx->semaphore_task_tcb->Wait.return_code
    80       == CORE_SEMAPHORE_STATUS_SUCCESSFUL
     76    _Thread_Wait_get_status( ctx->semaphore_task_tcb ) == STATUS_SUCCESSFUL
    8177  );
    8278
     
    9591  _Thread_Timeout(&ctx->semaphore_task_tcb->Timer.Watchdog);
    9692
    97   switch (ctx->semaphore_task_tcb->Wait.return_code) {
    98     case CORE_SEMAPHORE_STATUS_SUCCESSFUL:
     93  switch (_Thread_Wait_get_status(ctx->semaphore_task_tcb)) {
     94    case STATUS_SUCCESSFUL:
    9995      ctx->status_was_successful = true;
    10096      break;
    101     case CORE_SEMAPHORE_TIMEOUT:
     97    case STATUS_TIMEOUT:
    10298      ctx->status_was_timeout = true;
    10399      break;