Changes between Version 89 and Version 90 of Developer/SMP


Ignore:
Timestamp:
02/16/15 19:15:31 (9 years ago)
Author:
Sebastian Huber
Comment:

Fix code blocks.

Legend:

Unmodified
Added
Removed
Modified
  • Developer/SMP

    v89 v90  
    119119
    120120The low-level start is guided by the per-CPU control state
    121 [http://www.rtems.org/onlinedocs/doxygen/cpukit/html/group__PerCPU.html#gabad09777c1e3a7b7f3efcae54938d418 Per_CPU_Control::state].  See also ''_Per_CPU_Change_state()'' and
    122 ''_Per_CPU_Wait_for_state()''.
     121[http://www.rtems.org/onlinedocs/doxygen/cpukit/html/group__PerCPU.html#gabad09777c1e3a7b7f3efcae54938d418 Per_CPU_Control::state].  See also {{{_Per_CPU_Change_state()}}} and
     122{{{_Per_CPU_Wait_for_state()}}}.
    123123
    124124
     
    259259
    260260Example ticket lock with C11 atomics.
    261  #include <stdatomic.h>
    262  
    263  struct ticket {
    264         atomic_uint ticket;
    265         atomic_uint now_serving;
    266  };
    267  
    268  void acquire(struct ticket *t)
    269  {
    270         unsigned int my_ticket = atomic_fetch_add_explicit(&t->ticket, 1, memory_order_relaxed);
    271  
    272         while (atomic_load_explicit(&t->now_serving, memory_order_acquire) != my_ticket) {
    273                 /* Wait */
    274         }
    275  }
    276  
    277  void release(struct ticket *t)
    278  {
    279         unsigned int current_ticket = atomic_load_explicit(&t->now_serving, memory_order_relaxed);
    280  
    281         atomic_store_explicit(&t->now_serving, current_ticket + 1U, memory_order_release);
    282  }
     261{{{
     262#!c
     263#include <stdatomic.h>
     264
     265struct ticket {
     266        atomic_uint ticket;
     267        atomic_uint now_serving;
     268};
     269
     270void acquire(struct ticket *t)
     271{
     272        unsigned int my_ticket = atomic_fetch_add_explicit(&t->ticket, 1, memory_order_relaxed);
     273
     274        while (atomic_load_explicit(&t->now_serving, memory_order_acquire) != my_ticket) {
     275                /* Wait */
     276        }
     277}
     278
     279void release(struct ticket *t)
     280{
     281        unsigned int current_ticket = atomic_load_explicit(&t->now_serving, memory_order_relaxed);
     282
     283        atomic_store_explicit(&t->now_serving, current_ticket + 1U, memory_order_release);
     284}
     285}}}
    283286
    284287The generated assembler code looks pretty good.  Please note that GCC generates ''CAS'' instructions and not ''CASA'' instructions.
    285         .file   "ticket.c"
    286         .section        ".text"
    287         .align 4
    288         .global acquire
    289         .type   acquire, #function
    290         .proc   020
    291  acquire:
    292         ld      [%o0], %g1
    293         mov     %g1, %g2
    294  .LL7:
    295         add     %g1, 1, %g1
    296         cas     [%o0], %g2, %g1
    297         cmp     %g1, %g2
    298         bne,a   .LL7
    299          mov    %g1, %g2
    300         add     %o0, 4, %o0
    301  .LL4:
    302         ld      [%o0], %g1
    303         cmp     %g1, %g2
    304         bne     .LL4
    305          nop
    306         jmp     %o7+8
    307          nop
    308         .size   acquire, .-acquire
    309         .align 4
    310         .global release
    311         .type   release, #function
    312         .proc   020
    313  release:
    314         ld      [%o0+4], %g1
    315         add     %g1, 1, %g1
    316         st      %g1, [%o0+4]
    317         jmp     %o7+8
    318          nop
    319         .size   release, .-release
    320         .ident  "GCC: (GNU) 4.9.0 20130917 (experimental)"
     288{{{
     289#!asm
     290        .file   "ticket.c"
     291        .section        ".text"
     292        .align 4
     293        .global acquire
     294        .type   acquire, #function
     295        .proc   020
     296acquire:
     297        ld      [%o0], %g1
     298        mov     %g1, %g2
     299.LL7:
     300        add     %g1, 1, %g1
     301        cas     [%o0], %g2, %g1
     302        cmp     %g1, %g2
     303        bne,a   .LL7
     304         mov    %g1, %g2
     305        add     %o0, 4, %o0
     306.LL4:
     307        ld      [%o0], %g1
     308        cmp     %g1, %g2
     309        bne     .LL4
     310         nop
     311        jmp     %o7+8
     312         nop
     313        .size   acquire, .-acquire
     314        .align 4
     315        .global release
     316        .type   release, #function
     317        .proc   020
     318release:
     319        ld      [%o0+4], %g1
     320        add     %g1, 1, %g1
     321        st      %g1, [%o0+4]
     322        jmp     %o7+8
     323         nop
     324        .size   release, .-release
     325        .ident  "GCC: (GNU) 4.9.0 20130917 (experimental)"
     326}}}
     327
    321328=== Future Directions ===
    322329
     
    437444
    438445Per-CPU control state is available for each configured CPU in a statically
    439 created global table ''_Per_CPU_Information''.  Each per-CPU control is cache
     446created global table {{{_Per_CPU_Information}}}.  Each per-CPU control is cache
    440447aligned to prevent false sharing and to provide simple access via assembly
    441448code.  CPU ports can add custom fields to the per-CPU control.  This is used on
     
    516523storage, POSIX keys or special access functions using the thread control block
    517524of the executing thread.  For Newlib the access to the re-entrancy structure is
    518 now performed via ''__getreent()'',  see also ''__DYNAMIC_REENT__'' in Newlib.  The POSIX keys and the POSIX once function are now available for all RTEMS configurations (they no longer depend on POSIX enabled).  Task variables have been replaced with POSIX keys for the RTEMS shell, the file system environment and the C++ support.
     525now performed via {{{__getreent()}}},  see also {{{__DYNAMIC_REENT__}}} in Newlib.  The POSIX keys and the POSIX once function are now available for all RTEMS configurations (they no longer depend on POSIX enabled).  Task variables have been replaced with POSIX keys for the RTEMS shell, the file system environment and the C++ support.
    519526=== Future Directions ===
    520527
     
    555562
    556563In this sequence it is not possible to enable interrupts around the
    557 ''_Thread_Dispatch()'' call.  This could lead to an unlimited number of interrupt
     564{{{_Thread_Dispatch()}}} call.  This could lead to an unlimited number of interrupt
    558565contexts saved on the thread stack.  To overcome this issue some architectures
    559566use a flag variable that indicates this particular execution environment (e.g.
     
    583590
    584591
    585 The semaphore and mutex objects use ''_Objects_Get_isr_disable()''.  On SMP
     592The semaphore and mutex objects use {{{_Objects_Get_isr_disable()}}}.  On SMP
    586593configurations this first acquires the Giant lock and then interrupts are
    587594disabled.
     
    633640implement busy wait loops which are required by some device drivers.
    634641
    635  /**
    636   * @brief Integer type for CPU counter values.
    637   */
    638  typedef XXX CPU_counter;
    639  
    640  /**
    641   * brief Returns the current CPU counter value.
    642   */
    643  CPU_counter _CPU_counter_Get()
    644  
    645  /**
    646   * brief Mask for arithmetic operations with the CPU counter value.
    647   *
    648   * All arithmetic operations are defined as A = ( C op B ) & MASK.
    649   */
    650  CPU_counter _CPU_counter_Mask()
    651  
    652  /**
    653   * brief Converts a CPU counter value into nanoseconds.
    654   */
    655  uint64_t _CPU_counter_To_nanoseconds( CPU_counter counter )
     642{{{
     643#!c
     644/**
     645 * @brief Integer type for CPU counter values.
     646 */
     647typedef XXX CPU_counter;
     648
     649/**
      650 * @brief Returns the current CPU counter value.
     651 */
     652CPU_counter _CPU_counter_Get()
     653
     654/**
      655 * @brief Mask for arithmetic operations with the CPU counter value.
     656 *
     657 * All arithmetic operations are defined as A = ( C op B ) & MASK.
     658 */
     659CPU_counter _CPU_counter_Mask()
     660
     661/**
      662 * @brief Converts a CPU counter value into nanoseconds.
     663 */
     664uint64_t _CPU_counter_To_nanoseconds( CPU_counter counter )
     665}}}
     666
    656667===  SMP Lock Profiling  ===
    657668
     
    660671(RTEMS_LOCK_PROFILING).  The following statistics are proposed.
    661672
    662  #define SMP_LOCK_STATS_CONTENTION_COUNTS 4
    663  
    664  /**
    665   * @brief SMP lock statistics.
    666   *
    667   * The lock acquire attempt instant is the point in time right after the
    668   * interrupt disable action in the lock acquire sequence.
    669   *
    670   * The lock acquire instant is the point in time right after the lock
    671   * acquisition.  This is the begin of the critical section code execution.
    672   *
    673   * The lock release instant is the point in time right before the interrupt
    674   * enable action in the lock release sequence.
    675   *
    676   * The lock section time is the time elapsed between the lock acquire instant
    677   * and the lock release instant.
    678   *
    679   * The lock acquire time is the time elapsed between the lock acquire attempt
    680   * instant and the lock acquire instant.
    681   */
    682  struct SMP_lock_Stats {
    683  #ifdef RTEMS_LOCK_PROFILING
    684    /**
    685     * @brief The last lock acquire instant in CPU counter ticks.
    686     *
    687     * This value is used to measure the lock section time.
    688     */
    689    CPU_counter acquire_instant;
    690  
    691    /**
    692     * @brief The maximum lock section time in CPU counter ticks.
    693     */
    694    CPU_counter max_section_time;
    695  
    696    /**
    697     * @brief The maximum lock acquire time in CPU counter ticks.
    698     */
    699    CPU_counter max_acquire_time;
    700  
    701    /**
    702     * @brief The count of lock uses.
    703     *
    704     * This value may overflow.
    705     */
    706    uint64_t usage_count;
    707  
    708    /**
    709     * @brief The counts of lock acquire operations with contention.
    710     *
    711     * The contention count for index N corresponds to a lock acquire attempt
    712     * with an initial queue length of N + 1.  The last index corresponds to all
    713     * lock acquire attempts with an initial queue length greater than or equal
    714     * to SMP_LOCK_STATS_CONTENTION_COUNTS.
    715     *
    716     * The values may overflow.
    717     */
    718    uint64_t contention_counts[SMP_LOCK_STATS_CONTENTION_COUNTS];
    719  
    720    /**
    721     * @brief Total lock section time in CPU counter ticks.
    722     *
    723     * The average lock section time is the total section time divided by the
    724     * lock usage count.
    725     *
    726     * This value may overflow.
    727     */
    728    uint64_t total_section_time;
    729  #endif /* RTEMS_LOCK_PROFILING */
    730  }
    731  
    732  struct SMP_lock_Control {
    733    ... lock data ...
    734    SMP_lock_Stats Stats;
    735  };
     673{{{
     674#!c
     675#define SMP_LOCK_STATS_CONTENTION_COUNTS 4
     676
     677/**
     678 * @brief SMP lock statistics.
     679 *
     680 * The lock acquire attempt instant is the point in time right after the
     681 * interrupt disable action in the lock acquire sequence.
     682 *
     683 * The lock acquire instant is the point in time right after the lock
      684 * acquisition.  This is the beginning of the critical section code execution.
     685 *
     686 * The lock release instant is the point in time right before the interrupt
     687 * enable action in the lock release sequence.
     688 *
     689 * The lock section time is the time elapsed between the lock acquire instant
     690 * and the lock release instant.
     691 *
     692 * The lock acquire time is the time elapsed between the lock acquire attempt
     693 * instant and the lock acquire instant.
     694 */
     695struct SMP_lock_Stats {
     696#ifdef RTEMS_LOCK_PROFILING
     697  /**
     698   * @brief The last lock acquire instant in CPU counter ticks.
     699   *
     700   * This value is used to measure the lock section time.
     701   */
     702  CPU_counter acquire_instant;
     703
     704  /**
     705   * @brief The maximum lock section time in CPU counter ticks.
     706   */
     707  CPU_counter max_section_time;
     708
     709  /**
     710   * @brief The maximum lock acquire time in CPU counter ticks.
     711   */
     712  CPU_counter max_acquire_time;
     713
     714  /**
     715   * @brief The count of lock uses.
     716   *
     717   * This value may overflow.
     718   */
     719  uint64_t usage_count;
     720
     721  /**
     722   * @brief The counts of lock acquire operations with contention.
     723   *
     724   * The contention count for index N corresponds to a lock acquire attempt
     725   * with an initial queue length of N + 1.  The last index corresponds to all
     726   * lock acquire attempts with an initial queue length greater than or equal
     727   * to SMP_LOCK_STATS_CONTENTION_COUNTS.
     728   *
     729   * The values may overflow.
     730   */
     731  uint64_t contention_counts[SMP_LOCK_STATS_CONTENTION_COUNTS];
     732
     733  /**
     734   * @brief Total lock section time in CPU counter ticks.
     735   *
     736   * The average lock section time is the total section time divided by the
     737   * lock usage count.
     738   *
     739   * This value may overflow.
     740   */
     741  uint64_t total_section_time;
     742#endif /* RTEMS_LOCK_PROFILING */
     743}
     744
     745struct SMP_lock_Control {
     746  ... lock data ...
     747  SMP_lock_Stats Stats;
     748};
     749}}}
    736750
    737751A function should be added to monitor the lock contention.
    738752
    739  /**
    740   * @brief Called in case of lock contention.
    741   *
    742   * @param[in] counter The spin loop iteration counter.
    743   */
    744  void _SMP_lock_Contention_monitor(
    745    const SMP_lock_Control *lock,
    746    int counter
    747  );
     753{{{
     754#!c
     755/**
     756 * @brief Called in case of lock contention.
     757 *
     758 * @param[in] counter The spin loop iteration counter.
     759 */
     760void _SMP_lock_Contention_monitor(
     761  const SMP_lock_Control *lock,
     762  int counter
     763);
     764}}}
    748765
    749766A ticket lock can then look like this:
    750767
    751  void acquire(struct ticket *t)
    752  {
    753         unsigned int my_ticket = atomic_fetch_add_explicit(&t->ticket, 1, memory_order_relaxed);
    754  #ifdef RTEMS_LOCK_PROFILING
    755         int counter = 0;
    756  #endif /* RTEMS_LOCK_PROFILING */
    757  
    758         while (atomic_load_explicit(&t->now_serving, memory_order_acquire) != my_ticket) {
    759  #ifdef RTEMS_LOCK_PROFILING
    760                 ++counter;
    761                 _SMP_lock_Contention_monitor(t, counter);
    762  #endif /* RTEMS_LOCK_PROFILING */
    763         }
    764  }
     768{{{
     769#!c
     770void acquire(struct ticket *t)
     771{
     772        unsigned int my_ticket = atomic_fetch_add_explicit(&t->ticket, 1, memory_order_relaxed);
     773#ifdef RTEMS_LOCK_PROFILING
     774        int counter = 0;
     775#endif /* RTEMS_LOCK_PROFILING */
     776
     777        while (atomic_load_explicit(&t->now_serving, memory_order_acquire) != my_ticket) {
     778#ifdef RTEMS_LOCK_PROFILING
     779                ++counter;
     780                _SMP_lock_Contention_monitor(t, counter);
     781#endif /* RTEMS_LOCK_PROFILING */
     782        }
     783}
     784}}}
    765785
    766786SMP lock statistics can be evaluated using the following method.
    767787
    768  typedef void ( *SMP_lock_Visitor )(
    769    void *arg,
    770    SMP_lock_Control *lock,
    771    SMP_lock_Class lock_class,
    772    Objects_Name lock_name
    773  );
    774  
    775  /**
    776   * @brief Iterates through all system SMP locks and invokes the visitor for
    777   * each lock.
    778   */
    779  void _SMP_lock_Iterate( SMP_lock_Visitor visitor, void *arg );
     788{{{
     789#!c
     790typedef void ( *SMP_lock_Visitor )(
     791  void *arg,
     792  SMP_lock_Control *lock,
     793  SMP_lock_Class lock_class,
     794  Objects_Name lock_name
     795);
     796
     797/**
     798 * @brief Iterates through all system SMP locks and invokes the visitor for
     799 * each lock.
     800 */
     801void _SMP_lock_Iterate( SMP_lock_Visitor visitor, void *arg );
     802}}}
     803
    780804===  Interrupt and Thread Profiling  ===
    781805
     
    789813be monitored per-processor if the hardware supports this.
    790814
    791  /**
    792   * @brief Per-CPU statistics.
    793   */
    794  struct Per_CPU_Stats {
    795  #ifdef RTEMS_INTERRUPT_AND_THREAD_PROFILING
    796    /**
    797     * @brief The thread dispatch disabled begin instant in CPU counter ticks.
    798     *
    799     * This value is used to measure the time of disabled thread dispatching.
    800     */
    801    CPU_counter thread_dispatch_disabled_instant;
    802  
    803    /**
    804     * @brief The last outer-most interrupt begin instant in CPU counter ticks.
    805     *
    806     * This value is used to measure the interrupt processing time.
    807     */
    808    CPU_counter outer_most_interrupt_instant;
    809  
    810    /**
    811     * @brief The maximum interrupt delay in CPU counter ticks if supported by
    812     * the hardware.
    813     */
    814    CPU_counter max_interrupt_delay;
    815  
    816    /**
    817     * @brief The maximum time of disabled thread dispatching in CPU counter
    818     * ticks.
    819     */
    820    CPU_counter max_thread_dispatch_disabled_time;
    821  
    822    /**
    823     * @brief Count of times when the thread dispatch disable level changes from
    824     * zero to one in thread context.
    825     *
    826     * This value may overflow.
    827     */
    828    uint64_t thread_dispatch_disabled_count;
    829  
    830    /**
    831     * @brief Total time of disabled thread dispatching in CPU counter ticks.
    832     *
    833     * The average time of disabled thread dispatching is the total time of
    834     * disabled thread dispatching divided by the thread dispatch disabled
    835     * count.
    836     *
    837     * This value may overflow.
    838     */
    839    uint64_t total_thread_dispatch_disabled_time;
    840  
    841    /**
    842     * @brief Count of times when the interrupt nest level changes from zero to
    843     * one.
    844     *
    845     * This value may overflow.
    846     */
    847    uint64_t interrupt_count;
    848  
    849    /**
    850     * @brief Total time of interrupt processing in CPU counter ticks.
    851     *
    852     * The average time of interrupt processing is the total time of interrupt
    853     * processing divided by the interrupt count.
    854     *
    855     * This value may overflow.
    856     */
    857    uint64_t total_interrupt_time;
    858  #endif /* RTEMS_INTERRUPT_AND_THREAD_PROFILING */
    859  }
    860  
    861  struct Per_CPU_Control {
    862    ... per-CPU data ...
    863    Per_CPU_Stats Stats;
    864  };
     815{{{
     816#!c
     817/**
     818 * @brief Per-CPU statistics.
     819 */
     820struct Per_CPU_Stats {
     821#ifdef RTEMS_INTERRUPT_AND_THREAD_PROFILING
     822  /**
     823   * @brief The thread dispatch disabled begin instant in CPU counter ticks.
     824   *
     825   * This value is used to measure the time of disabled thread dispatching.
     826   */
     827  CPU_counter thread_dispatch_disabled_instant;
     828
     829  /**
     830   * @brief The last outer-most interrupt begin instant in CPU counter ticks.
     831   *
     832   * This value is used to measure the interrupt processing time.
     833   */
     834  CPU_counter outer_most_interrupt_instant;
     835
     836  /**
     837   * @brief The maximum interrupt delay in CPU counter ticks if supported by
     838   * the hardware.
     839   */
     840  CPU_counter max_interrupt_delay;
     841
     842  /**
     843   * @brief The maximum time of disabled thread dispatching in CPU counter
     844   * ticks.
     845   */
     846  CPU_counter max_thread_dispatch_disabled_time;
     847
     848  /**
     849   * @brief Count of times when the thread dispatch disable level changes from
     850   * zero to one in thread context.
     851   *
     852   * This value may overflow.
     853   */
     854  uint64_t thread_dispatch_disabled_count;
     855
     856  /**
     857   * @brief Total time of disabled thread dispatching in CPU counter ticks.
     858   *
     859   * The average time of disabled thread dispatching is the total time of
     860   * disabled thread dispatching divided by the thread dispatch disabled
     861   * count.
     862   *
     863   * This value may overflow.
     864   */
     865  uint64_t total_thread_dispatch_disabled_time;
     866
     867  /**
     868   * @brief Count of times when the interrupt nest level changes from zero to
     869   * one.
     870   *
     871   * This value may overflow.
     872   */
     873  uint64_t interrupt_count;
     874
     875  /**
     876   * @brief Total time of interrupt processing in CPU counter ticks.
     877   *
     878   * The average time of interrupt processing is the total time of interrupt
     879   * processing divided by the interrupt count.
     880   *
     881   * This value may overflow.
     882   */
     883  uint64_t total_interrupt_time;
     884#endif /* RTEMS_INTERRUPT_AND_THREAD_PROFILING */
     885}
     886
     887struct Per_CPU_Control {
     888  ... per-CPU data ...
     889  Per_CPU_Stats Stats;
     890};
     891}}}
     892
    865893==  Interrupt Support  ==
    866894
     
    887915sets of interrupt vectors.
    888916
    889  /**
    890   * @brief Sets the processor affinity set of an interrupt vector.
    891   *
    892   * @param[in] vector The interrupt vector number.
    893   * @param[in] affinity_set_size Size of the specified affinity set buffer in
    894   * bytes.  This value must be positive.
    895   * @param[in] affinity_set The new processor affinity set for the interrupt
    896   * vector.  This pointer must not be @c NULL.  A set bit in the affinity set
    897   * means that the interrupt can occur on this processor and a cleared bit
    898   * means the opposite.
    899   *
    900   * @retval RTEMS_SUCCESSFUL Successful operation.
    901   * @retval RTEMS_INVALID_ID The vector number is invalid.
    902   * @retval RTEMS_INVALID_CPU_SET Invalid processor affinity set.
    903   */
    904  rtems_status_code rtems_interrupt_set_affinity(
    905    rtems_vector vector,
    906    size_t affinity_set_size,
    907    const cpu_set_t *affinity_set
    908  );
    909 
    910  /**
    911   * @brief Gets the processor affinity set of an interrupt vector.
    912   *
    913   * @param[in] vector The interrupt vector number.
    914   * @param[in] affinity_set_size Size of the specified affinity set buffer in
    915   * bytes.  This value must be positive.
    916   * @param[out] affinity_set The current processor affinity set of the
    917   * interrupt vector.  This pointer must not be @c NULL.  A set bit in the
    918   * affinity set means that the interrupt can occur on this processor and a
    919   * cleared bit means the opposite.
    920   *
    921   * @retval RTEMS_SUCCESSFUL Successful operation.
    922   * @retval RTEMS_INVALID_ID The vector number is invalid.
    923   * @retval RTEMS_INVALID_CPU_SET The affinity set buffer is too small for the
    924   * current processor affinity set of the interrupt vector.
    925   */
    926  rtems_status_code rtems_interrupt_get_affinity(
    927    rtems_vector vector,
    928    size_t affinity_set_size,
    929    cpu_set_t *affinity_set
    930  );
     917{{{
     918#!c
     919/**
     920 * @brief Sets the processor affinity set of an interrupt vector.
     921 *
     922 * @param[in] vector The interrupt vector number.
     923 * @param[in] affinity_set_size Size of the specified affinity set buffer in
     924 * bytes.  This value must be positive.
     925 * @param[in] affinity_set The new processor affinity set for the interrupt
     926 * vector.  This pointer must not be @c NULL.  A set bit in the affinity set
     927 * means that the interrupt can occur on this processor and a cleared bit
     928 * means the opposite.
     929 *
     930 * @retval RTEMS_SUCCESSFUL Successful operation.
     931 * @retval RTEMS_INVALID_ID The vector number is invalid.
     932 * @retval RTEMS_INVALID_CPU_SET Invalid processor affinity set.
     933 */
     934rtems_status_code rtems_interrupt_set_affinity(
     935  rtems_vector vector,
     936  size_t affinity_set_size,
     937  const cpu_set_t *affinity_set
     938);
     939
     940/**
     941 * @brief Gets the processor affinity set of an interrupt vector.
     942 *
     943 * @param[in] vector The interrupt vector number.
     944 * @param[in] affinity_set_size Size of the specified affinity set buffer in
     945 * bytes.  This value must be positive.
     946 * @param[out] affinity_set The current processor affinity set of the
     947 * interrupt vector.  This pointer must not be @c NULL.  A set bit in the
     948 * affinity set means that the interrupt can occur on this processor and a
     949 * cleared bit means the opposite.
     950 *
     951 * @retval RTEMS_SUCCESSFUL Successful operation.
     952 * @retval RTEMS_INVALID_ID The vector number is invalid.
     953 * @retval RTEMS_INVALID_CPU_SET The affinity set buffer is too small for the
     954 * current processor affinity set of the interrupt vector.
     955 */
     956rtems_status_code rtems_interrupt_get_affinity(
     957  rtems_vector vector,
     958  size_t affinity_set_size,
     959  cpu_set_t *affinity_set
     960);
     961}}}
     962
    931963==  Clustered Scheduling  ==
    932964
     
    949981Functions for scheduler management.
    950982
    951  /**
    952   * @brief Identifies a scheduler by its name.
    953   *
    954   * The scheduler name is determined by the scheduler configuration.
    955   *
    956   * @param[in] name The scheduler name.
    957   * @param[out] scheduler_id The scheduler identifier associated with the name.
    958   *
    959   * @retval RTEMS_SUCCESSFUL Successful operation.
    960   * @retval RTEMS_INVALID_NAME Invalid scheduler name.
    961   */
    962  rtems_status_code rtems_scheduler_ident(
    963    rtems_name name,
    964    rtems_id *scheduler_id
    965  );
    966 
    967  /**
    968   * @brief Gets the set of processors owned by the scheduler.
    969   *
    970   * @param[in] scheduler_id Identifier of the scheduler.
    971   * @param[in] processor_set_size Size of the specified processor set buffer in
    972   * bytes.  This value must be positive.
    973   * @param[out] processor_set The processor set owned by the scheduler.  This
    974   * pointer must not be @c NULL.  A set bit in the processor set means that
    975   * this processor is owned by the scheduler and a cleared bit means the
    976   * opposite.
    977   *
    978   * @retval RTEMS_SUCCESSFUL Successful operation.
    979   * @retval RTEMS_INVALID_ID Invalid scheduler identifier.
    980   * @retval RTEMS_INVALID_CPU_SET The processor set buffer is too small for the
    981   * set of processors owned by the scheduler.
    982   */
    983  rtems_status_code rtems_scheduler_get_processors(
    984    rtems_id scheduler_id,
    985    size_t processor_set_size,
    986    cpu_set_t *processor_set
    987  );
     983{{{
     984#!c
     985/**
     986 * @brief Identifies a scheduler by its name.
     987 *
     988 * The scheduler name is determined by the scheduler configuration.
     989 *
     990 * @param[in] name The scheduler name.
     991 * @param[out] scheduler_id The scheduler identifier associated with the name.
     992 *
     993 * @retval RTEMS_SUCCESSFUL Successful operation.
     994 * @retval RTEMS_INVALID_NAME Invalid scheduler name.
     995 */
     996rtems_status_code rtems_scheduler_ident(
     997  rtems_name name,
     998  rtems_id *scheduler_id
     999);
     1000
     1001/**
     1002 * @brief Gets the set of processors owned by the scheduler.
     1003 *
     1004 * @param[in] scheduler_id Identifier of the scheduler.
     1005 * @param[in] processor_set_size Size of the specified processor set buffer in
     1006 * bytes.  This value must be positive.
     1007 * @param[out] processor_set The processor set owned by the scheduler.  This
     1008 * pointer must not be @c NULL.  A set bit in the processor set means that
     1009 * this processor is owned by the scheduler and a cleared bit means the
     1010 * opposite.
     1011 *
     1012 * @retval RTEMS_SUCCESSFUL Successful operation.
     1013 * @retval RTEMS_INVALID_ID Invalid scheduler identifier.
     1014 * @retval RTEMS_INVALID_CPU_SET The processor set buffer is too small for the
     1015 * set of processors owned by the scheduler.
     1016 */
     1017rtems_status_code rtems_scheduler_get_processors(
     1018  rtems_id scheduler_id,
     1019  size_t processor_set_size,
     1020  cpu_set_t *processor_set
     1021);
     1022}}}
    9881023
    9891024Each thread needs a processor affinity set in the RTEMS SMP configuration.  The
     
    9951030sets of tasks.
    9961031
    997  /**
    998   * @brief Sets the processor affinity set of a task.
    999   *
    1000   * @param[in] task_id Identifier of the task.  Use @ref RTEMS_SELF to select
    1001   * the executing task.
    1002   * @param[in] affinity_set_size Size of the specified affinity set buffer in
    1003   * bytes.  This value must be positive.
    1004   * @param[in] affinity_set The new processor affinity set for the task.  This
    1005   * pointer must not be @c NULL.  A set bit in the affinity set means that the
    1006   * task can execute on this processor and a cleared bit means the opposite.
    1007   *
    1008   * @retval RTEMS_SUCCESSFUL Successful operation.
    1009   * @retval RTEMS_INVALID_ID Invalid task identifier.
    1010   * @retval RTEMS_INVALID_CPU_SET Invalid processor affinity set.
    1011   */
    1012  rtems_status_code rtems_task_set_affinity(
    1013    rtems_id task_id,
    1014    size_t affinity_set_size,
    1015    const cpu_set_t *affinity_set
    1016  );
    1017 
    1018  /**
    1019   * @brief Gets the processor affinity set of a task.
    1020   *
    1021   * @param[in] task_id Identifier of the task.  Use @ref RTEMS_SELF to select
    1022   * the executing task.
    1023   * @param[in] affinity_set_size Size of the specified affinity set buffer in
    1024   * bytes.  This value must be positive.
    1025   * @param[out] affinity_set The current processor affinity set of the task.
    1026   * This pointer must not be @c NULL.  A set bit in the affinity set means that
    1027   * the task can execute on this processor and a cleared bit means the
    1028   * opposite.
    1029   *
    1030   * @retval RTEMS_SUCCESSFUL Successful operation.
    1031   * @retval RTEMS_INVALID_ID Invalid task identifier.
    1032   * @retval RTEMS_INVALID_CPU_SET The affinity set buffer is too small for the
    1033   * current processor affinity set of the task.
    1034   */
    1035  rtems_status_code rtems_task_get_affinity(
    1036    rtems_id task_id,
    1037    size_t affinity_set_size,
    1038    cpu_set_t *affinity_set
    1039  );
     1032{{{
     1033#!c
     1034/**
     1035 * @brief Sets the processor affinity set of a task.
     1036 *
     1037 * @param[in] task_id Identifier of the task.  Use @ref RTEMS_SELF to select
     1038 * the executing task.
     1039 * @param[in] affinity_set_size Size of the specified affinity set buffer in
     1040 * bytes.  This value must be positive.
     1041 * @param[in] affinity_set The new processor affinity set for the task.  This
     1042 * pointer must not be @c NULL.  A set bit in the affinity set means that the
     1043 * task can execute on this processor and a cleared bit means the opposite.
     1044 *
     1045 * @retval RTEMS_SUCCESSFUL Successful operation.
     1046 * @retval RTEMS_INVALID_ID Invalid task identifier.
     1047 * @retval RTEMS_INVALID_CPU_SET Invalid processor affinity set.
     1048 */
     1049rtems_status_code rtems_task_set_affinity(
     1050  rtems_id task_id,
     1051  size_t affinity_set_size,
     1052  const cpu_set_t *affinity_set
     1053);
     1054
     1055/**
     1056 * @brief Gets the processor affinity set of a task.
     1057 *
     1058 * @param[in] task_id Identifier of the task.  Use @ref RTEMS_SELF to select
     1059 * the executing task.
     1060 * @param[in] affinity_set_size Size of the specified affinity set buffer in
     1061 * bytes.  This value must be positive.
     1062 * @param[out] affinity_set The current processor affinity set of the task.
     1063 * This pointer must not be @c NULL.  A set bit in the affinity set means that
     1064 * the task can execute on this processor and a cleared bit means the
     1065 * opposite.
     1066 *
     1067 * @retval RTEMS_SUCCESSFUL Successful operation.
     1068 * @retval RTEMS_INVALID_ID Invalid task identifier.
     1069 * @retval RTEMS_INVALID_CPU_SET The affinity set buffer is too small for the
     1070 * current processor affinity set of the task.
     1071 */
     1072rtems_status_code rtems_task_get_affinity(
     1073  rtems_id task_id,
     1074  size_t affinity_set_size,
     1075  cpu_set_t *affinity_set
     1076);
     1077}}}
    10401078
    10411079Two new functions should be added to alter and retrieve the scheduler of tasks.
    10421080
    1043  /**
    1044   * @brief Sets the scheduler of a task.
    1045   *
    1046   * @param[in] task_id Identifier of the task.  Use @ref RTEMS_SELF to select
    1047   * the executing task.
    1048   * @param[in] scheduler_id Identifier of the scheduler.
    1049   *
    1050   * @retval RTEMS_SUCCESSFUL Successful operation.
    1051   * @retval RTEMS_INVALID_ID Invalid task identifier.
    1052   * @retval RTEMS_INVALID_SECOND_ID Invalid scheduler identifier.
    1053   *
    1054   * @see rtems_scheduler_ident().
    1055   */
    1056  rtems_status_code rtems_task_set_scheduler(
    1057    rtems_id task_id,
    1058    rtems_id scheduler_id
    1059  );
    1060 
    1061  /**
    1062   * @brief Gets the scheduler of a task.
    1063   *
    1064   * @param[in] task_id Identifier of the task.  Use @ref RTEMS_SELF to select
    1065   * the executing task.
    1066   * @param[out] scheduler_id Identifier of the scheduler.
    1067   *
    1068   * @retval RTEMS_SUCCESSFUL Successful operation.
    1069   * @retval RTEMS_INVALID_ID Invalid task identifier.
    1070   */
    1071  rtems_status_code rtems_task_get_scheduler(
    1072    rtems_id task_id,
    1073    rtems_id *scheduler_id
    1074  );
     1081{{{
     1082#!c
     1083/**
     1084 * @brief Sets the scheduler of a task.
     1085 *
     1086 * @param[in] task_id Identifier of the task.  Use @ref RTEMS_SELF to select
     1087 * the executing task.
     1088 * @param[in] scheduler_id Identifier of the scheduler.
     1089 *
     1090 * @retval RTEMS_SUCCESSFUL Successful operation.
     1091 * @retval RTEMS_INVALID_ID Invalid task identifier.
     1092 * @retval RTEMS_INVALID_SECOND_ID Invalid scheduler identifier.
     1093 *
     1094 * @see rtems_scheduler_ident().
     1095 */
     1096rtems_status_code rtems_task_set_scheduler(
     1097  rtems_id task_id,
     1098  rtems_id scheduler_id
     1099);
     1100
     1101/**
     1102 * @brief Gets the scheduler of a task.
     1103 *
     1104 * @param[in] task_id Identifier of the task.  Use @ref RTEMS_SELF to select
     1105 * the executing task.
     1106 * @param[out] scheduler_id Identifier of the scheduler.
     1107 *
     1108 * @retval RTEMS_SUCCESSFUL Successful operation.
     1109 * @retval RTEMS_INVALID_ID Invalid task identifier.
     1110 */
     1111rtems_status_code rtems_task_get_scheduler(
     1112  rtems_id task_id,
     1113  rtems_id *scheduler_id
     1114);
     1115}}}
     1116
    10751117===  Scheduler Configuration  ===
    10761118
     
    11111153which executes the boot_card() function.
    11121154
    1113  /**
    1114   * @brief Processor configuration.
    1115   *
    1116   * Use RTEMS_CPU_CONFIG_INIT() to initialize this structure.
    1117   */
    1118  typedef struct {
    1119    /**
    1120     * @brief Scheduler instance for this processor.
    1121     *
    1122     * It is possible to omit a scheduler instance for this processor by using
    1123     * the @c NULL pointer.  In this case RTEMS will not use this processor and
    1124     * other operating systems may claim it.
    1125     */
    1126    Scheduler_Control *scheduler;
    1127  } rtems_cpu_config;
    1128  
    1129  /**
    1130   * @brief Processor configuration initializer.
    1131   *
    1132   * @param scheduler The reference to a scheduler instance or @c NULL.
    1133   *
    1134   * @see rtems_cpu_config.
    1135   */
    1136  #define RTEMS_CPU_CONFIG_INIT(scheduler) \
    1137    { ( scheduler ) }
     1155{{{
     1156#!c
     1157/**
     1158 * @brief Processor configuration.
     1159 *
     1160 * Use RTEMS_CPU_CONFIG_INIT() to initialize this structure.
     1161 */
     1162typedef struct {
     1163  /**
     1164   * @brief Scheduler instance for this processor.
     1165   *
     1166   * It is possible to omit a scheduler instance for this processor by using
     1167   * the @c NULL pointer.  In this case RTEMS will not use this processor and
     1168   * other operating systems may claim it.
     1169   */
     1170  Scheduler_Control *scheduler;
     1171} rtems_cpu_config;
     1172
     1173/**
     1174 * @brief Processor configuration initializer.
     1175 *
     1176 * @param scheduler The reference to a scheduler instance or @c NULL.
     1177 *
     1178 * @see rtems_cpu_config.
     1179 */
     1180#define RTEMS_CPU_CONFIG_INIT(scheduler) \
     1181  { ( scheduler ) }
     1182}}}
    11381183
    11391184Scheduler and processor configuration example:
    11401185
    1141  RTEMS_SCHED_DEFINE_FP_SMP(fp0, rtems_build_name(' ', 'F', 'P', '0'), 256);
    1142  RTEMS_SCHED_DEFINE_FP_SMP(fp1, rtems_build_name(' ', 'F', 'P', '1'), 64);
    1143  RTEMS_SCHED_DEFINE_EDF_SMP(edf0, rtems_build_name('E', 'D', 'F', '0'));
    1144  
    1145  const rtems_cpu_config rtems_cpu_config_table[] = {
    1146    RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_FP_SMP(fp0)),
    1147    RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_FP_SMP(fp1)),
    1148    RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_FP_SMP(fp1)),
    1149    RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_FP_SMP(fp1)),
    1150    RTEMS_CPU_CONFIG_INIT(NULL),
    1151    RTEMS_CPU_CONFIG_INIT(NULL),
    1152    RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_EDF_SMP(edf0)),
    1153    RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_EDF_SMP(edf0)
    1154  };
    1155  
    1156  const size_t rtems_cpu_config_count =
    1157 
    1158    RTEMS_ARRAY_SIZE(rtems_cpu_config_table);
     1186{{{
     1187#!c
     1188RTEMS_SCHED_DEFINE_FP_SMP(fp0, rtems_build_name(' ', 'F', 'P', '0'), 256);
     1189RTEMS_SCHED_DEFINE_FP_SMP(fp1, rtems_build_name(' ', 'F', 'P', '1'), 64);
     1190RTEMS_SCHED_DEFINE_EDF_SMP(edf0, rtems_build_name('E', 'D', 'F', '0'));
     1191
     1192const rtems_cpu_config rtems_cpu_config_table[] = {
     1193  RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_FP_SMP(fp0)),
     1194  RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_FP_SMP(fp1)),
     1195  RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_FP_SMP(fp1)),
     1196  RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_FP_SMP(fp1)),
     1197  RTEMS_CPU_CONFIG_INIT(NULL),
     1198  RTEMS_CPU_CONFIG_INIT(NULL),
     1199  RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_EDF_SMP(edf0)),
      1200  RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_EDF_SMP(edf0))
     1201};
     1202
     1203const size_t rtems_cpu_config_count =
     1204
     1205  RTEMS_ARRAY_SIZE(rtems_cpu_config_table);
     1206}}}
    11591207
    11601208An alternative to the processor configuration table would be to specify in the
     
    11721220a scheduler control context as the first parameter, e.g.
    11731221
    1174  typedef struct Scheduler_Control Scheduler_Control;
    1175  
    1176  typedef struct {
    1177    [...]
    1178    void ( *set_affinity )(
    1179      Scheduler_Control *self,
    1180      Thread_Control *thread,
    1181      size_t affinity_set_size,
    1182      const cpu_set_t *affinity_set
    1183    );
    1184    [...]
    1185  } Scheduler_Operations;
    1186  
    1187  /**
    1188   * @brief General scheduler control.
    1189   */
    1190  struct Scheduler_Control {
    1191    /**
    1192     * @brief The scheduler operations.
    1193     */
    1194    Scheduler_Operations Operations;
    1195  
    1196    /**
    1197     * @brief Size of the owned processor set in bytes.
    1198     */
    1199    size_t owned_cpu_set_size
    1200  
    1201    /**
    1202     * @brief Reference to the owned processor set.
    1203     *
    1204     * A set bit means this processor is owned by this scheduler instance, a
    1205     * cleared bit means the opposite.
    1206     */
    1207    cpu_set_t *owned_cpu_set;
    1208  };
     1222{{{
     1223#!c
     1224typedef struct Scheduler_Control Scheduler_Control;
     1225
     1226typedef struct {
     1227  [...]
     1228  void ( *set_affinity )(
     1229    Scheduler_Control *self,
     1230    Thread_Control *thread,
     1231    size_t affinity_set_size,
     1232    const cpu_set_t *affinity_set
     1233  );
     1234  [...]
     1235} Scheduler_Operations;
     1236
     1237/**
     1238 * @brief General scheduler control.
     1239 */
     1240struct Scheduler_Control {
     1241  /**
     1242   * @brief The scheduler operations.
     1243   */
     1244  Scheduler_Operations Operations;
     1245
     1246  /**
     1247   * @brief Size of the owned processor set in bytes.
     1248   */
     1249  size_t owned_cpu_set_size
     1250
     1251  /**
     1252   * @brief Reference to the owned processor set.
     1253   *
     1254   * A set bit means this processor is owned by this scheduler instance, a
     1255   * cleared bit means the opposite.
     1256   */
     1257  cpu_set_t *owned_cpu_set;
     1258};
     1259}}}
    12091260
    12101261Single processor configurations benefit also from this change since it makes
     
    12491300A new semaphore attribute enables MrsP.
    12501301
    1251  /**
    1252   * @brief Semaphore attribute to select the multiprocessor resource sharing
    1253   * protocol MrsP.
    1254   *
    1255   * This attribute is mutually exclusive with RTEMS_PRIORITY_CEILING and
    1256   * RTEMS_INHERIT_PRIORITY.
    1257   */
    1258  #define RTEMS_MULTIPROCESSOR_RESOURCE_SHARING 0x00000100
     1302{{{
     1303#!c
     1304/**
     1305 * @brief Semaphore attribute to select the multiprocessor resource sharing
     1306 * protocol MrsP.
     1307 *
     1308 * This attribute is mutually exclusive with RTEMS_PRIORITY_CEILING and
     1309 * RTEMS_INHERIT_PRIORITY.
     1310 */
     1311#define RTEMS_MULTIPROCESSOR_RESOURCE_SHARING 0x00000100
     1312}}}
    12591313
    12601314For MrsP we need the ability to specify the priority ceilings per scheduler
    12611315domain.
    12621316
    1263  typedef struct {
    1264    rtems_id scheduler_id;
    1265    rtems_task_priority priority;
    1266  } rtems_task_priority_by_scheduler;
    1267 
    1268  /**
    1269   * @brief Sets the priority ceilings per scheduler for a semaphore with
    1270   * priority ceiling protocol.
    1271   *
    1272   * @param[in] semaphore_id Identifier of the semaphore.
    1273   * @param[in] priority_ceilings A table with priority ceilings by scheduler.
    1274   * In case one scheduler appears multiple times, the setting with the highest
    1275   * index will be used.  This semaphore object is then bound to the specified
    1276   * scheduler domains.  It is an error to use this semaphore object on other
    1277   * scheduler domains.  The specified schedulers must be compatible, e.g.
    1278   * migration from one scheduler domain to another must be defined.
    1279   * @param[in] priority_ceilings_count Count of priority ceilings by scheduler
    1280   * pairs in the table.
    1281   *
    1282   * @retval RTEMS_SUCCESSFUL Successful operation.
    1283   * @retval RTEMS_INVALID_ID Invalid semaphore identifier.
    1284   * @retval RTEMS_INVALID_SECOND_ID Invalid scheduler identifier in the table.
    1285   * @retval RTEMS_INVALID_PRIORITY Invalid task priority in the table.
    1286   */
    1287  rtems_status_code rtems_semaphore_set_priority_ceilings(
    1288    rtems_id semaphore_id,
    1289    const rtems_task_priority_by_scheduler *priority_ceilings,
    1290    size_t priority_ceilings_count
    1291  );
     1317{{{
     1318#!c
     1319typedef struct {
     1320  rtems_id scheduler_id;
     1321  rtems_task_priority priority;
     1322} rtems_task_priority_by_scheduler;
     1323
     1324/**
     1325 * @brief Sets the priority ceilings per scheduler for a semaphore with
     1326 * priority ceiling protocol.
     1327 *
     1328 * @param[in] semaphore_id Identifier of the semaphore.
     1329 * @param[in] priority_ceilings A table with priority ceilings by scheduler.
     1330 * In case one scheduler appears multiple times, the setting with the highest
     1331 * index will be used.  This semaphore object is then bound to the specified
     1332 * scheduler domains.  It is an error to use this semaphore object on other
     1333 * scheduler domains.  The specified schedulers must be compatible, e.g.
     1334 * migration from one scheduler domain to another must be defined.
     1335 * @param[in] priority_ceilings_count Count of priority ceilings by scheduler
     1336 * pairs in the table.
     1337 *
     1338 * @retval RTEMS_SUCCESSFUL Successful operation.
     1339 * @retval RTEMS_INVALID_ID Invalid semaphore identifier.
     1340 * @retval RTEMS_INVALID_SECOND_ID Invalid scheduler identifier in the table.
     1341 * @retval RTEMS_INVALID_PRIORITY Invalid task priority in the table.
     1342 */
     1343rtems_status_code rtems_semaphore_set_priority_ceilings(
     1344  rtems_id semaphore_id,
     1345  const rtems_task_priority_by_scheduler *priority_ceilings,
     1346  size_t priority_ceilings_count
     1347);
     1348}}}
     1349
    12921350===  Implementation  ===
    12931351
     
    13311389this possible with the current structure?
    13321390
    1333  mutex_obtain(id, wait, timeout):
    1334         <span style="color:red">level = ISR_disable()</span>
    1335         mtx = mutex_get(id)
    1336         executing = get_executing_thread()
    1337         wait_control = executing.get_wait_control()
    1338         wait_control.set_status(SUCCESS)
    1339         if !mtx.is_locked():
    1340                 mtx.lock(executing)
    1341                 if mtx.use_ceiling_protocol():
    1342                         thread_dispatch_disable()
    1343                         <span style="color:red">ISR_enable(level)</span>
    1344                         executing.boost_priority(mtx.get_ceiling())
    1345                         thread_dispatch_enable()
    1346                 else:
    1347                         <span style="color:red">ISR_enable(level)</span>
    1348         else if mtx.is_holder(executing):
    1349                 mtx.increment_nest_level()
    1350                 <span style="color:red">ISR_enable(level)</span>
    1351         else if !wait:
    1352                 <span style="color:red">ISR_enable(level)</span>
    1353                 wait_control.set_status(UNSATISFIED)
    1354         else:
    1355                 wait_queue = mtx.get_wait_queue()
    1356                 wait_queue.set_sync_status(NOTHING_HAPPENED)
    1357                 executing.set_wait_queue(wait_queue))
    1358                 thread_dispatch_disable()
    1359                 <span style="color:red">ISR_enable(level)</span>
    1360                 if mtx.use_inherit_priority():
    1361                         mtx.get_holder().boost_priority(executing.get_priority()))
    1362                 <span style="color:fuchsia">level = ISR_disable()</span>
    1363                 if executing.is_ready():
    1364                         executing.set_state(MUTEX_BLOCKING_STATE)
    1365                         scheduler_block(executing)
    1366                 else:
    1367                         executing.add_state(MUTEX_BLOCKING_STATE)
    1368                 <span style="color:fuchsia">ISR_enable(level)</span>
    1369                 if timeout:
    1370                         timer_start(timeout, executing, mtx)
    1371                 <span style="color:blue">level = ISR_disable()</span>
    1372                 search_thread = wait_queue.first()
    1373                 while search_thread != wait_queue.tail():
    1374                         if executing.priority() <= search_thread.priority():
    1375                                 break
    1376                         <span style="color:blue">ISR_enable(level)</span>
    1377                         <span style="color:blue">level = ISR_disable()</span>
    1378                         if search_thread.is_state_set(MUTEX_BLOCKING_STATE):
    1379                                 search_thread = search_thread.next()
    1380                         else:
    1381                                 search_thread = wait_queue.first()
    1382                 sync_status = wait_queue.get_sync_status()
    1383                 if sync_state == NOTHING_HAPPENED:
    1384                         wait_queue.set_sync_status(SYNCHRONIZED)
    1385                         wait_queue.enqueue(search_thread, executing)
    1386                         executing.set_wait_queue(wait_queue)
    1387                         <span style="color:blue">ISR_enable(level)</span>
    1388                 else:
    1389                         executing.set_wait_queue(NULL)
    1390                         if executing.is_timer_active():
    1391                                 executing.deactivate_timer()
    1392                                 <span style="color:blue">ISR_enable(level)</span>
    1393                                 executing.remove_timer()
    1394                         else:
    1395                                 <span style="color:blue">ISR_enable(level)</span>
    1396                         <span style="color:fuchsia">level = ISR_disable()</span>
    1397                         if executing.is_state_set(MUTEX_BLOCKING_STATE):
    1398                                 executing.clear_state(MUTEX_BLOCKING_STATE)
    1399                                 if executing.is_ready():
    1400                                         scheduler_unblock(executing)
    1401                         <span style="color:fuchsia">ISR_enable(level)</span>
    1402                 thread_dispatch_enable()
    1403         return wait_control.get_status()
    1404  
    1405  mutex_release(id):
    1406         thread_dispatch_disable()
    1407         mtx = mutex_get(id)
    1408         executing = get_executing_thread()
    1409         nest_level = mtx.decrement_nest_level()
    1410         if nest_level == 0:
    1411                 if mtx.use_ceiling_protocol() or mtx.use_inherit_priority():
    1412                         executing.restore_priority()
    1413                 wait_queue = mtx.get_wait_queue()
    1414                 thread = NULL
    1415                 <span style="color:red">level = ISR_disable()</span>
    1416                 thread = wait_queue.dequeue()
    1417                 if thread != NULL:
    1418                         thread.set_wait_queue(NULL)
    1419                         if thread.is_timer_active():
    1420                                 thread.deactivate_timer()
    1421                                 <span style="color:red">ISR_enable(level)</span>
    1422                                 thread.remove_timer()
    1423                         else:
    1424                                 <span style="color:red">ISR_enable(level)</span>
    1425                         <span style="color:fuchsia">level = ISR_disable()</span>
    1426                         if thread.is_state_set(MUTEX_BLOCKING_STATE):
    1427                                 thread.clear_state(MUTEX_BLOCKING_STATE)
    1428                                 if thread.is_ready():
    1429                                         scheduler_unblock(thread)
    1430                         <span style="color:fuchsia">ISR_enable(level)</span>
    1431                 else:
    1432                         <span style="color:red">ISR_enable(level)</span>
    1433                 <span style="color:blue">level = ISR_disable()</span>
    1434                 if thread == NULL:
    1435                         sync_status = wait_queue.get_sync_status()
    1436                         if sync_status == TIMEOUT || sync_status == NOTHING_HAPPENED:
    1437                                 wait_queue.set_sync_status(SATISFIED)
    1438                                 thread = executing
    1439                 <span style="color:blue">ISR_enable(level)</span>
    1440                 if thread != NULL:
    1441                         mtx.new_holder(thread)
    1442                         if mtx.use_ceiling_protocol():
    1443                                 thread.boost_priority(mtx.get_ceiling())
    1444                 else:
    1445                         mtx.unlock()
    1446         thread_dispatch_enable()
    1447  
    1448  
    1449  mutex_timeout(thread, mtx):
    1450         <span style="color:red">level = ISR_disable()</span>
    1451         wait_queue = thread.get_wait_queue()
    1452         if wait_queue != NULL:
    1453                 sync_status = wait_queue.get_sync_status()
    1454                 if sync_status != SYNCHRONIZED and thread.is_executing():
    1455                         if sync_status != SATISFIED:
    1456                                 wait_queue.set_sync_status(TIMEOUT)
    1457                                 wait_control = executing.get_wait_control()
    1458                                 wai
     1391{{{
     1392#!html
     1393<pre>
     1394mutex_obtain(id, wait, timeout):
     1395        <span style="color:red">level = ISR_disable()</span>
     1396        mtx = mutex_get(id)
     1397        executing = get_executing_thread()
     1398        wait_control = executing.get_wait_control()
     1399        wait_control.set_status(SUCCESS)
     1400        if !mtx.is_locked():
     1401                mtx.lock(executing)
     1402                if mtx.use_ceiling_protocol():
     1403                        thread_dispatch_disable()
     1404                        <span style="color:red">ISR_enable(level)</span>
     1405                        executing.boost_priority(mtx.get_ceiling())
     1406                        thread_dispatch_enable()
     1407                else:
     1408                        <span style="color:red">ISR_enable(level)</span>
     1409        else if mtx.is_holder(executing):
     1410                mtx.increment_nest_level()
     1411                <span style="color:red">ISR_enable(level)</span>
     1412        else if !wait:
     1413                <span style="color:red">ISR_enable(level)</span>
     1414                wait_control.set_status(UNSATISFIED)
     1415        else:
     1416                wait_queue = mtx.get_wait_queue()
     1417                wait_queue.set_sync_status(NOTHING_HAPPENED)
     1418                executing.set_wait_queue(wait_queue))
     1419                thread_dispatch_disable()
     1420                <span style="color:red">ISR_enable(level)</span>
     1421                if mtx.use_inherit_priority():
     1422                        mtx.get_holder().boost_priority(executing.get_priority()))
     1423                <span style="color:fuchsia">level = ISR_disable()</span>
     1424                if executing.is_ready():
     1425                        executing.set_state(MUTEX_BLOCKING_STATE)
     1426                        scheduler_block(executing)
     1427                else:
     1428                        executing.add_state(MUTEX_BLOCKING_STATE)
     1429                <span style="color:fuchsia">ISR_enable(level)</span>
     1430                if timeout:
     1431                        timer_start(timeout, executing, mtx)
     1432                <span style="color:blue">level = ISR_disable()</span>
     1433                search_thread = wait_queue.first()
     1434                while search_thread != wait_queue.tail():
     1435                        if executing.priority() <= search_thread.priority():
     1436                                break
     1437                        <span style="color:blue">ISR_enable(level)</span>
     1438                        <span style="color:blue">level = ISR_disable()</span>
     1439                        if search_thread.is_state_set(MUTEX_BLOCKING_STATE):
     1440                                search_thread = search_thread.next()
     1441                        else:
     1442                                search_thread = wait_queue.first()
     1443                sync_status = wait_queue.get_sync_status()
      1444                if sync_status == NOTHING_HAPPENED:
     1445                        wait_queue.set_sync_status(SYNCHRONIZED)
     1446                        wait_queue.enqueue(search_thread, executing)
     1447                        executing.set_wait_queue(wait_queue)
     1448                        <span style="color:blue">ISR_enable(level)</span>
     1449                else:
     1450                        executing.set_wait_queue(NULL)
     1451                        if executing.is_timer_active():
     1452                                executing.deactivate_timer()
     1453                                <span style="color:blue">ISR_enable(level)</span>
     1454                                executing.remove_timer()
     1455                        else:
     1456                                <span style="color:blue">ISR_enable(level)</span>
     1457                        <span style="color:fuchsia">level = ISR_disable()</span>
     1458                        if executing.is_state_set(MUTEX_BLOCKING_STATE):
     1459                                executing.clear_state(MUTEX_BLOCKING_STATE)
     1460                                if executing.is_ready():
     1461                                        scheduler_unblock(executing)
     1462                        <span style="color:fuchsia">ISR_enable(level)</span>
     1463                thread_dispatch_enable()
     1464        return wait_control.get_status()
     1465
     1466mutex_release(id):
     1467        thread_dispatch_disable()
     1468        mtx = mutex_get(id)
     1469        executing = get_executing_thread()
     1470        nest_level = mtx.decrement_nest_level()
     1471        if nest_level == 0:
     1472                if mtx.use_ceiling_protocol() or mtx.use_inherit_priority():
     1473                        executing.restore_priority()
     1474                wait_queue = mtx.get_wait_queue()
     1475                thread = NULL
     1476                <span style="color:red">level = ISR_disable()</span>
     1477                thread = wait_queue.dequeue()
     1478                if thread != NULL:
     1479                        thread.set_wait_queue(NULL)
     1480                        if thread.is_timer_active():
     1481                                thread.deactivate_timer()
     1482                                <span style="color:red">ISR_enable(level)</span>
     1483                                thread.remove_timer()
     1484                        else:
     1485                                <span style="color:red">ISR_enable(level)</span>
     1486                        <span style="color:fuchsia">level = ISR_disable()</span>
     1487                        if thread.is_state_set(MUTEX_BLOCKING_STATE):
     1488                                thread.clear_state(MUTEX_BLOCKING_STATE)
     1489                                if thread.is_ready():
     1490                                        scheduler_unblock(thread)
     1491                        <span style="color:fuchsia">ISR_enable(level)</span>
     1492                else:
     1493                        <span style="color:red">ISR_enable(level)</span>
     1494                <span style="color:blue">level = ISR_disable()</span>
     1495                if thread == NULL:
     1496                        sync_status = wait_queue.get_sync_status()
     1497                        if sync_status == TIMEOUT || sync_status == NOTHING_HAPPENED:
     1498                                wait_queue.set_sync_status(SATISFIED)
     1499                                thread = executing
     1500                <span style="color:blue">ISR_enable(level)</span>
     1501                if thread != NULL:
     1502                        mtx.new_holder(thread)
     1503                        if mtx.use_ceiling_protocol():
     1504                                thread.boost_priority(mtx.get_ceiling())
     1505                else:
     1506                        mtx.unlock()
     1507        thread_dispatch_enable()
     1508
     1509mutex_timeout(thread, mtx):
     1510        <span style="color:red">level = ISR_disable()</span>
     1511        wait_queue = thread.get_wait_queue()
     1512        if wait_queue != NULL:
     1513                sync_status = wait_queue.get_sync_status()
     1514                if sync_status != SYNCHRONIZED and thread.is_executing():
     1515                        if sync_status != SATISFIED:
     1516                                wait_queue.set_sync_status(TIMEOUT)
     1517                                wait_control = executing.get_wait_control()
     1518                                wait_control.set_status(TIMEOUT)
     1519                        <span style="color:red">ISR_enable(level)</span>
     1520                else:
     1521                        <span style="color:red">ISR_enable(level)</span>
     1522                        <span style
     1523</pre>
     1524}}}