662 | | #define SMP_LOCK_STATS_CONTENTION_COUNTS 4 |
663 | | |
664 | | /** |
665 | | * @brief SMP lock statistics. |
666 | | * |
667 | | * The lock acquire attempt instant is the point in time right after the |
668 | | * interrupt disable action in the lock acquire sequence. |
669 | | * |
670 | | * The lock acquire instant is the point in time right after the lock |
671 | | * acquisition. This is the beginning of the critical section code execution. |
672 | | * |
673 | | * The lock release instant is the point in time right before the interrupt |
674 | | * enable action in the lock release sequence. |
675 | | * |
676 | | * The lock section time is the time elapsed between the lock acquire instant |
677 | | * and the lock release instant. |
678 | | * |
679 | | * The lock acquire time is the time elapsed between the lock acquire attempt |
680 | | * instant and the lock acquire instant. |
681 | | */ |
682 | | struct SMP_lock_Stats { |
683 | | #ifdef RTEMS_LOCK_PROFILING |
684 | | /** |
685 | | * @brief The last lock acquire instant in CPU counter ticks. |
686 | | * |
687 | | * This value is used to measure the lock section time. |
688 | | */ |
689 | | CPU_counter acquire_instant; |
690 | | |
691 | | /** |
692 | | * @brief The maximum lock section time in CPU counter ticks. |
693 | | */ |
694 | | CPU_counter max_section_time; |
695 | | |
696 | | /** |
697 | | * @brief The maximum lock acquire time in CPU counter ticks. |
698 | | */ |
699 | | CPU_counter max_acquire_time; |
700 | | |
701 | | /** |
702 | | * @brief The count of lock uses. |
703 | | * |
704 | | * This value may overflow. |
705 | | */ |
706 | | uint64_t usage_count; |
707 | | |
708 | | /** |
709 | | * @brief The counts of lock acquire operations with contention. |
710 | | * |
711 | | * The contention count for index N corresponds to a lock acquire attempt |
712 | | * with an initial queue length of N + 1. The last index corresponds to all |
713 | | * lock acquire attempts with an initial queue length greater than or equal |
714 | | * to SMP_LOCK_STATS_CONTENTION_COUNTS. |
715 | | * |
716 | | * The values may overflow. |
717 | | */ |
718 | | uint64_t contention_counts[SMP_LOCK_STATS_CONTENTION_COUNTS]; |
719 | | |
720 | | /** |
721 | | * @brief Total lock section time in CPU counter ticks. |
722 | | * |
723 | | * The average lock section time is the total section time divided by the |
724 | | * lock usage count. |
725 | | * |
726 | | * This value may overflow. |
727 | | */ |
728 | | uint64_t total_section_time; |
729 | | #endif /* RTEMS_LOCK_PROFILING */ |
730 | | };
731 | | |
732 | | struct SMP_lock_Control { |
733 | | ... lock data ... |
734 | | SMP_lock_Stats Stats; |
735 | | }; |
| 673 | {{{ |
| 674 | #!c |
| 675 | #define SMP_LOCK_STATS_CONTENTION_COUNTS 4 |
| 676 | |
| 677 | /** |
| 678 | * @brief SMP lock statistics. |
| 679 | * |
| 680 | * The lock acquire attempt instant is the point in time right after the |
| 681 | * interrupt disable action in the lock acquire sequence. |
| 682 | * |
| 683 | * The lock acquire instant is the point in time right after the lock |
| 684 | * acquisition. This is the beginning of the critical section code execution. |
| 685 | * |
| 686 | * The lock release instant is the point in time right before the interrupt |
| 687 | * enable action in the lock release sequence. |
| 688 | * |
| 689 | * The lock section time is the time elapsed between the lock acquire instant |
| 690 | * and the lock release instant. |
| 691 | * |
| 692 | * The lock acquire time is the time elapsed between the lock acquire attempt |
| 693 | * instant and the lock acquire instant. |
| 694 | */ |
| 695 | struct SMP_lock_Stats { |
| 696 | #ifdef RTEMS_LOCK_PROFILING |
| 697 | /** |
| 698 | * @brief The last lock acquire instant in CPU counter ticks. |
| 699 | * |
| 700 | * This value is used to measure the lock section time. |
| 701 | */ |
| 702 | CPU_counter acquire_instant; |
| 703 | |
| 704 | /** |
| 705 | * @brief The maximum lock section time in CPU counter ticks. |
| 706 | */ |
| 707 | CPU_counter max_section_time; |
| 708 | |
| 709 | /** |
| 710 | * @brief The maximum lock acquire time in CPU counter ticks. |
| 711 | */ |
| 712 | CPU_counter max_acquire_time; |
| 713 | |
| 714 | /** |
| 715 | * @brief The count of lock uses. |
| 716 | * |
| 717 | * This value may overflow. |
| 718 | */ |
| 719 | uint64_t usage_count; |
| 720 | |
| 721 | /** |
| 722 | * @brief The counts of lock acquire operations with contention. |
| 723 | * |
| 724 | * The contention count for index N corresponds to a lock acquire attempt |
| 725 | * with an initial queue length of N + 1. The last index corresponds to all |
| 726 | * lock acquire attempts with an initial queue length greater than or equal |
| 727 | * to SMP_LOCK_STATS_CONTENTION_COUNTS. |
| 728 | * |
| 729 | * The values may overflow. |
| 730 | */ |
| 731 | uint64_t contention_counts[SMP_LOCK_STATS_CONTENTION_COUNTS]; |
| 732 | |
| 733 | /** |
| 734 | * @brief Total lock section time in CPU counter ticks. |
| 735 | * |
| 736 | * The average lock section time is the total section time divided by the |
| 737 | * lock usage count. |
| 738 | * |
| 739 | * This value may overflow. |
| 740 | */ |
| 741 | uint64_t total_section_time; |
| 742 | #endif /* RTEMS_LOCK_PROFILING */ |
| 743 | };
| 744 | |
| 745 | struct SMP_lock_Control { |
| 746 | ... lock data ... |
| 747 | SMP_lock_Stats Stats; |
| 748 | }; |
| 749 | }}} |
791 | | /** |
792 | | * @brief Per-CPU statistics. |
793 | | */ |
794 | | struct Per_CPU_Stats { |
795 | | #ifdef RTEMS_INTERRUPT_AND_THREAD_PROFILING |
796 | | /** |
797 | | * @brief The thread dispatch disabled begin instant in CPU counter ticks. |
798 | | * |
799 | | * This value is used to measure the time of disabled thread dispatching. |
800 | | */ |
801 | | CPU_counter thread_dispatch_disabled_instant; |
802 | | |
803 | | /** |
804 | | * @brief The last outer-most interrupt begin instant in CPU counter ticks. |
805 | | * |
806 | | * This value is used to measure the interrupt processing time. |
807 | | */ |
808 | | CPU_counter outer_most_interrupt_instant; |
809 | | |
810 | | /** |
811 | | * @brief The maximum interrupt delay in CPU counter ticks if supported by |
812 | | * the hardware. |
813 | | */ |
814 | | CPU_counter max_interrupt_delay; |
815 | | |
816 | | /** |
817 | | * @brief The maximum time of disabled thread dispatching in CPU counter |
818 | | * ticks. |
819 | | */ |
820 | | CPU_counter max_thread_dispatch_disabled_time; |
821 | | |
822 | | /** |
823 | | * @brief Count of times when the thread dispatch disable level changes from |
824 | | * zero to one in thread context. |
825 | | * |
826 | | * This value may overflow. |
827 | | */ |
828 | | uint64_t thread_dispatch_disabled_count; |
829 | | |
830 | | /** |
831 | | * @brief Total time of disabled thread dispatching in CPU counter ticks. |
832 | | * |
833 | | * The average time of disabled thread dispatching is the total time of |
834 | | * disabled thread dispatching divided by the thread dispatch disabled |
835 | | * count. |
836 | | * |
837 | | * This value may overflow. |
838 | | */ |
839 | | uint64_t total_thread_dispatch_disabled_time; |
840 | | |
841 | | /** |
842 | | * @brief Count of times when the interrupt nest level changes from zero to |
843 | | * one. |
844 | | * |
845 | | * This value may overflow. |
846 | | */ |
847 | | uint64_t interrupt_count; |
848 | | |
849 | | /** |
850 | | * @brief Total time of interrupt processing in CPU counter ticks. |
851 | | * |
852 | | * The average time of interrupt processing is the total time of interrupt |
853 | | * processing divided by the interrupt count. |
854 | | * |
855 | | * This value may overflow. |
856 | | */ |
857 | | uint64_t total_interrupt_time; |
858 | | #endif /* RTEMS_INTERRUPT_AND_THREAD_PROFILING */ |
859 | | };
860 | | |
861 | | struct Per_CPU_Control { |
862 | | ... per-CPU data ... |
863 | | Per_CPU_Stats Stats; |
864 | | }; |
| 815 | {{{ |
| 816 | #!c |
| 817 | /** |
| 818 | * @brief Per-CPU statistics. |
| 819 | */ |
| 820 | struct Per_CPU_Stats { |
| 821 | #ifdef RTEMS_INTERRUPT_AND_THREAD_PROFILING |
| 822 | /** |
| 823 | * @brief The thread dispatch disabled begin instant in CPU counter ticks. |
| 824 | * |
| 825 | * This value is used to measure the time of disabled thread dispatching. |
| 826 | */ |
| 827 | CPU_counter thread_dispatch_disabled_instant; |
| 828 | |
| 829 | /** |
| 830 | * @brief The last outer-most interrupt begin instant in CPU counter ticks. |
| 831 | * |
| 832 | * This value is used to measure the interrupt processing time. |
| 833 | */ |
| 834 | CPU_counter outer_most_interrupt_instant; |
| 835 | |
| 836 | /** |
| 837 | * @brief The maximum interrupt delay in CPU counter ticks if supported by |
| 838 | * the hardware. |
| 839 | */ |
| 840 | CPU_counter max_interrupt_delay; |
| 841 | |
| 842 | /** |
| 843 | * @brief The maximum time of disabled thread dispatching in CPU counter |
| 844 | * ticks. |
| 845 | */ |
| 846 | CPU_counter max_thread_dispatch_disabled_time; |
| 847 | |
| 848 | /** |
| 849 | * @brief Count of times when the thread dispatch disable level changes from |
| 850 | * zero to one in thread context. |
| 851 | * |
| 852 | * This value may overflow. |
| 853 | */ |
| 854 | uint64_t thread_dispatch_disabled_count; |
| 855 | |
| 856 | /** |
| 857 | * @brief Total time of disabled thread dispatching in CPU counter ticks. |
| 858 | * |
| 859 | * The average time of disabled thread dispatching is the total time of |
| 860 | * disabled thread dispatching divided by the thread dispatch disabled |
| 861 | * count. |
| 862 | * |
| 863 | * This value may overflow. |
| 864 | */ |
| 865 | uint64_t total_thread_dispatch_disabled_time; |
| 866 | |
| 867 | /** |
| 868 | * @brief Count of times when the interrupt nest level changes from zero to |
| 869 | * one. |
| 870 | * |
| 871 | * This value may overflow. |
| 872 | */ |
| 873 | uint64_t interrupt_count; |
| 874 | |
| 875 | /** |
| 876 | * @brief Total time of interrupt processing in CPU counter ticks. |
| 877 | * |
| 878 | * The average time of interrupt processing is the total time of interrupt |
| 879 | * processing divided by the interrupt count. |
| 880 | * |
| 881 | * This value may overflow. |
| 882 | */ |
| 883 | uint64_t total_interrupt_time; |
| 884 | #endif /* RTEMS_INTERRUPT_AND_THREAD_PROFILING */ |
| 885 | };
| 886 | |
| 887 | struct Per_CPU_Control { |
| 888 | ... per-CPU data ... |
| 889 | Per_CPU_Stats Stats; |
| 890 | }; |
| 891 | }}} |
| 892 | |
889 | | /** |
890 | | * @brief Sets the processor affinity set of an interrupt vector. |
891 | | * |
892 | | * @param[in] vector The interrupt vector number. |
893 | | * @param[in] affinity_set_size Size of the specified affinity set buffer in |
894 | | * bytes. This value must be positive. |
895 | | * @param[in] affinity_set The new processor affinity set for the interrupt |
896 | | * vector. This pointer must not be @c NULL. A set bit in the affinity set |
897 | | * means that the interrupt can occur on this processor and a cleared bit |
898 | | * means the opposite. |
899 | | * |
900 | | * @retval RTEMS_SUCCESSFUL Successful operation. |
901 | | * @retval RTEMS_INVALID_ID The vector number is invalid. |
902 | | * @retval RTEMS_INVALID_CPU_SET Invalid processor affinity set. |
903 | | */ |
904 | | rtems_status_code rtems_interrupt_set_affinity( |
905 | | rtems_vector vector, |
906 | | size_t affinity_set_size, |
907 | | const cpu_set_t *affinity_set |
908 | | ); |
909 | | |
910 | | /** |
911 | | * @brief Gets the processor affinity set of an interrupt vector. |
912 | | * |
913 | | * @param[in] vector The interrupt vector number. |
914 | | * @param[in] affinity_set_size Size of the specified affinity set buffer in |
915 | | * bytes. This value must be positive. |
916 | | * @param[out] affinity_set The current processor affinity set of the |
917 | | * interrupt vector. This pointer must not be @c NULL. A set bit in the |
918 | | * affinity set means that the interrupt can occur on this processor and a |
919 | | * cleared bit means the opposite. |
920 | | * |
921 | | * @retval RTEMS_SUCCESSFUL Successful operation. |
922 | | * @retval RTEMS_INVALID_ID The vector number is invalid. |
923 | | * @retval RTEMS_INVALID_CPU_SET The affinity set buffer is too small for the |
924 | | * current processor affinity set of the interrupt vector. |
925 | | */ |
926 | | rtems_status_code rtems_interrupt_get_affinity( |
927 | | rtems_vector vector, |
928 | | size_t affinity_set_size, |
929 | | cpu_set_t *affinity_set |
930 | | ); |
| 917 | {{{ |
| 918 | #!c |
| 919 | /** |
| 920 | * @brief Sets the processor affinity set of an interrupt vector. |
| 921 | * |
| 922 | * @param[in] vector The interrupt vector number. |
| 923 | * @param[in] affinity_set_size Size of the specified affinity set buffer in |
| 924 | * bytes. This value must be positive. |
| 925 | * @param[in] affinity_set The new processor affinity set for the interrupt |
| 926 | * vector. This pointer must not be @c NULL. A set bit in the affinity set |
| 927 | * means that the interrupt can occur on this processor and a cleared bit |
| 928 | * means the opposite. |
| 929 | * |
| 930 | * @retval RTEMS_SUCCESSFUL Successful operation. |
| 931 | * @retval RTEMS_INVALID_ID The vector number is invalid. |
| 932 | * @retval RTEMS_INVALID_CPU_SET Invalid processor affinity set. |
| 933 | */ |
| 934 | rtems_status_code rtems_interrupt_set_affinity( |
| 935 | rtems_vector vector, |
| 936 | size_t affinity_set_size, |
| 937 | const cpu_set_t *affinity_set |
| 938 | ); |
| 939 | |
| 940 | /** |
| 941 | * @brief Gets the processor affinity set of an interrupt vector. |
| 942 | * |
| 943 | * @param[in] vector The interrupt vector number. |
| 944 | * @param[in] affinity_set_size Size of the specified affinity set buffer in |
| 945 | * bytes. This value must be positive. |
| 946 | * @param[out] affinity_set The current processor affinity set of the |
| 947 | * interrupt vector. This pointer must not be @c NULL. A set bit in the |
| 948 | * affinity set means that the interrupt can occur on this processor and a |
| 949 | * cleared bit means the opposite. |
| 950 | * |
| 951 | * @retval RTEMS_SUCCESSFUL Successful operation. |
| 952 | * @retval RTEMS_INVALID_ID The vector number is invalid. |
| 953 | * @retval RTEMS_INVALID_CPU_SET The affinity set buffer is too small for the |
| 954 | * current processor affinity set of the interrupt vector. |
| 955 | */ |
| 956 | rtems_status_code rtems_interrupt_get_affinity( |
| 957 | rtems_vector vector, |
| 958 | size_t affinity_set_size, |
| 959 | cpu_set_t *affinity_set |
| 960 | ); |
| 961 | }}} |
| 962 | |
951 | | /** |
952 | | * @brief Identifies a scheduler by its name. |
953 | | * |
954 | | * The scheduler name is determined by the scheduler configuration. |
955 | | * |
956 | | * @param[in] name The scheduler name. |
957 | | * @param[out] scheduler_id The scheduler identifier associated with the name. |
958 | | * |
959 | | * @retval RTEMS_SUCCESSFUL Successful operation. |
960 | | * @retval RTEMS_INVALID_NAME Invalid scheduler name. |
961 | | */ |
962 | | rtems_status_code rtems_scheduler_ident( |
963 | | rtems_name name, |
964 | | rtems_id *scheduler_id |
965 | | ); |
966 | | |
967 | | /** |
968 | | * @brief Gets the set of processors owned by the scheduler. |
969 | | * |
970 | | * @param[in] scheduler_id Identifier of the scheduler. |
971 | | * @param[in] processor_set_size Size of the specified processor set buffer in |
972 | | * bytes. This value must be positive. |
973 | | * @param[out] processor_set The processor set owned by the scheduler. This |
974 | | * pointer must not be @c NULL. A set bit in the processor set means that |
975 | | * this processor is owned by the scheduler and a cleared bit means the |
976 | | * opposite. |
977 | | * |
978 | | * @retval RTEMS_SUCCESSFUL Successful operation. |
979 | | * @retval RTEMS_INVALID_ID Invalid scheduler identifier. |
980 | | * @retval RTEMS_INVALID_CPU_SET The processor set buffer is too small for the |
981 | | * set of processors owned by the scheduler. |
982 | | */ |
983 | | rtems_status_code rtems_scheduler_get_processors( |
984 | | rtems_id scheduler_id, |
985 | | size_t processor_set_size, |
986 | | cpu_set_t *processor_set |
987 | | ); |
| 983 | {{{ |
| 984 | #!c |
| 985 | /** |
| 986 | * @brief Identifies a scheduler by its name. |
| 987 | * |
| 988 | * The scheduler name is determined by the scheduler configuration. |
| 989 | * |
| 990 | * @param[in] name The scheduler name. |
| 991 | * @param[out] scheduler_id The scheduler identifier associated with the name. |
| 992 | * |
| 993 | * @retval RTEMS_SUCCESSFUL Successful operation. |
| 994 | * @retval RTEMS_INVALID_NAME Invalid scheduler name. |
| 995 | */ |
| 996 | rtems_status_code rtems_scheduler_ident( |
| 997 | rtems_name name, |
| 998 | rtems_id *scheduler_id |
| 999 | ); |
| 1000 | |
| 1001 | /** |
| 1002 | * @brief Gets the set of processors owned by the scheduler. |
| 1003 | * |
| 1004 | * @param[in] scheduler_id Identifier of the scheduler. |
| 1005 | * @param[in] processor_set_size Size of the specified processor set buffer in |
| 1006 | * bytes. This value must be positive. |
| 1007 | * @param[out] processor_set The processor set owned by the scheduler. This |
| 1008 | * pointer must not be @c NULL. A set bit in the processor set means that |
| 1009 | * this processor is owned by the scheduler and a cleared bit means the |
| 1010 | * opposite. |
| 1011 | * |
| 1012 | * @retval RTEMS_SUCCESSFUL Successful operation. |
| 1013 | * @retval RTEMS_INVALID_ID Invalid scheduler identifier. |
| 1014 | * @retval RTEMS_INVALID_CPU_SET The processor set buffer is too small for the |
| 1015 | * set of processors owned by the scheduler. |
| 1016 | */ |
| 1017 | rtems_status_code rtems_scheduler_get_processors( |
| 1018 | rtems_id scheduler_id, |
| 1019 | size_t processor_set_size, |
| 1020 | cpu_set_t *processor_set |
| 1021 | ); |
| 1022 | }}} |
997 | | /** |
998 | | * @brief Sets the processor affinity set of a task. |
999 | | * |
1000 | | * @param[in] task_id Identifier of the task. Use @ref RTEMS_SELF to select |
1001 | | * the executing task. |
1002 | | * @param[in] affinity_set_size Size of the specified affinity set buffer in |
1003 | | * bytes. This value must be positive. |
1004 | | * @param[in] affinity_set The new processor affinity set for the task. This |
1005 | | * pointer must not be @c NULL. A set bit in the affinity set means that the |
1006 | | * task can execute on this processor and a cleared bit means the opposite. |
1007 | | * |
1008 | | * @retval RTEMS_SUCCESSFUL Successful operation. |
1009 | | * @retval RTEMS_INVALID_ID Invalid task identifier. |
1010 | | * @retval RTEMS_INVALID_CPU_SET Invalid processor affinity set. |
1011 | | */ |
1012 | | rtems_status_code rtems_task_set_affinity( |
1013 | | rtems_id task_id, |
1014 | | size_t affinity_set_size, |
1015 | | const cpu_set_t *affinity_set |
1016 | | ); |
1017 | | |
1018 | | /** |
1019 | | * @brief Gets the processor affinity set of a task. |
1020 | | * |
1021 | | * @param[in] task_id Identifier of the task. Use @ref RTEMS_SELF to select |
1022 | | * the executing task. |
1023 | | * @param[in] affinity_set_size Size of the specified affinity set buffer in |
1024 | | * bytes. This value must be positive. |
1025 | | * @param[out] affinity_set The current processor affinity set of the task. |
1026 | | * This pointer must not be @c NULL. A set bit in the affinity set means that |
1027 | | * the task can execute on this processor and a cleared bit means the |
1028 | | * opposite. |
1029 | | * |
1030 | | * @retval RTEMS_SUCCESSFUL Successful operation. |
1031 | | * @retval RTEMS_INVALID_ID Invalid task identifier. |
1032 | | * @retval RTEMS_INVALID_CPU_SET The affinity set buffer is too small for the |
1033 | | * current processor affinity set of the task. |
1034 | | */ |
1035 | | rtems_status_code rtems_task_get_affinity( |
1036 | | rtems_id task_id, |
1037 | | size_t affinity_set_size, |
1038 | | cpu_set_t *affinity_set |
1039 | | ); |
| 1032 | {{{ |
| 1033 | #!c |
| 1034 | /** |
| 1035 | * @brief Sets the processor affinity set of a task. |
| 1036 | * |
| 1037 | * @param[in] task_id Identifier of the task. Use @ref RTEMS_SELF to select |
| 1038 | * the executing task. |
| 1039 | * @param[in] affinity_set_size Size of the specified affinity set buffer in |
| 1040 | * bytes. This value must be positive. |
| 1041 | * @param[in] affinity_set The new processor affinity set for the task. This |
| 1042 | * pointer must not be @c NULL. A set bit in the affinity set means that the |
| 1043 | * task can execute on this processor and a cleared bit means the opposite. |
| 1044 | * |
| 1045 | * @retval RTEMS_SUCCESSFUL Successful operation. |
| 1046 | * @retval RTEMS_INVALID_ID Invalid task identifier. |
| 1047 | * @retval RTEMS_INVALID_CPU_SET Invalid processor affinity set. |
| 1048 | */ |
| 1049 | rtems_status_code rtems_task_set_affinity( |
| 1050 | rtems_id task_id, |
| 1051 | size_t affinity_set_size, |
| 1052 | const cpu_set_t *affinity_set |
| 1053 | ); |
| 1054 | |
| 1055 | /** |
| 1056 | * @brief Gets the processor affinity set of a task. |
| 1057 | * |
| 1058 | * @param[in] task_id Identifier of the task. Use @ref RTEMS_SELF to select |
| 1059 | * the executing task. |
| 1060 | * @param[in] affinity_set_size Size of the specified affinity set buffer in |
| 1061 | * bytes. This value must be positive. |
| 1062 | * @param[out] affinity_set The current processor affinity set of the task. |
| 1063 | * This pointer must not be @c NULL. A set bit in the affinity set means that |
| 1064 | * the task can execute on this processor and a cleared bit means the |
| 1065 | * opposite. |
| 1066 | * |
| 1067 | * @retval RTEMS_SUCCESSFUL Successful operation. |
| 1068 | * @retval RTEMS_INVALID_ID Invalid task identifier. |
| 1069 | * @retval RTEMS_INVALID_CPU_SET The affinity set buffer is too small for the |
| 1070 | * current processor affinity set of the task. |
| 1071 | */ |
| 1072 | rtems_status_code rtems_task_get_affinity( |
| 1073 | rtems_id task_id, |
| 1074 | size_t affinity_set_size, |
| 1075 | cpu_set_t *affinity_set |
| 1076 | ); |
| 1077 | }}} |
1141 | | RTEMS_SCHED_DEFINE_FP_SMP(fp0, rtems_build_name(' ', 'F', 'P', '0'), 256); |
1142 | | RTEMS_SCHED_DEFINE_FP_SMP(fp1, rtems_build_name(' ', 'F', 'P', '1'), 64); |
1143 | | RTEMS_SCHED_DEFINE_EDF_SMP(edf0, rtems_build_name('E', 'D', 'F', '0')); |
1144 | | |
1145 | | const rtems_cpu_config rtems_cpu_config_table[] = { |
1146 | | RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_FP_SMP(fp0)), |
1147 | | RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_FP_SMP(fp1)), |
1148 | | RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_FP_SMP(fp1)), |
1149 | | RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_FP_SMP(fp1)), |
1150 | | RTEMS_CPU_CONFIG_INIT(NULL), |
1151 | | RTEMS_CPU_CONFIG_INIT(NULL), |
1152 | | RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_EDF_SMP(edf0)), |
1153 | | RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_EDF_SMP(edf0))
1154 | | }; |
1155 | | |
1156 | | const size_t rtems_cpu_config_count = |
1157 | | |
1158 | | RTEMS_ARRAY_SIZE(rtems_cpu_config_table); |
| 1186 | {{{ |
| 1187 | #!c |
| 1188 | RTEMS_SCHED_DEFINE_FP_SMP(fp0, rtems_build_name(' ', 'F', 'P', '0'), 256); |
| 1189 | RTEMS_SCHED_DEFINE_FP_SMP(fp1, rtems_build_name(' ', 'F', 'P', '1'), 64); |
| 1190 | RTEMS_SCHED_DEFINE_EDF_SMP(edf0, rtems_build_name('E', 'D', 'F', '0')); |
| 1191 | |
| 1192 | const rtems_cpu_config rtems_cpu_config_table[] = { |
| 1193 | RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_FP_SMP(fp0)), |
| 1194 | RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_FP_SMP(fp1)), |
| 1195 | RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_FP_SMP(fp1)), |
| 1196 | RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_FP_SMP(fp1)), |
| 1197 | RTEMS_CPU_CONFIG_INIT(NULL), |
| 1198 | RTEMS_CPU_CONFIG_INIT(NULL), |
| 1199 | RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_EDF_SMP(edf0)), |
| 1200 | RTEMS_CPU_CONFIG_INIT(RTEMS_SCHED_REF_EDF_SMP(edf0))
| 1201 | }; |
| 1202 | |
| 1203 | const size_t rtems_cpu_config_count = |
| 1204 | |
| 1205 | RTEMS_ARRAY_SIZE(rtems_cpu_config_table); |
| 1206 | }}} |
1263 | | typedef struct { |
1264 | | rtems_id scheduler_id; |
1265 | | rtems_task_priority priority; |
1266 | | } rtems_task_priority_by_scheduler; |
1267 | | |
1268 | | /** |
1269 | | * @brief Sets the priority ceilings per scheduler for a semaphore with |
1270 | | * priority ceiling protocol. |
1271 | | * |
1272 | | * @param[in] semaphore_id Identifier of the semaphore. |
1273 | | * @param[in] priority_ceilings A table with priority ceilings by scheduler. |
1274 | | * In case one scheduler appears multiple times, the setting with the highest |
1275 | | * index will be used. This semaphore object is then bound to the specified |
1276 | | * scheduler domains. It is an error to use this semaphore object on other |
1277 | | * scheduler domains. The specified schedulers must be compatible, e.g. |
1278 | | * migration from one scheduler domain to another must be defined. |
1279 | | * @param[in] priority_ceilings_count Count of priority ceilings by scheduler |
1280 | | * pairs in the table. |
1281 | | * |
1282 | | * @retval RTEMS_SUCCESSFUL Successful operation. |
1283 | | * @retval RTEMS_INVALID_ID Invalid semaphore identifier. |
1284 | | * @retval RTEMS_INVALID_SECOND_ID Invalid scheduler identifier in the table. |
1285 | | * @retval RTEMS_INVALID_PRIORITY Invalid task priority in the table. |
1286 | | */ |
1287 | | rtems_status_code rtems_semaphore_set_priority_ceilings( |
1288 | | rtems_id semaphore_id, |
1289 | | const rtems_task_priority_by_scheduler *priority_ceilings, |
1290 | | size_t priority_ceilings_count |
1291 | | ); |
| 1317 | {{{ |
| 1318 | #!c |
| 1319 | typedef struct { |
| 1320 | rtems_id scheduler_id; |
| 1321 | rtems_task_priority priority; |
| 1322 | } rtems_task_priority_by_scheduler; |
| 1323 | |
| 1324 | /** |
| 1325 | * @brief Sets the priority ceilings per scheduler for a semaphore with |
| 1326 | * priority ceiling protocol. |
| 1327 | * |
| 1328 | * @param[in] semaphore_id Identifier of the semaphore. |
| 1329 | * @param[in] priority_ceilings A table with priority ceilings by scheduler. |
| 1330 | * In case one scheduler appears multiple times, the setting with the highest |
| 1331 | * index will be used. This semaphore object is then bound to the specified |
| 1332 | * scheduler domains. It is an error to use this semaphore object on other |
| 1333 | * scheduler domains. The specified schedulers must be compatible, e.g. |
| 1334 | * migration from one scheduler domain to another must be defined. |
| 1335 | * @param[in] priority_ceilings_count Count of priority ceilings by scheduler |
| 1336 | * pairs in the table. |
| 1337 | * |
| 1338 | * @retval RTEMS_SUCCESSFUL Successful operation. |
| 1339 | * @retval RTEMS_INVALID_ID Invalid semaphore identifier. |
| 1340 | * @retval RTEMS_INVALID_SECOND_ID Invalid scheduler identifier in the table. |
| 1341 | * @retval RTEMS_INVALID_PRIORITY Invalid task priority in the table. |
| 1342 | */ |
| 1343 | rtems_status_code rtems_semaphore_set_priority_ceilings( |
| 1344 | rtems_id semaphore_id, |
| 1345 | const rtems_task_priority_by_scheduler *priority_ceilings, |
| 1346 | size_t priority_ceilings_count |
| 1347 | ); |
| 1348 | }}} |
| 1349 | |
1333 | | mutex_obtain(id, wait, timeout): |
1334 | | <span style="color:red">level = ISR_disable()</span> |
1335 | | mtx = mutex_get(id) |
1336 | | executing = get_executing_thread() |
1337 | | wait_control = executing.get_wait_control() |
1338 | | wait_control.set_status(SUCCESS) |
1339 | | if !mtx.is_locked(): |
1340 | | mtx.lock(executing) |
1341 | | if mtx.use_ceiling_protocol(): |
1342 | | thread_dispatch_disable() |
1343 | | <span style="color:red">ISR_enable(level)</span> |
1344 | | executing.boost_priority(mtx.get_ceiling()) |
1345 | | thread_dispatch_enable() |
1346 | | else: |
1347 | | <span style="color:red">ISR_enable(level)</span> |
1348 | | else if mtx.is_holder(executing): |
1349 | | mtx.increment_nest_level() |
1350 | | <span style="color:red">ISR_enable(level)</span> |
1351 | | else if !wait: |
1352 | | <span style="color:red">ISR_enable(level)</span> |
1353 | | wait_control.set_status(UNSATISFIED) |
1354 | | else: |
1355 | | wait_queue = mtx.get_wait_queue() |
1356 | | wait_queue.set_sync_status(NOTHING_HAPPENED) |
1357 | | executing.set_wait_queue(wait_queue) |
1358 | | thread_dispatch_disable() |
1359 | | <span style="color:red">ISR_enable(level)</span> |
1360 | | if mtx.use_inherit_priority(): |
1361 | | mtx.get_holder().boost_priority(executing.get_priority()) |
1362 | | <span style="color:fuchsia">level = ISR_disable()</span> |
1363 | | if executing.is_ready(): |
1364 | | executing.set_state(MUTEX_BLOCKING_STATE) |
1365 | | scheduler_block(executing) |
1366 | | else: |
1367 | | executing.add_state(MUTEX_BLOCKING_STATE) |
1368 | | <span style="color:fuchsia">ISR_enable(level)</span> |
1369 | | if timeout: |
1370 | | timer_start(timeout, executing, mtx) |
1371 | | <span style="color:blue">level = ISR_disable()</span> |
1372 | | search_thread = wait_queue.first() |
1373 | | while search_thread != wait_queue.tail(): |
1374 | | if executing.priority() <= search_thread.priority(): |
1375 | | break |
1376 | | <span style="color:blue">ISR_enable(level)</span> |
1377 | | <span style="color:blue">level = ISR_disable()</span> |
1378 | | if search_thread.is_state_set(MUTEX_BLOCKING_STATE): |
1379 | | search_thread = search_thread.next() |
1380 | | else: |
1381 | | search_thread = wait_queue.first() |
1382 | | sync_status = wait_queue.get_sync_status() |
1383 | | if sync_status == NOTHING_HAPPENED: |
1384 | | wait_queue.set_sync_status(SYNCHRONIZED) |
1385 | | wait_queue.enqueue(search_thread, executing) |
1386 | | executing.set_wait_queue(wait_queue) |
1387 | | <span style="color:blue">ISR_enable(level)</span> |
1388 | | else: |
1389 | | executing.set_wait_queue(NULL) |
1390 | | if executing.is_timer_active(): |
1391 | | executing.deactivate_timer() |
1392 | | <span style="color:blue">ISR_enable(level)</span> |
1393 | | executing.remove_timer() |
1394 | | else: |
1395 | | <span style="color:blue">ISR_enable(level)</span> |
1396 | | <span style="color:fuchsia">level = ISR_disable()</span> |
1397 | | if executing.is_state_set(MUTEX_BLOCKING_STATE): |
1398 | | executing.clear_state(MUTEX_BLOCKING_STATE) |
1399 | | if executing.is_ready(): |
1400 | | scheduler_unblock(executing) |
1401 | | <span style="color:fuchsia">ISR_enable(level)</span> |
1402 | | thread_dispatch_enable() |
1403 | | return wait_control.get_status() |
1404 | | |
1405 | | mutex_release(id): |
1406 | | thread_dispatch_disable() |
1407 | | mtx = mutex_get(id) |
1408 | | executing = get_executing_thread() |
1409 | | nest_level = mtx.decrement_nest_level() |
1410 | | if nest_level == 0: |
1411 | | if mtx.use_ceiling_protocol() or mtx.use_inherit_priority(): |
1412 | | executing.restore_priority() |
1413 | | wait_queue = mtx.get_wait_queue() |
1414 | | thread = NULL |
1415 | | <span style="color:red">level = ISR_disable()</span> |
1416 | | thread = wait_queue.dequeue() |
1417 | | if thread != NULL: |
1418 | | thread.set_wait_queue(NULL) |
1419 | | if thread.is_timer_active(): |
1420 | | thread.deactivate_timer() |
1421 | | <span style="color:red">ISR_enable(level)</span> |
1422 | | thread.remove_timer() |
1423 | | else: |
1424 | | <span style="color:red">ISR_enable(level)</span> |
1425 | | <span style="color:fuchsia">level = ISR_disable()</span> |
1426 | | if thread.is_state_set(MUTEX_BLOCKING_STATE): |
1427 | | thread.clear_state(MUTEX_BLOCKING_STATE) |
1428 | | if thread.is_ready(): |
1429 | | scheduler_unblock(thread) |
1430 | | <span style="color:fuchsia">ISR_enable(level)</span> |
1431 | | else: |
1432 | | <span style="color:red">ISR_enable(level)</span> |
1433 | | <span style="color:blue">level = ISR_disable()</span> |
1434 | | if thread == NULL: |
1435 | | sync_status = wait_queue.get_sync_status() |
1436 | | if sync_status == TIMEOUT or sync_status == NOTHING_HAPPENED: |
1437 | | wait_queue.set_sync_status(SATISFIED) |
1438 | | thread = executing |
1439 | | <span style="color:blue">ISR_enable(level)</span> |
1440 | | if thread != NULL: |
1441 | | mtx.new_holder(thread) |
1442 | | if mtx.use_ceiling_protocol(): |
1443 | | thread.boost_priority(mtx.get_ceiling()) |
1444 | | else: |
1445 | | mtx.unlock() |
1446 | | thread_dispatch_enable() |
1447 | | |
1448 | | |
1449 | | mutex_timeout(thread, mtx): |
1450 | | <span style="color:red">level = ISR_disable()</span> |
1451 | | wait_queue = thread.get_wait_queue() |
1452 | | if wait_queue != NULL: |
1453 | | sync_status = wait_queue.get_sync_status() |
1454 | | if sync_status != SYNCHRONIZED and thread.is_executing(): |
1455 | | if sync_status != SATISFIED: |
1456 | | wait_queue.set_sync_status(TIMEOUT) |
1457 | |                 wait_control = thread.get_wait_control()
1458 | |                 wait_control.set_status(TIMEOUT)
| 1391 | {{{ |
| 1392 | #!html |
| 1393 | <pre> |
| 1394 | mutex_obtain(id, wait, timeout): |
| 1395 | <span style="color:red">level = ISR_disable()</span> |
| 1396 | mtx = mutex_get(id) |
| 1397 | executing = get_executing_thread() |
| 1398 | wait_control = executing.get_wait_control() |
| 1399 | wait_control.set_status(SUCCESS) |
| 1400 | if !mtx.is_locked(): |
| 1401 | mtx.lock(executing) |
| 1402 | if mtx.use_ceiling_protocol(): |
| 1403 | thread_dispatch_disable() |
| 1404 | <span style="color:red">ISR_enable(level)</span> |
| 1405 | executing.boost_priority(mtx.get_ceiling()) |
| 1406 | thread_dispatch_enable() |
| 1407 | else: |
| 1408 | <span style="color:red">ISR_enable(level)</span> |
| 1409 | else if mtx.is_holder(executing): |
| 1410 | mtx.increment_nest_level() |
| 1411 | <span style="color:red">ISR_enable(level)</span> |
| 1412 | else if !wait: |
| 1413 | <span style="color:red">ISR_enable(level)</span> |
| 1414 | wait_control.set_status(UNSATISFIED) |
| 1415 | else: |
| 1416 | wait_queue = mtx.get_wait_queue() |
| 1417 | wait_queue.set_sync_status(NOTHING_HAPPENED) |
| 1418 |         executing.set_wait_queue(wait_queue)
| 1419 | thread_dispatch_disable() |
| 1420 | <span style="color:red">ISR_enable(level)</span> |
| 1421 | if mtx.use_inherit_priority(): |
| 1422 |             mtx.get_holder().boost_priority(executing.get_priority())
| 1423 | <span style="color:fuchsia">level = ISR_disable()</span> |
| 1424 | if executing.is_ready(): |
| 1425 | executing.set_state(MUTEX_BLOCKING_STATE) |
| 1426 | scheduler_block(executing) |
| 1427 | else: |
| 1428 | executing.add_state(MUTEX_BLOCKING_STATE) |
| 1429 | <span style="color:fuchsia">ISR_enable(level)</span> |
| 1430 | if timeout: |
| 1431 | timer_start(timeout, executing, mtx) |
| 1432 | <span style="color:blue">level = ISR_disable()</span> |
| 1433 | search_thread = wait_queue.first() |
| 1434 | while search_thread != wait_queue.tail(): |
| 1435 | if executing.priority() <= search_thread.priority(): |
| 1436 | break |
| 1437 | <span style="color:blue">ISR_enable(level)</span> |
| 1438 | <span style="color:blue">level = ISR_disable()</span> |
| 1439 | if search_thread.is_state_set(MUTEX_BLOCKING_STATE): |
| 1440 | search_thread = search_thread.next() |
| 1441 | else: |
| 1442 | search_thread = wait_queue.first() |
| 1443 | sync_status = wait_queue.get_sync_status() |
| 1444 |         if sync_status == NOTHING_HAPPENED:
| 1445 | wait_queue.set_sync_status(SYNCHRONIZED) |
| 1446 | wait_queue.enqueue(search_thread, executing) |
| 1447 | executing.set_wait_queue(wait_queue) |
| 1448 | <span style="color:blue">ISR_enable(level)</span> |
| 1449 | else: |
| 1450 | executing.set_wait_queue(NULL) |
| 1451 | if executing.is_timer_active(): |
| 1452 | executing.deactivate_timer() |
| 1453 | <span style="color:blue">ISR_enable(level)</span> |
| 1454 | executing.remove_timer() |
| 1455 | else: |
| 1456 | <span style="color:blue">ISR_enable(level)</span> |
| 1457 | <span style="color:fuchsia">level = ISR_disable()</span> |
| 1458 | if executing.is_state_set(MUTEX_BLOCKING_STATE): |
| 1459 | executing.clear_state(MUTEX_BLOCKING_STATE) |
| 1460 | if executing.is_ready(): |
| 1461 | scheduler_unblock(executing) |
| 1462 | <span style="color:fuchsia">ISR_enable(level)</span> |
| 1463 | thread_dispatch_enable() |
| 1464 | return wait_control.get_status() |
| 1465 | |
| 1466 | mutex_release(id): |
| 1467 | thread_dispatch_disable() |
| 1468 | mtx = mutex_get(id) |
| 1469 | executing = get_executing_thread() |
| 1470 | nest_level = mtx.decrement_nest_level() |
| 1471 | if nest_level == 0: |
| 1472 | if mtx.use_ceiling_protocol() or mtx.use_inherit_priority(): |
| 1473 | executing.restore_priority() |
| 1474 | wait_queue = mtx.get_wait_queue() |
| 1475 | thread = NULL |
| 1476 | <span style="color:red">level = ISR_disable()</span> |
| 1477 | thread = wait_queue.dequeue() |
| 1478 | if thread != NULL: |
| 1479 | thread.set_wait_queue(NULL) |
| 1480 | if thread.is_timer_active(): |
| 1481 | thread.deactivate_timer() |
| 1482 | <span style="color:red">ISR_enable(level)</span> |
| 1483 | thread.remove_timer() |
| 1484 | else: |
| 1485 | <span style="color:red">ISR_enable(level)</span> |
| 1486 | <span style="color:fuchsia">level = ISR_disable()</span> |
| 1487 | if thread.is_state_set(MUTEX_BLOCKING_STATE): |
| 1488 | thread.clear_state(MUTEX_BLOCKING_STATE) |
| 1489 | if thread.is_ready(): |
| 1490 | scheduler_unblock(thread) |
| 1491 | <span style="color:fuchsia">ISR_enable(level)</span> |
| 1492 | else: |
| 1493 | <span style="color:red">ISR_enable(level)</span> |
| 1494 | <span style="color:blue">level = ISR_disable()</span> |
| 1495 | if thread == NULL: |
| 1496 | sync_status = wait_queue.get_sync_status() |
| 1497 |             if sync_status == TIMEOUT or sync_status == NOTHING_HAPPENED:
| 1498 | wait_queue.set_sync_status(SATISFIED) |
| 1499 | thread = executing |
| 1500 | <span style="color:blue">ISR_enable(level)</span> |
| 1501 | if thread != NULL: |
| 1502 | mtx.new_holder(thread) |
| 1503 | if mtx.use_ceiling_protocol(): |
| 1504 | thread.boost_priority(mtx.get_ceiling()) |
| 1505 | else: |
| 1506 | mtx.unlock() |
| 1507 | thread_dispatch_enable() |
| 1508 | |
| 1509 | mutex_timeout(thread, mtx): |
| 1510 | <span style="color:red">level = ISR_disable()</span> |
| 1511 | wait_queue = thread.get_wait_queue() |
| 1512 | if wait_queue != NULL: |
| 1513 | sync_status = wait_queue.get_sync_status() |
| 1514 | if sync_status != SYNCHRONIZED and thread.is_executing(): |
| 1515 | if sync_status != SATISFIED: |
| 1516 | wait_queue.set_sync_status(TIMEOUT) |
| 1517 |                 wait_control = thread.get_wait_control()
| 1518 | wait_control.set_status(TIMEOUT) |
| 1519 | <span style="color:red">ISR_enable(level)</span> |
| 1520 | else: |
| 1521 | <span style="color:red">ISR_enable(level)</span> |
| 1522 |         <span style="color:red">ISR_enable(level)</span>
| 1523 | </pre> |
| 1524 | }}} |