Changeset 5bd822a7 in rtems
- Timestamp:
- 11/26/14 10:51:34 (9 years ago)
- Branches:
- 4.11, 5, master
- Children:
- 0ff1c29
- Parents:
- 79569ae
- git-author:
- Sebastian Huber <sebastian.huber@…> (11/26/14 10:51:34)
- git-committer:
- Sebastian Huber <sebastian.huber@…> (11/27/14 09:33:31)
- Files:
-
- 3 edited
Legend:
- Unmodified
- Added
- Removed
-
cpukit/score/include/rtems/score/schedulerimpl.h
r79569ae r5bd822a7 950 950 * @brief Use an idle thread for this scheduler node. 951 951 * 952 * A thread in the SCHEDULER_HELP_ACTIVE_OWNER owner state may use an idle 953 * thread for the scheduler node owned by itself in case it executes currently 954 * using another scheduler node or in case it is in a blocking state. 952 * A thread in the SCHEDULER_HELP_ACTIVE_OWNER or SCHEDULER_HELP_ACTIVE_RIVAL 953 * helping state may use an idle thread for the scheduler node owned by itself 954 * in case it executes currently using another scheduler node or in case it is 955 * in a blocking state. 955 956 * 956 957 * @param[in] context The scheduler instance context. … … 966 967 Thread_Control *idle = ( *get_idle_thread )( context ); 967 968 968 _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ); 969 _Assert( 970 node->help_state == SCHEDULER_HELP_ACTIVE_OWNER 971 || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL 972 ); 969 973 _Assert( _Scheduler_Node_get_idle( node ) == NULL ); 970 974 _Assert( … … 1010 1014 if ( user->Scheduler.state == THREAD_SCHEDULER_READY ) { 1011 1015 _Scheduler_Thread_set_scheduler_and_node( user, node, owner ); 1016 } else if ( owner->Scheduler.state == THREAD_SCHEDULER_BLOCKED ) { 1017 _Scheduler_Use_idle_thread( context, node, get_idle_thread ); 1012 1018 } else { 1013 1019 _Scheduler_Node_set_user( node, owner ); … … 1073 1079 * 1074 1080 * @param[in] context The scheduler instance context. 1081 * @param[in] thread The thread which wants to get blocked referencing this 1082 * node. This is not necessarily the user of this node in case the node 1083 * participates in the scheduler helping protocol. 1075 1084 * @param[in] node The node which wants to get blocked. 1076 1085 * @param[in] is_scheduled This node is scheduled. 
… … 1088 1097 ) 1089 1098 { 1090 bool block; 1091 Thread_Control *old_user = _Scheduler_Node_get_user( node ); 1092 Thread_Control *new_user = NULL; 1093 1094 _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_BLOCKED ); 1095 1096 if ( is_scheduled ) { 1097 if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) { 1099 Thread_Control *old_user; 1100 Thread_Control *new_user; 1101 1102 _Scheduler_Thread_change_state( thread, THREAD_SCHEDULER_BLOCKED ); 1103 1104 if ( node->help_state == SCHEDULER_HELP_YOURSELF ) { 1105 _Assert( thread == _Scheduler_Node_get_user( node ) ); 1106 1107 return true; 1108 } 1109 1110 new_user = NULL; 1111 1112 if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) { 1113 if ( is_scheduled ) { 1114 _Assert( thread == _Scheduler_Node_get_user( node ) ); 1115 old_user = thread; 1098 1116 new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread ); 1099 } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) { 1100 Thread_Control *owner = _Scheduler_Node_get_owner( node ); 1101 1102 if ( thread == old_user && owner != old_user ) { 1103 new_user = owner; 1104 _Scheduler_Node_set_user( node, new_user ); 1117 } 1118 } else if ( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ) { 1119 if ( is_scheduled ) { 1120 old_user = _Scheduler_Node_get_user( node ); 1121 1122 if ( thread == old_user ) { 1123 Thread_Control *owner = _Scheduler_Node_get_owner( node ); 1124 1125 if ( 1126 thread != owner 1127 && owner->Scheduler.state == THREAD_SCHEDULER_READY 1128 ) { 1129 new_user = owner; 1130 _Scheduler_Node_set_user( node, new_user ); 1131 } else { 1132 new_user = _Scheduler_Use_idle_thread( context, node, get_idle_thread ); 1133 } 1105 1134 } 1106 1135 } 1136 } else { 1137 /* Not implemented, this is part of the OMIP support path. 
*/ 1138 _Assert(0); 1107 1139 } 1108 1140 … … 1113 1145 _Thread_Set_CPU( new_user, cpu ); 1114 1146 _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user ); 1115 1116 block = false; 1117 } else { 1118 block = true; 1119 } 1120 1121 return block; 1147 } 1148 1149 return false; 1122 1150 } 1123 1151 … … 1147 1175 Thread_Control *old_user = _Scheduler_Node_get_user( node ); 1148 1176 Per_CPU_Control *cpu = _Thread_Get_CPU( old_user ); 1177 Thread_Control *idle = _Scheduler_Release_idle_thread( 1178 context, 1179 node, 1180 release_idle_thread 1181 ); 1182 Thread_Control *owner = _Scheduler_Node_get_owner( node ); 1183 Thread_Control *new_user; 1149 1184 1150 1185 if ( node->help_state == SCHEDULER_HELP_ACTIVE_OWNER ) { 1151 Thread_Control *idle = _Scheduler_Release_idle_thread(1152 context,1153 node,1154 release_idle_thread1155 );1156 1157 1186 _Assert( idle != NULL ); 1158 (void) idle; 1187 new_user = the_thread; 1188 } else if ( idle != NULL ) { 1189 _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ); 1190 new_user = the_thread; 1191 } else if ( the_thread != owner ) { 1192 _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ); 1193 _Assert( old_user != the_thread ); 1194 _Scheduler_Thread_change_state( owner, THREAD_SCHEDULER_READY ); 1195 new_user = the_thread; 1196 _Scheduler_Node_set_user( node, new_user ); 1159 1197 } else { 1160 1198 _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ); 1161 1162 _Scheduler_Thread_change_state( old_user, THREAD_SCHEDULER_READY ); 1163 _Scheduler_Node_set_user( node, the_thread ); 1164 } 1165 1166 _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_SCHEDULED ); 1167 _Thread_Set_CPU( the_thread, cpu ); 1168 _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, the_thread ); 1199 _Assert( old_user != the_thread ); 1200 _Scheduler_Thread_change_state( the_thread, THREAD_SCHEDULER_READY ); 1201 new_user = NULL; 1202 } 1203 1204 if ( new_user != NULL ) { 1205 _Scheduler_Thread_change_state( new_user, 
THREAD_SCHEDULER_SCHEDULED ); 1206 _Thread_Set_CPU( new_user, cpu ); 1207 _Thread_Dispatch_update_heir( _Per_CPU_Get(), cpu, new_user ); 1208 } 1169 1209 1170 1210 unblock = false; … … 1244 1284 new_user = needs_help; 1245 1285 } else { 1246 _Assert( node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL ); 1286 _Assert( 1287 node->help_state == SCHEDULER_HELP_ACTIVE_OWNER 1288 || node->help_state == SCHEDULER_HELP_ACTIVE_RIVAL 1289 ); 1247 1290 _Assert( offers_help->Scheduler.node == offers_help->Scheduler.own_node ); 1248 1291 -
testsuites/smptests/smpmrsp01/init.c
r79569ae r5bd822a7 55 55 rtems_id main_task_id; 56 56 rtems_id migration_task_id; 57 rtems_id high_task_id; 57 rtems_id low_task_id[2]; 58 rtems_id high_task_id[2]; 58 59 rtems_id timer_id; 59 60 rtems_id counting_sem_id; … … 69 70 size_t switch_index; 70 71 switch_event switch_events[32]; 71 volatile bool run; 72 volatile bool high_run[2]; 73 volatile bool low_run[2]; 72 74 } test_context; 73 75 … … 718 720 sc = rtems_semaphore_delete(sem_c_id); 719 721 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 720 721 change_prio(RTEMS_SELF, 2);722 rtems_test_assert(sc == RTEMS_SUCCESSFUL);723 722 } 724 723 … … 754 753 barrier(ctx, &barrier_state); 755 754 756 rtems_task_suspend(RTEMS_SELF); 757 rtems_test_assert(0); 755 while (true) { 756 /* Do nothing */ 757 } 758 758 } 759 759 … … 764 764 765 765 sc = rtems_task_start( 766 ctx->high_task_id ,766 ctx->high_task_id[0], 767 767 run_task, 768 (rtems_task_argument) &ctx-> run769 ); 770 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 771 772 sc = rtems_task_suspend(ctx->high_task_id );773 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 774 775 sc = rtems_task_resume(ctx->high_task_id );768 (rtems_task_argument) &ctx->high_run[0] 769 ); 770 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 771 772 sc = rtems_task_suspend(ctx->high_task_id[0]); 773 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 774 775 sc = rtems_task_resume(ctx->high_task_id[0]); 776 776 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 777 777 … … 784 784 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 785 785 786 sc = rtems_task_suspend(ctx->high_task_id );786 sc = rtems_task_suspend(ctx->high_task_id[0]); 787 787 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 788 788 } … … 803 803 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 804 804 805 rtems_test_assert(!ctx->run); 806 } 807 808 static void unblock_ready_rival(test_context *ctx) 805 rtems_test_assert(!ctx->high_run[0]); 806 } 807 808 static void unblock_owner_before_rival_timer(rtems_id timer_id, void *arg) 809 { 810 test_context *ctx = arg; 811 
rtems_status_code sc; 812 813 sc = rtems_task_suspend(ctx->high_task_id[0]); 814 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 815 816 sc = rtems_task_suspend(ctx->high_task_id[1]); 817 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 818 819 } 820 821 static void unblock_owner_after_rival_timer(rtems_id timer_id, void *arg) 822 { 823 test_context *ctx = arg; 824 rtems_status_code sc; 825 826 sc = rtems_task_suspend(ctx->high_task_id[1]); 827 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 828 829 sc = rtems_task_suspend(ctx->high_task_id[0]); 830 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 831 832 } 833 834 static void various_block_unblock(test_context *ctx) 809 833 { 810 834 rtems_status_code sc; 811 835 SMP_barrier_State barrier_state = SMP_BARRIER_STATE_INITIALIZER; 812 813 sc = rtems_task_start(ctx->worker_ids[0], ready_unlock_worker, 0); 814 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 815 837 816 838 /* Worker obtain (F) */ … … 823 844 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 824 845 825 sc = rtems_task_set_scheduler(ctx->high_task_id, ctx->scheduler_ids[1]); 826 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 827 828 sc = rtems_task_resume(ctx->high_task_id); 829 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 830 831 while (!ctx->run) { 846 sc = rtems_task_wake_after(2); 847 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 848 849 sc = rtems_task_start( 850 ctx->high_task_id[1], 851 run_task, 852 (rtems_task_argument) &ctx->high_run[1] 853 ); 854 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 855 856 while (!ctx->high_run[1]) { 832 857 /* Do nothing */ 833 858 } … … 836 861 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 837 862 838 sc = rtems_task_suspend(ctx->high_task_id); 863 /* Try to schedule a blocked active rival */ 864 865 sc = rtems_task_suspend(ctx->worker_ids[0]); 866 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 867 868 sc = rtems_task_suspend(ctx->high_task_id[1]); 869 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 870 871 sc = rtems_task_resume(ctx->high_task_id[1]); 872 rtems_test_assert(sc ==
RTEMS_SUCCESSFUL); 873 874 sc = rtems_task_resume(ctx->worker_ids[0]); 875 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 876 877 rtems_test_assert(rtems_get_current_processor() == 0); 878 879 /* Use node of the active rival */ 880 881 sc = rtems_task_suspend(ctx->high_task_id[1]); 882 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 883 884 sc = rtems_task_resume(ctx->high_task_id[0]); 885 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 886 887 rtems_test_assert(rtems_get_current_processor() == 1); 888 889 sc = rtems_task_suspend(ctx->worker_ids[0]); 890 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 891 892 sc = rtems_task_resume(ctx->worker_ids[0]); 893 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 894 895 /* 896 * Try to schedule an active rival with an already scheduled active owner 897 * user. 898 */ 899 900 sc = rtems_timer_fire_after( 901 ctx->timer_id, 902 2, 903 unblock_owner_before_rival_timer, 904 ctx 905 ); 906 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 907 908 /* This will take the processor away from us, the timer will help later */ 909 sc = rtems_task_resume(ctx->high_task_id[1]); 910 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 911 912 /* 913 * Try to schedule an active owner with an already scheduled active rival 914 * user. 
915 */ 916 917 sc = rtems_task_resume(ctx->high_task_id[0]); 918 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 919 920 sc = rtems_timer_fire_after( 921 ctx->timer_id, 922 2, 923 unblock_owner_after_rival_timer, 924 ctx 925 ); 926 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 927 928 /* This will take the processor away from us, the timer will help later */ 929 sc = rtems_task_resume(ctx->high_task_id[1]); 839 930 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 840 931 … … 842 933 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 843 934 935 rtems_test_assert(rtems_get_current_processor() == 0); 936 844 937 assert_prio(RTEMS_SELF, 4); 845 938 846 /* Worker done ( F) */939 /* Worker done (G) */ 847 940 barrier(ctx, &barrier_state); 848 941 } 849 942 850 static void test_mrsp_unblock_ready(test_context *ctx) 851 { 852 rtems_status_code sc; 853 854 puts("test MrsP unblock ready"); 855 856 ctx->run = false; 943 static void start_low_task(test_context *ctx, size_t i) 944 { 945 rtems_status_code sc; 946 947 sc = rtems_task_create( 948 rtems_build_name('L', 'O', 'W', '0' + i), 949 5, 950 RTEMS_MINIMUM_STACK_SIZE, 951 RTEMS_DEFAULT_MODES, 952 RTEMS_DEFAULT_ATTRIBUTES, 953 &ctx->low_task_id[i] 954 ); 955 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 956 957 sc = rtems_task_set_scheduler(ctx->low_task_id[i], ctx->scheduler_ids[i]); 958 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 959 960 sc = rtems_task_start( 961 ctx->low_task_id[i], 962 run_task, 963 (rtems_task_argument) &ctx->low_run[i] 964 ); 965 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 966 } 967 968 static void test_mrsp_various_block_and_unblock(test_context *ctx) 969 { 970 rtems_status_code sc; 971 972 puts("test MrsP various block and unblock"); 857 973 858 974 change_prio(RTEMS_SELF, 4); 975 976 reset_switch_events(ctx); 977 978 ctx->low_run[0] = false; 979 ctx->low_run[1] = false; 980 ctx->high_run[0] = false; 981 ctx->high_run[1] = false; 859 982 860 983 sc = rtems_semaphore_create( … … 871 994 872 995 sc = rtems_task_create( 873 
rtems_build_name('H', 'I', 'G', ' H'),996 rtems_build_name('H', 'I', 'G', '0'), 874 997 2, 875 998 RTEMS_MINIMUM_STACK_SIZE, 876 999 RTEMS_DEFAULT_MODES, 877 1000 RTEMS_DEFAULT_ATTRIBUTES, 878 &ctx->high_task_id 879 ); 1001 &ctx->high_task_id[0] 1002 ); 1003 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 1004 1005 sc = rtems_task_create( 1006 rtems_build_name('H', 'I', 'G', '1'), 1007 2, 1008 RTEMS_MINIMUM_STACK_SIZE, 1009 RTEMS_DEFAULT_MODES, 1010 RTEMS_DEFAULT_ATTRIBUTES, 1011 &ctx->high_task_id[1] 1012 ); 1013 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 1014 1015 sc = rtems_task_set_scheduler(ctx->high_task_id[1], ctx->scheduler_ids[1]); 880 1016 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 881 1017 … … 893 1029 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 894 1030 1031 sc = rtems_task_start(ctx->worker_ids[0], ready_unlock_worker, 0); 1032 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 1033 895 1034 sc = rtems_timer_create( 896 1035 rtems_build_name('T', 'I', 'M', 'R'), … … 899 1038 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 900 1039 1040 /* In case these tasks run, then we have a MrsP protocol violation */ 1041 start_low_task(ctx, 0); 1042 start_low_task(ctx, 1); 1043 901 1044 unblock_ready_owner(ctx); 902 unblock_ready_rival(ctx); 1045 various_block_unblock(ctx); 1046 1047 rtems_test_assert(!ctx->low_run[0]); 1048 rtems_test_assert(!ctx->low_run[1]); 1049 1050 print_switch_events(ctx); 903 1051 904 1052 sc = rtems_timer_delete(ctx->timer_id); 905 1053 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 906 1054 1055 sc = rtems_task_delete(ctx->high_task_id[0]); 1056 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 1057 1058 sc = rtems_task_delete(ctx->high_task_id[1]); 1059 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 1060 907 1061 sc = rtems_task_delete(ctx->worker_ids[0]); 908 1062 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 909 1063 910 sc = rtems_task_delete(ctx->high_task_id); 1064 sc = rtems_task_delete(ctx->low_task_id[0]); 1065 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 1066 1067 
sc = rtems_task_delete(ctx->low_task_id[1]); 911 1068 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 912 1069 913 1070 sc = rtems_semaphore_delete(ctx->mrsp_ids[0]); 914 rtems_test_assert(sc == RTEMS_SUCCESSFUL);915 916 change_prio(RTEMS_SELF, 2);917 1071 rtems_test_assert(sc == RTEMS_SUCCESSFUL); 918 1072 } … … 1422 1576 test_mrsp_deadlock_error(ctx); 1423 1577 test_mrsp_multiple_obtain(); 1424 test_mrsp_ unblock_ready(ctx);1578 test_mrsp_various_block_and_unblock(ctx); 1425 1579 test_mrsp_obtain_and_sleep_and_release(ctx); 1426 1580 test_mrsp_obtain_and_release_with_help(ctx); -
testsuites/smptests/smpmrsp01/smpmrsp01.scn
r79569ae r5bd822a7 6 6 test MrsP deadlock error 7 7 test MrsP multiple obtain 8 test MrsP unblock ready 8 test MrsP various block and unblock 9 [1] IDLE -> WORK (prio 4, node WORK) 10 [0] MAIN -> IDLE (prio 3, node MAIN) 11 [0] IDLE -> MAIN (prio 3, node MAIN) 12 [1] WORK -> IDLE (prio 3, node WORK) 13 [1] IDLE -> HIG1 (prio 2, node HIG1) 14 [1] HIG1 -> IDLE (prio 3, node WORK) 15 [1] IDLE -> HIG1 (prio 2, node HIG1) 16 [1] HIG1 -> WORK (prio 3, node WORK) 17 [1] WORK -> MAIN (prio 3, node WORK) 18 [0] MAIN -> HIG0 (prio 2, node HIG0) 19 [1] MAIN -> HIG1 (prio 2, node HIG1) 20 [1] HIG1 -> WORK (prio 3, node WORK) 21 [0] HIG0 -> MAIN (prio 3, node MAIN) 22 [1] WORK -> MAIN (prio 3, node WORK) 23 [0] MAIN -> HIG0 (prio 2, node HIG0) 24 [1] MAIN -> HIG1 (prio 2, node HIG1) 25 [1] HIG1 -> MAIN (prio 3, node WORK) 26 [0] HIG0 -> IDLE (prio 4, node MAIN) 27 [1] MAIN -> WORK (prio 3, node WORK) 28 [0] IDLE -> MAIN (prio 4, node MAIN) 9 29 test MrsP obtain and sleep and release 10 30 [0] MAIN -> RUN (prio 2, node RUN)
Note: See TracChangeset
for help on using the changeset viewer.