1 | /** |
---|
2 | * @file |
---|
3 | * |
---|
4 | * @brief CPU Usage Top |
---|
5 | * @ingroup libmisc_cpuuse CPU Usage |
---|
6 | */ |
---|
7 | |
---|
8 | /* |
---|
9 | * COPYRIGHT (c) 2015. Chris Johns <chrisj@rtems.org> |
---|
10 | * |
---|
11 | * COPYRIGHT (c) 2014. |
---|
12 | * On-Line Applications Research Corporation (OAR). |
---|
13 | * |
---|
14 | * The license and distribution terms for this file may be |
---|
15 | * found in the file LICENSE in this distribution or at |
---|
16 | * http://www.rtems.org/license/LICENSE. |
---|
17 | */ |
---|
18 | |
---|
19 | /* |
---|
20 | * Based on the old capture engine ct-load. |
---|
21 | */ |
---|
22 | |
---|
23 | #ifdef HAVE_CONFIG_H |
---|
24 | #include "config.h" |
---|
25 | #endif |
---|
26 | |
---|
27 | #include <stdbool.h> |
---|
28 | #include <string.h> |
---|
29 | #include <stdlib.h> |
---|
30 | #include <stdio.h> |
---|
31 | #include <ctype.h> |
---|
32 | #include <inttypes.h> |
---|
33 | |
---|
34 | #include <rtems/cpuuse.h> |
---|
35 | #include <rtems/printer.h> |
---|
36 | #include <rtems/malloc.h> |
---|
37 | #include <rtems/score/objectimpl.h> |
---|
38 | #include <rtems/score/protectedheap.h> |
---|
39 | #include <rtems/score/threadimpl.h> |
---|
40 | #include <rtems/score/todimpl.h> |
---|
41 | #include <rtems/score/watchdogimpl.h> |
---|
42 | #include <rtems/score/wkspace.h> |
---|
43 | |
---|
44 | #include "cpuuseimpl.h" |
---|
45 | |
---|
46 | /* |
---|
47 | * Use a struct for all data to allow more than one top and to support the |
---|
48 | * thread iterator. |
---|
49 | */ |
---|
typedef struct
{
  volatile bool thread_run;          /* Worker loops while true; cleared to stop it. */
  volatile bool thread_active;       /* Set by worker on entry, cleared on exit. */
  volatile bool single_page;         /* Redraw in place with ANSI clear when true. */
  volatile uint32_t sort_order;      /* One of the RTEMS_TOP_SORT_* values. */
  volatile uint32_t poll_rate_usecs; /* Poll rate; NOTE(review): it is passed to
                                        RTEMS_MILLISECONDS_TO_TICKS, so despite
                                        the name the value is milliseconds. */
  volatile uint32_t show;            /* Max task lines to display; 0 shows all. */
  const rtems_printer* printer;      /* Printer used for all report output. */
  Timestamp_Control zero;            /* A zeroed timestamp for comparisons. */
  Timestamp_Control uptime;          /* Uptime at the start of this sample. */
  Timestamp_Control last_uptime;     /* Uptime at the start of the last sample. */
  Timestamp_Control period;          /* Time between this and the last sample. */
  int task_count;                    /* Number of tasks. */
  int last_task_count;               /* Number of tasks in the previous sample. */
  int task_size;                     /* The size of the arrays */
  Thread_Control** tasks;            /* List of tasks in this sample. */
  Thread_Control** last_tasks;       /* List of tasks in the last sample. */
  Timestamp_Control* usage;          /* Usage of task's in this sample. */
  Timestamp_Control* last_usage;     /* Usage of task's in the last sample. */
  Timestamp_Control* current_usage;  /* Current usage for this sample. */
  Timestamp_Control total;           /* Total run time, should equal the uptime. */
  Timestamp_Control idle;            /* Time spent in idle. */
  Timestamp_Control current;         /* Current time run in this period. */
  Timestamp_Control current_idle;    /* Current time in idle this period. */
  uint32_t stack_size;               /* Size of stack allocated. */
} rtems_cpu_usage_data;
---|
77 | |
---|
78 | /* |
---|
79 | * Sort orders. |
---|
80 | */ |
---|
81 | #define RTEMS_TOP_SORT_ID (0) |
---|
82 | #define RTEMS_TOP_SORT_REAL_PRI (1) |
---|
83 | #define RTEMS_TOP_SORT_CURRENT_PRI (2) |
---|
84 | #define RTEMS_TOP_SORT_TOTAL (3) |
---|
85 | #define RTEMS_TOP_SORT_CURRENT (4) |
---|
86 | #define RTEMS_TOP_SORT_MAX (4) |
---|
87 | |
---|
/*
 * Compare two uint32_t values through pointers; true when equal.
 */
static inline bool equal_to_uint32_t( uint32_t * lhs, uint32_t * rhs )
{
  return *lhs == *rhs;
}
---|
95 | |
---|
/*
 * Compare two uint32_t values through pointers; true when *lhs < *rhs.
 */
static inline bool less_than_uint32_t( uint32_t * lhs, uint32_t * rhs )
{
  return *lhs < *rhs;
}
---|
103 | |
---|
104 | #define CPU_usage_Equal_to( _lhs, _rhs ) _Timestamp_Equal_to( _lhs, _rhs ) |
---|
105 | #define CPU_usage_Set_to_zero( _time ) _Timestamp_Set_to_zero( _time ) |
---|
106 | #define CPU_usage_Less_than( _lhs, _rhs ) _Timestamp_Less_than( _lhs, _rhs ) |
---|
107 | |
---|
108 | static void |
---|
109 | print_memsize(rtems_cpu_usage_data* data, const uintptr_t size, const char* label) |
---|
110 | { |
---|
111 | if (size > (1024 * 1024)) |
---|
112 | rtems_printf(data->printer, "%4" PRIuPTR "M %s", size / (1024 * 1024), label); |
---|
113 | else if (size > 1024) |
---|
114 | rtems_printf(data->printer, "%4" PRIuPTR "K %s", size / 1024, label); |
---|
115 | else |
---|
116 | rtems_printf(data->printer, "%4" PRIuPTR " %s", size, label); |
---|
117 | } |
---|
118 | |
---|
/*
 * Print a timestamp broken down into days/hours/minutes/seconds.microseconds,
 * padding with spaces up to @a length columns. Only units that are non-zero
 * at the next level up are printed, e.g. "2m35.000123".
 *
 * Returns the number of characters printed, excluding any padding.
 */
static int
print_time(rtems_cpu_usage_data* data,
           const Timestamp_Control* time,
           const int length)
{
  uint32_t secs = _Timestamp_Get_seconds( time );
  uint32_t usecs = _Timestamp_Get_nanoseconds( time ) / TOD_NANOSECONDS_PER_MICROSECOND;
  int len = 0;

  if (secs > 60)
  {
    uint32_t mins = secs / 60;
    if (mins > 60)
    {
      uint32_t hours = mins / 60;
      if (hours > 24)
      {
        /* Days, then reduce hours to the remainder within the day. */
        len += rtems_printf(data->printer, "%" PRIu32 "d", hours / 24);
        hours %= 24;
      }
      len += rtems_printf(data->printer, "%" PRIu32 "hr", hours);
      mins %= 60;
    }
    len += rtems_printf(data->printer, "%" PRIu32 "m", mins);
    secs %= 60;
  }
  /* Always print seconds with 6-digit fractional microseconds. */
  len += rtems_printf(data->printer, "%" PRIu32 ".%06" PRIu32, secs, usecs);

  /* Pad to the requested column width so the table stays aligned. */
  if (len < length)
    rtems_printf(data->printer, "%*c", length - len, ' ');

  return len;
}
---|
152 | |
---|
153 | /* |
---|
154 | * Count the number of tasks. |
---|
155 | */ |
---|
156 | static bool |
---|
157 | task_counter(Thread_Control *thrad, void* arg) |
---|
158 | { |
---|
159 | rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg; |
---|
160 | ++data->task_count; |
---|
161 | |
---|
162 | return false; |
---|
163 | } |
---|
164 | |
---|
165 | /* |
---|
166 | * Create the sorted table with the current and total usage. |
---|
167 | */ |
---|
/*
 * Thread iterator callback: accumulate the total and per-period usage for
 * @a thread and insert it into the data->tasks table kept ordered by the
 * configured sort key. Always returns false so iteration visits every
 * thread.
 */
static bool
task_usage(Thread_Control* thread, void* arg)
{
  rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg;
  Timestamp_Control usage;
  Timestamp_Control current = data->zero;
  int j;

  /* Running total of all task stacks, reset each sample by the caller. */
  data->stack_size += thread->Start.Initial_stack.size;

  _Thread_Get_CPU_time_used(thread, &usage);

  /*
   * The per-period usage is the delta against this thread's usage in the
   * previous sample; a thread not present last time keeps current == zero.
   */
  for (j = 0; j < data->last_task_count; j++)
  {
    if (thread == data->last_tasks[j])
    {
      _Timestamp_Subtract(&data->last_usage[j], &usage, &current);
      break;
    }
  }

  /*
   * When not using nanosecond CPU usage resolution, we have to count the
   * number of "ticks" we gave credit for to give the user a rough guideline as
   * to what each number means proportionally.
   */
  _Timestamp_Add_to(&data->total, &usage);
  _Timestamp_Add_to(&data->current, &current);

  /*
   * NOTE(review): the idle task is identified by a hard-coded object id
   * (0x09010001); confirm this matches the idle task id on all configurations.
   */
  if (thread->Object.id == 0x09010001)
  {
    data->idle = usage;
    data->current_idle = current;
  }

  /*
   * Create the tasks to display, sorting as we create. Walk the table until
   * the slot where this thread sorts before the existing entry, shift the
   * tail down one slot (the arrays have task_count + 1 entries so the shift
   * is in bounds) and insert.
   */
  for (j = 0; j < data->task_count; j++)
  {
    if (data->tasks[j])
    {
      int k;

      /*
       * Sort on the current load. The cases below fall through, so the
       * later keys act as secondary comparisons after the selected one.
       */
      switch (data->sort_order)
      {
        default:
          data->sort_order = RTEMS_TOP_SORT_CURRENT;
          /* drop through */
        case RTEMS_TOP_SORT_CURRENT:
          if (CPU_usage_Equal_to(&current, &data->zero) ||
              CPU_usage_Less_than(&current, &data->current_usage[j]))
            continue;
          /* fall through */
        case RTEMS_TOP_SORT_TOTAL:
          if (CPU_usage_Equal_to(&usage, &data->zero) ||
              CPU_usage_Less_than(&usage, &data->usage[j]))
            continue;
          /* fall through */
        case RTEMS_TOP_SORT_REAL_PRI:
          if (thread->Real_priority.priority > data->tasks[j]->Real_priority.priority)
            continue;
          /* fall through */
        case RTEMS_TOP_SORT_CURRENT_PRI:
          if (
            _Thread_Get_priority( thread )
              > _Thread_Get_priority( data->tasks[j] )
          ) {
            continue;
          }
          /* fall through */
        case RTEMS_TOP_SORT_ID:
          if (thread->Object.id < data->tasks[j]->Object.id)
            continue;
      }

      /* Shift the tail of the table down to open the insertion slot. */
      for (k = (data->task_count - 1); k >= j; k--)
      {
        data->tasks[k + 1] = data->tasks[k];
        data->usage[k + 1] = data->usage[k];
        data->current_usage[k + 1] = data->current_usage[k];
      }
    }
    data->tasks[j] = thread;
    data->usage[j] = usage;
    data->current_usage[j] = current;
    break;
  }

  return false;
}
---|
258 | |
---|
259 | /* |
---|
260 | * rtems_cpuusage_top_thread |
---|
261 | * |
---|
262 | * This function displays the load of the tasks on an ANSI terminal. |
---|
263 | */ |
---|
264 | |
---|
265 | static void |
---|
266 | rtems_cpuusage_top_thread (rtems_task_argument arg) |
---|
267 | { |
---|
268 | rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg; |
---|
269 | char name[13]; |
---|
270 | int i; |
---|
271 | Heap_Information_block wksp; |
---|
272 | uint32_t ival, fval; |
---|
273 | int task_count; |
---|
274 | rtems_event_set out; |
---|
275 | rtems_status_code sc; |
---|
276 | bool first_time = true; |
---|
277 | |
---|
278 | data->thread_active = true; |
---|
279 | |
---|
280 | _TOD_Get_uptime(&data->last_uptime); |
---|
281 | |
---|
282 | CPU_usage_Set_to_zero(&data->zero); |
---|
283 | |
---|
284 | while (data->thread_run) |
---|
285 | { |
---|
286 | Timestamp_Control uptime_at_last_reset = CPU_usage_Uptime_at_last_reset; |
---|
287 | size_t tasks_size; |
---|
288 | size_t usage_size; |
---|
289 | Timestamp_Control load; |
---|
290 | |
---|
291 | data->task_count = 0; |
---|
292 | _Thread_Iterate(task_counter, data); |
---|
293 | |
---|
294 | tasks_size = sizeof(Thread_Control*) * (data->task_count + 1); |
---|
295 | usage_size = sizeof(Timestamp_Control) * (data->task_count + 1); |
---|
296 | |
---|
297 | if (data->task_count > data->task_size) |
---|
298 | { |
---|
299 | data->tasks = realloc(data->tasks, tasks_size); |
---|
300 | data->usage = realloc(data->usage, usage_size); |
---|
301 | data->current_usage = realloc(data->current_usage, usage_size); |
---|
302 | if ((data->tasks == NULL) || (data->usage == NULL) || (data->current_usage == NULL)) |
---|
303 | { |
---|
304 | rtems_printf(data->printer, "top worker: error: no memory\n"); |
---|
305 | data->thread_run = false; |
---|
306 | break; |
---|
307 | } |
---|
308 | } |
---|
309 | |
---|
310 | memset(data->tasks, 0, tasks_size); |
---|
311 | memset(data->usage, 0, usage_size); |
---|
312 | memset(data->current_usage, 0, usage_size); |
---|
313 | |
---|
314 | _Timestamp_Set_to_zero(&data->total); |
---|
315 | _Timestamp_Set_to_zero(&data->current); |
---|
316 | data->stack_size = 0; |
---|
317 | |
---|
318 | _TOD_Get_uptime(&data->uptime); |
---|
319 | _Timestamp_Subtract(&uptime_at_last_reset, &data->uptime, &data->uptime); |
---|
320 | _Timestamp_Subtract(&data->last_uptime, &data->uptime, &data->period); |
---|
321 | data->last_uptime = data->uptime; |
---|
322 | |
---|
323 | _Thread_Iterate(task_usage, data); |
---|
324 | |
---|
325 | if (data->task_count > data->task_size) |
---|
326 | { |
---|
327 | data->last_tasks = realloc(data->last_tasks, tasks_size); |
---|
328 | data->last_usage = realloc(data->last_usage, usage_size); |
---|
329 | if ((data->last_tasks == NULL) || (data->last_usage == NULL)) |
---|
330 | { |
---|
331 | rtems_printf(data->printer, "top worker: error: no memory\n"); |
---|
332 | data->thread_run = false; |
---|
333 | break; |
---|
334 | } |
---|
335 | data->task_size = data->task_count; |
---|
336 | } |
---|
337 | |
---|
338 | memcpy(data->last_tasks, data->tasks, tasks_size); |
---|
339 | memcpy(data->last_usage, data->usage, usage_size); |
---|
340 | data->last_task_count = data->task_count; |
---|
341 | |
---|
342 | /* |
---|
343 | * We need to loop again to get suitable current usage values as we need a |
---|
344 | * last sample to work. |
---|
345 | */ |
---|
346 | if (first_time) |
---|
347 | { |
---|
348 | rtems_task_wake_after(RTEMS_MILLISECONDS_TO_TICKS(500)); |
---|
349 | first_time = false; |
---|
350 | continue; |
---|
351 | } |
---|
352 | |
---|
353 | _Protected_heap_Get_information(&_Workspace_Area, &wksp); |
---|
354 | |
---|
355 | if (data->single_page) |
---|
356 | rtems_printf(data->printer, |
---|
357 | "\x1b[H\x1b[J" |
---|
358 | " ENTER:Exit SPACE:Refresh" |
---|
359 | " S:Scroll A:All <>:Order +/-:Lines\n"); |
---|
360 | rtems_printf(data->printer, "\n"); |
---|
361 | |
---|
362 | /* |
---|
363 | * Uptime and period of this sample. |
---|
364 | */ |
---|
365 | rtems_printf(data->printer, "Uptime: "); |
---|
366 | print_time(data, &data->uptime, 20); |
---|
367 | rtems_printf(data->printer, " Period: "); |
---|
368 | print_time(data, &data->period, 20); |
---|
369 | |
---|
370 | /* |
---|
371 | * Task count, load and idle levels. |
---|
372 | */ |
---|
373 | rtems_printf(data->printer, "\nTasks: %4i ", data->task_count); |
---|
374 | |
---|
375 | _Timestamp_Subtract(&data->idle, &data->total, &load); |
---|
376 | _Timestamp_Divide(&load, &data->uptime, &ival, &fval); |
---|
377 | rtems_printf(data->printer, |
---|
378 | "Load Average: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval); |
---|
379 | _Timestamp_Subtract(&data->current_idle, &data->current, &load); |
---|
380 | _Timestamp_Divide(&load, &data->period, &ival, &fval); |
---|
381 | rtems_printf(data->printer, |
---|
382 | " Load: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval); |
---|
383 | _Timestamp_Divide(&data->current_idle, &data->period, &ival, &fval); |
---|
384 | rtems_printf(data->printer, |
---|
385 | " Idle: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval); |
---|
386 | |
---|
387 | /* |
---|
388 | * Memory usage. |
---|
389 | */ |
---|
390 | if (rtems_configuration_get_unified_work_area()) |
---|
391 | { |
---|
392 | rtems_printf(data->printer, "\nMem: "); |
---|
393 | print_memsize(data, wksp.Free.total, "free"); |
---|
394 | print_memsize(data, wksp.Used.total, "used"); |
---|
395 | } |
---|
396 | else |
---|
397 | { |
---|
398 | Heap_Information_block libc_heap; |
---|
399 | malloc_info(&libc_heap); |
---|
400 | rtems_printf(data->printer, "\nMem: Wksp: "); |
---|
401 | print_memsize(data, wksp.Free.total, "free"); |
---|
402 | print_memsize(data, wksp.Used.total, "used Heap: "); |
---|
403 | print_memsize(data, libc_heap.Free.total, "free"); |
---|
404 | print_memsize(data, libc_heap.Used.total, "used"); |
---|
405 | } |
---|
406 | |
---|
407 | print_memsize(data, data->stack_size, "stack\n"); |
---|
408 | |
---|
409 | rtems_printf(data->printer, |
---|
410 | "\n" |
---|
411 | " ID | NAME | RPRI | CPRI | TIME | TOTAL | CURRENT\n" |
---|
412 | "-%s---------+---------------------+-%s-----%s-----+---------------------+-%s------+--%s----\n", |
---|
413 | data->sort_order == RTEMS_TOP_SORT_ID ? "^^" : "--", |
---|
414 | data->sort_order == RTEMS_TOP_SORT_REAL_PRI ? "^^" : "--", |
---|
415 | data->sort_order == RTEMS_TOP_SORT_CURRENT_PRI ? "^^" : "--", |
---|
416 | data->sort_order == RTEMS_TOP_SORT_TOTAL ? "^^" : "--", |
---|
417 | data->sort_order == RTEMS_TOP_SORT_CURRENT ? "^^" : "--" |
---|
418 | ); |
---|
419 | |
---|
420 | task_count = 0; |
---|
421 | |
---|
422 | for (i = 0; i < data->task_count; i++) |
---|
423 | { |
---|
424 | Thread_Control* thread = data->tasks[i]; |
---|
425 | Timestamp_Control usage; |
---|
426 | Timestamp_Control current_usage; |
---|
427 | |
---|
428 | if (thread == NULL) |
---|
429 | break; |
---|
430 | |
---|
431 | if (data->single_page && (data->show != 0) && (i >= data->show)) |
---|
432 | break; |
---|
433 | |
---|
434 | /* |
---|
435 | * We need to count the number displayed to clear the remainder of the |
---|
436 | * the display. |
---|
437 | */ |
---|
438 | ++task_count; |
---|
439 | |
---|
440 | /* |
---|
441 | * If the API os POSIX print the entry point. |
---|
442 | */ |
---|
443 | rtems_object_get_name(thread->Object.id, sizeof(name), name); |
---|
444 | if (name[0] == '\0') |
---|
445 | snprintf(name, sizeof(name) - 1, "(%p)", thread->Start.Entry.Kinds.Numeric.entry); |
---|
446 | |
---|
447 | rtems_printf(data->printer, |
---|
448 | " 0x%08" PRIx32 " | %-19s | %3" PRId64 " | %3" PRId64 " | ", |
---|
449 | thread->Object.id, |
---|
450 | name, |
---|
451 | thread->Real_priority.priority, |
---|
452 | _Thread_Get_priority(thread)); |
---|
453 | |
---|
454 | usage = data->usage[i]; |
---|
455 | current_usage = data->current_usage[i]; |
---|
456 | |
---|
457 | /* |
---|
458 | * Print the information |
---|
459 | */ |
---|
460 | print_time(data, &usage, 19); |
---|
461 | _Timestamp_Divide(&usage, &data->total, &ival, &fval); |
---|
462 | rtems_printf(data->printer, |
---|
463 | " |%4" PRIu32 ".%03" PRIu32, ival, fval); |
---|
464 | _Timestamp_Divide(¤t_usage, &data->period, &ival, &fval); |
---|
465 | rtems_printf(data->printer, |
---|
466 | " |%4" PRIu32 ".%03" PRIu32 "\n", ival, fval); |
---|
467 | } |
---|
468 | |
---|
469 | if (data->single_page && (data->show != 0) && (task_count < data->show)) |
---|
470 | { |
---|
471 | i = data->show - task_count; |
---|
472 | while (i > 0) |
---|
473 | { |
---|
474 | rtems_printf(data->printer, "\x1b[K\n"); |
---|
475 | i--; |
---|
476 | } |
---|
477 | } |
---|
478 | |
---|
479 | sc = rtems_event_receive(RTEMS_EVENT_1, |
---|
480 | RTEMS_EVENT_ANY, |
---|
481 | RTEMS_MILLISECONDS_TO_TICKS (data->poll_rate_usecs), |
---|
482 | &out); |
---|
483 | if ((sc != RTEMS_SUCCESSFUL) && (sc != RTEMS_TIMEOUT)) |
---|
484 | { |
---|
485 | rtems_printf(data->printer, |
---|
486 | "error: event receive: %s\n", rtems_status_text(sc)); |
---|
487 | break; |
---|
488 | } |
---|
489 | } |
---|
490 | |
---|
491 | free(data->tasks); |
---|
492 | free(data->last_tasks); |
---|
493 | free(data->last_usage); |
---|
494 | free(data->current_usage); |
---|
495 | |
---|
496 | data->thread_active = false; |
---|
497 | |
---|
498 | rtems_task_exit(); |
---|
499 | } |
---|
500 | |
---|
/*
 * Start the interactive top display on the given printer. Spawns a helper
 * task that periodically samples and prints the task table, then runs a
 * console loop on the caller's task handling single-key commands:
 * ENTER/q exits, '<'/'>' change the sort order, 's' toggles single-page
 * scrolling, 'a' toggles showing all tasks, '+'/'-' adjust the line count
 * and SPACE forces a refresh. Blocks until the user exits.
 */
void rtems_cpu_usage_top_with_plugin(
  const rtems_printer *printer
)
{
  rtems_status_code   sc;
  rtems_task_priority priority;
  rtems_name          name;
  rtems_id            id;
  rtems_cpu_usage_data data;
  int                 show_lines = 25;

  memset(&data, 0, sizeof(data));

  data.thread_run = true;
  data.single_page = true;
  data.sort_order = RTEMS_TOP_SORT_CURRENT;
  /*
   * NOTE(review): this value ends up in RTEMS_MILLISECONDS_TO_TICKS in the
   * worker, so despite the field name it is a 3 second (3000 ms) poll rate.
   */
  data.poll_rate_usecs = 3000;
  data.show = show_lines;
  data.printer = printer;

  /* Run the helper at the caller's current priority. */
  sc = rtems_task_set_priority (RTEMS_SELF, RTEMS_CURRENT_PRIORITY, &priority);

  if (sc != RTEMS_SUCCESSFUL)
  {
    rtems_printf (printer,
                  "error: cannot obtain the current priority: %s\n", rtems_status_text (sc));
    return;
  }

  name = rtems_build_name('C', 'P', 'l', 't');

  sc = rtems_task_create (name, priority, 4 * 1024,
                          RTEMS_PREEMPT | RTEMS_TIMESLICE | RTEMS_NO_ASR,
                          RTEMS_FLOATING_POINT | RTEMS_LOCAL,
                          &id);

  if (sc != RTEMS_SUCCESSFUL)
  {
    rtems_printf (printer,
                  "error: cannot create helper thread: %s\n", rtems_status_text (sc));
    return;
  }

  /* The worker receives a pointer to this stack-allocated data; it must
     stop (thread_active cleared) before this function returns. */
  sc = rtems_task_start (id, rtems_cpuusage_top_thread, (rtems_task_argument) &data);
  if (sc != RTEMS_SUCCESSFUL)
  {
    rtems_printf (printer,
                  "error: cannot start helper thread: %s\n", rtems_status_text (sc));
    rtems_task_delete (id);
    return;
  }

  /* Console command loop; each command kicks the worker via RTEMS_EVENT_1. */
  while (true)
  {
    int c = getchar ();

    if ((c == '\r') || (c == '\n') || (c == 'q') || (c == 'Q'))
    {
      int loops = 50;

      data.thread_run = false;

      rtems_event_send(id, RTEMS_EVENT_1);

      /* Wait up to ~5 seconds for the worker to notice and exit. */
      while (loops && data.thread_active)
        rtems_task_wake_after (RTEMS_MICROSECONDS_TO_TICKS (100000));

      rtems_printf (printer, "load monitoring stopped.\n");
      return;
    }
    else if (c == '<')
    {
      /* Previous sort order, wrapping from the first to the last. */
      if (data.sort_order == 0)
        data.sort_order = RTEMS_TOP_SORT_MAX;
      else
        --data.sort_order;
      rtems_event_send(id, RTEMS_EVENT_1);
    }
    else if (c == '>')
    {
      /* Next sort order, wrapping from the last to the first. */
      if (data.sort_order >= RTEMS_TOP_SORT_MAX)
        data.sort_order = 0;
      else
        ++data.sort_order;
      rtems_event_send(id, RTEMS_EVENT_1);
    }
    else if ((c == 's') || (c == 'S'))
    {
      data.single_page = !data.single_page;
      rtems_event_send(id, RTEMS_EVENT_1);
    }
    else if ((c == 'a') || (c == 'A'))
    {
      /* Toggle between showing all tasks (0) and the line-limited view. */
      if (data.show == 0)
        data.show = show_lines;
      else
        data.show = 0;
      rtems_event_send(id, RTEMS_EVENT_1);
    }
    else if (c == '+')
    {
      ++show_lines;
      if (data.show != 0)
        data.show = show_lines;
    }
    else if (c == '-')
    {
      /* Never drop below 5 lines. */
      if (show_lines > 5)
        --show_lines;
      if (data.show != 0)
        data.show = show_lines;
    }
    else if (c == ' ')
    {
      rtems_event_send(id, RTEMS_EVENT_1);
    }
  }
}
---|
619 | |
---|
620 | void rtems_cpu_usage_top (void) |
---|
621 | { |
---|
622 | rtems_printer printer; |
---|
623 | rtems_print_printer_printk (&printer); |
---|
624 | rtems_cpu_usage_top_with_plugin (&printer); |
---|
625 | } |
---|