1 | /** |
---|
2 | * @file |
---|
3 | * |
---|
4 | * @brief CPU Usage Top |
---|
5 | * @ingroup libmisc_cpuuse CPU Usage |
---|
6 | */ |
---|
7 | |
---|
8 | /* |
---|
9 | * COPYRIGHT (c) 2015. Chris Johns <chrisj@rtems.org> |
---|
10 | * |
---|
11 | * COPYRIGHT (c) 2014. |
---|
12 | * On-Line Applications Research Corporation (OAR). |
---|
13 | * |
---|
14 | * The license and distribution terms for this file may be |
---|
15 | * found in the file LICENSE in this distribution or at |
---|
16 | * http://www.rtems.org/license/LICENSE. |
---|
17 | */ |
---|
18 | |
---|
19 | /* |
---|
20 | * Based on the old capture engine ct-load. |
---|
21 | */ |
---|
22 | |
---|
23 | #ifdef HAVE_CONFIG_H |
---|
24 | #include "config.h" |
---|
25 | #endif |
---|
26 | |
---|
27 | #include <stdbool.h> |
---|
28 | #include <string.h> |
---|
29 | #include <stdlib.h> |
---|
30 | #include <stdio.h> |
---|
31 | #include <ctype.h> |
---|
32 | #include <inttypes.h> |
---|
33 | |
---|
34 | #include <rtems/cpuuse.h> |
---|
35 | #include <rtems/malloc.h> |
---|
36 | #include <rtems/score/objectimpl.h> |
---|
37 | #include <rtems/score/protectedheap.h> |
---|
38 | #include <rtems/score/threadimpl.h> |
---|
39 | #include <rtems/score/todimpl.h> |
---|
40 | #include <rtems/score/watchdogimpl.h> |
---|
41 | #include <rtems/score/wkspace.h> |
---|
42 | |
---|
/*
 * Print plugin: the output callback and the opaque context passed to it.
 * All display output from the top code goes through this.
 */
typedef struct
{
  void*                 context;
  rtems_printk_plugin_t print;
} rtems_cpu_usage_plugin;
---|
51 | |
---|
/*
 * Use a struct for all data to allow more than one top and to support the
 * thread iterator.
 */
typedef struct
{
  volatile bool          thread_run;      /* Keep the worker thread running. */
  volatile bool          thread_active;   /* Set while the worker thread is alive. */
  volatile bool          single_page;     /* Redraw in place (ANSI) rather than scroll. */
  volatile uint32_t      sort_order;      /* One of RTEMS_TOP_SORT_*. */
  volatile uint32_t      poll_rate_usecs; /* Refresh period. NOTE(review): passed to
                                           * RTEMS_MILLISECONDS_TO_TICKS by the worker,
                                           * so it is effectively milliseconds. */
  volatile uint32_t      show;            /* Max task lines to display; 0 means all. */
  rtems_cpu_usage_plugin plugin;          /* Output callback and context. */
  Timestamp_Control      zero;            /* Constant zero timestamp for comparisons. */
  Timestamp_Control      uptime;          /* Uptime at this sample. */
  Timestamp_Control      last_uptime;     /* Uptime at the previous sample. */
  Timestamp_Control      period;          /* Time between the two samples. */
  int                    task_count;      /* Number of tasks. */
  int                    last_task_count; /* Number of tasks in the previous sample. */
  int                    task_size;       /* The size of the arrays */
  Thread_Control**       tasks;           /* List of tasks in this sample. */
  Thread_Control**       last_tasks;      /* List of tasks in the last sample. */
  Timestamp_Control*     usage;           /* Usage of task's in this sample. */
  Timestamp_Control*     last_usage;      /* Usage of task's in the last sample. */
  Timestamp_Control*     current_usage;   /* Current usage for this sample. */
  Timestamp_Control      total;           /* Total run run, should equal the uptime. */
  Timestamp_Control      idle;            /* Time spent in idle. */
  Timestamp_Control      current;         /* Current time run in this period. */
  Timestamp_Control      current_idle;    /* Current time in idle this period. */
  uint32_t               stack_size;      /* Size of stack allocated. */
} rtems_cpu_usage_data;
---|
83 | |
---|
84 | /* |
---|
85 | * Sort orders. |
---|
86 | */ |
---|
87 | #define RTEMS_TOP_SORT_ID (0) |
---|
88 | #define RTEMS_TOP_SORT_REAL_PRI (1) |
---|
89 | #define RTEMS_TOP_SORT_CURRENT_PRI (2) |
---|
90 | #define RTEMS_TOP_SORT_TOTAL (3) |
---|
91 | #define RTEMS_TOP_SORT_CURRENT (4) |
---|
92 | #define RTEMS_TOP_SORT_MAX (4) |
---|
93 | |
---|
/*
 * Private version of the iterator with an arg. This will be moved
 * to the public version in 5.0.
 */

typedef void (*rtems_per_thread_routine_2)( Thread_Control *, void* );

void rtems_iterate_over_all_threads_2(rtems_per_thread_routine_2 routine,
                                      void* arg);

/*
 * Invoke the routine once for every thread in every API's thread object
 * information table, forwarding the user argument.
 */
void rtems_iterate_over_all_threads_2(rtems_per_thread_routine_2 routine,
                                      void* arg)
{
  uint32_t             i;
  uint32_t             api_index;
  Thread_Control      *the_thread;
  Objects_Information *information;

  /* Nothing to do without a routine. */
  if ( !routine )
    return;

  for ( api_index = 1 ; api_index <= OBJECTS_APIS_LAST ; api_index++ ) {
#if !defined(RTEMS_POSIX_API) || defined(RTEMS_DEBUG)
    /* An API may not be configured; skip absent information tables. */
    if ( !_Objects_Information_table[ api_index ] )
      continue;
#endif
    /* Class 1 of each API holds the thread objects. */
    information = _Objects_Information_table[ api_index ][ 1 ];
    if ( information ) {
      /* Object indices are 1-based; unallocated slots are NULL. */
      for ( i=1 ; i <= information->maximum ; i++ ) {
        the_thread = (Thread_Control *)information->local_table[ i ];
        if ( the_thread )
          (*routine)(the_thread, arg);
      }
    }
  }
}
---|
130 | |
---|
/*
 * Compare the two pointed-to uint32_t values for equality.
 */
static inline bool equal_to_uint32_t( uint32_t * lhs, uint32_t * rhs )
{
  return *lhs == *rhs;
}
---|
138 | |
---|
/*
 * Return true when the first pointed-to uint32_t is strictly less than
 * the second.
 */
static inline bool less_than_uint32_t( uint32_t * lhs, uint32_t * rhs )
{
  return *lhs < *rhs;
}
---|
146 | |
---|
/*
 * Thin wrappers over the score timestamp operations used by the top code.
 */
#define CPU_usage_Equal_to( _lhs, _rhs ) \
        _Timestamp_Equal_to( _lhs, _rhs )

#define CPU_usage_Set_to_zero( _time ) \
       _Timestamp_Set_to_zero( _time )

#define CPU_usage_Less_than( _lhs, _rhs ) \
       _Timestamp_Less_than( _lhs, _rhs )
---|
155 | |
---|
156 | static void |
---|
157 | print_memsize(rtems_cpu_usage_data* data, const uint32_t size, const char* label) |
---|
158 | { |
---|
159 | if (size > (1024 * 1024)) |
---|
160 | (*data->plugin.print)(data->plugin.context, "%4" PRIu32 "M %s", |
---|
161 | size / (1024 * 1024), label); |
---|
162 | else if (size > 1024) |
---|
163 | (*data->plugin.print)(data->plugin.context, "%4" PRIu32 "K %s", |
---|
164 | size / 1024, label); |
---|
165 | else |
---|
166 | (*data->plugin.print)(data->plugin.context, "%4" PRIu32 " %s", |
---|
167 | size, label); |
---|
168 | } |
---|
169 | |
---|
/*
 * Print a timestamp broken down as [Nd][Nhr][Nm]S.UUUUUU and pad with
 * spaces on the right up to 'length' columns. Returns the number of
 * characters printed, excluding the padding.
 */
static int
print_time(rtems_cpu_usage_data* data,
           const Timestamp_Control* time,
           const int length)
{
  uint32_t secs = _Timestamp_Get_seconds( time );
  uint32_t usecs = _Timestamp_Get_nanoseconds( time ) / TOD_NANOSECONDS_PER_MICROSECOND;
  int len = 0;

  /*
   * Each unit is only broken out once the next one up is exceeded; the
   * boundaries use '>' not '>=', so exactly 60 secs prints as "60.uuuuuu".
   */
  if (secs > 60)
  {
    uint32_t mins = secs / 60;
    if (mins > 60)
    {
      uint32_t hours = mins / 60;
      if (hours > 24)
      {
        len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 "d", hours / 24);
        hours %= 24;
      }
      len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 "hr", hours);
      mins %= 60;
    }
    len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 "m", mins);
    secs %= 60;
  }
  len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 ".%06" PRIu32, secs, usecs);

  /* Pad out to the requested field width. */
  if (len < length)
    (*data->plugin.print)(data->plugin.context, "%*c", length - len, ' ');

  return len;
}
---|
203 | |
---|
204 | /* |
---|
205 | * Count the number of tasks. |
---|
206 | */ |
---|
207 | static void |
---|
208 | task_counter(Thread_Control *thrad, void* arg) |
---|
209 | { |
---|
210 | rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg; |
---|
211 | ++data->task_count; |
---|
212 | } |
---|
213 | |
---|
/*
 * Thread iterator callback: create the sorted table with the current and
 * total usage. Insertion-sorts 'thread' into data->tasks/usage/current_usage
 * according to data->sort_order.
 */
static void
task_usage(Thread_Control* thread, void* arg)
{
  rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg;
  Timestamp_Control usage;
  Timestamp_Control current = data->zero;
  int j;

  /* Accumulate the configured stack size of every thread. */
  data->stack_size += thread->Start.Initial_stack.size;

  _Thread_Get_CPU_time_used(thread, &usage);

  /*
   * Usage this period is this sample's total minus the total recorded in
   * the previous sample; a thread not present last time keeps zero.
   */
  for (j = 0; j < data->last_task_count; j++)
  {
    if (thread == data->last_tasks[j])
    {
      _Timestamp_Subtract(&data->last_usage[j], &usage, &current);
      break;
    }
  }

  /*
   * When not using nanosecond CPU usage resolution, we have to count the
   * number of "ticks" we gave credit for to give the user a rough guideline as
   * to what each number means proportionally.
   */
  _Timestamp_Add_to(&data->total, &usage);
  _Timestamp_Add_to(&data->current, &current);

  /*
   * NOTE(review): 0x09010001 is assumed to be the object id of the IDLE
   * thread (internal thread index 1) — TODO confirm this holds for all
   * configurations rather than hard-coding the id.
   */
  if (thread->Object.id == 0x09010001)
  {
    data->idle = usage;
    data->current_idle = current;
  }

  /*
   * Create the tasks to display sorting as we create.
   */
  for (j = 0; j < data->task_count; j++)
  {
    if (data->tasks[j])
    {
      int k;

      /*
       * Sort on the current load. The switch cases fall through on
       * purpose: 'continue' moves to the next slot, falling out of the
       * switch inserts the thread at slot j.
       */
      switch (data->sort_order)
      {
        default:
          data->sort_order = RTEMS_TOP_SORT_CURRENT;
          /* drop through */
        case RTEMS_TOP_SORT_CURRENT:
          if (CPU_usage_Equal_to(&current, &data->zero) ||
              CPU_usage_Less_than(&current, &data->current_usage[j]))
            continue;
        case RTEMS_TOP_SORT_TOTAL:
          if (CPU_usage_Equal_to(&usage, &data->zero) ||
              CPU_usage_Less_than(&usage, &data->usage[j]))
            continue;
        case RTEMS_TOP_SORT_REAL_PRI:
          if (thread->real_priority > data->tasks[j]->real_priority)
            continue;
        case RTEMS_TOP_SORT_CURRENT_PRI:
          if (thread->current_priority > data->tasks[j]->current_priority)
            continue;
        case RTEMS_TOP_SORT_ID:
          if (thread->Object.id < data->tasks[j]->Object.id)
            continue;
      }

      /*
       * Shift the tail down one slot to open slot j. The arrays hold
       * task_count + 1 entries, so writing index k + 1 cannot overrun.
       */
      for (k = (data->task_count - 1); k >= j; k--)
      {
        data->tasks[k + 1] = data->tasks[k];
        data->usage[k + 1]  = data->usage[k];
        data->current_usage[k + 1]  = data->current_usage[k];
      }
    }
    data->tasks[j] = thread;
    data->usage[j] = usage;
    data->current_usage[j] = current;
    break;
  }
}
---|
301 | |
---|
302 | /* |
---|
303 | * rtems_cpuusage_top_thread |
---|
304 | * |
---|
305 | * This function displays the load of the tasks on an ANSI terminal. |
---|
306 | */ |
---|
307 | |
---|
308 | static void |
---|
309 | rtems_cpuusage_top_thread (rtems_task_argument arg) |
---|
310 | { |
---|
311 | rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg; |
---|
312 | char name[13]; |
---|
313 | int i; |
---|
314 | Heap_Information_block wksp; |
---|
315 | uint32_t ival, fval; |
---|
316 | int task_count; |
---|
317 | rtems_event_set out; |
---|
318 | rtems_status_code sc; |
---|
319 | bool first_time = true; |
---|
320 | |
---|
321 | data->thread_active = true; |
---|
322 | |
---|
323 | _TOD_Get_uptime(&data->last_uptime); |
---|
324 | |
---|
325 | CPU_usage_Set_to_zero(&data->zero); |
---|
326 | |
---|
327 | while (data->thread_run) |
---|
328 | { |
---|
329 | Timestamp_Control uptime_at_last_reset = CPU_usage_Uptime_at_last_reset; |
---|
330 | size_t tasks_size; |
---|
331 | size_t usage_size; |
---|
332 | Timestamp_Control load; |
---|
333 | |
---|
334 | data->task_count = 0; |
---|
335 | rtems_iterate_over_all_threads_2(task_counter, data); |
---|
336 | |
---|
337 | tasks_size = sizeof(Thread_Control*) * (data->task_count + 1); |
---|
338 | usage_size = sizeof(Timestamp_Control) * (data->task_count + 1); |
---|
339 | |
---|
340 | if (data->task_count > data->task_size) |
---|
341 | { |
---|
342 | data->tasks = realloc(data->tasks, tasks_size); |
---|
343 | data->usage = realloc(data->usage, usage_size); |
---|
344 | data->current_usage = realloc(data->current_usage, usage_size); |
---|
345 | if ((data->tasks == NULL) || (data->usage == NULL) || (data->current_usage == NULL)) |
---|
346 | { |
---|
347 | (*data->plugin.print)(data->plugin.context, "top worker: error: no memory\n"); |
---|
348 | data->thread_run = false; |
---|
349 | break; |
---|
350 | } |
---|
351 | } |
---|
352 | |
---|
353 | memset(data->tasks, 0, tasks_size); |
---|
354 | memset(data->usage, 0, usage_size); |
---|
355 | memset(data->current_usage, 0, usage_size); |
---|
356 | |
---|
357 | _Timestamp_Set_to_zero(&data->total); |
---|
358 | _Timestamp_Set_to_zero(&data->current); |
---|
359 | data->stack_size = 0; |
---|
360 | |
---|
361 | _TOD_Get_uptime(&data->uptime); |
---|
362 | _Timestamp_Subtract(&uptime_at_last_reset, &data->uptime, &data->uptime); |
---|
363 | _Timestamp_Subtract(&data->last_uptime, &data->uptime, &data->period); |
---|
364 | data->last_uptime = data->uptime; |
---|
365 | |
---|
366 | rtems_iterate_over_all_threads_2(task_usage, data); |
---|
367 | |
---|
368 | if (data->task_count > data->task_size) |
---|
369 | { |
---|
370 | data->last_tasks = realloc(data->last_tasks, tasks_size); |
---|
371 | data->last_usage = realloc(data->last_usage, usage_size); |
---|
372 | if ((data->last_tasks == NULL) || (data->last_usage == NULL)) |
---|
373 | { |
---|
374 | (*data->plugin.print)(data->plugin.context, "top worker: error: no memory\n"); |
---|
375 | data->thread_run = false; |
---|
376 | break; |
---|
377 | } |
---|
378 | data->task_size = data->task_count; |
---|
379 | } |
---|
380 | |
---|
381 | memcpy(data->last_tasks, data->tasks, tasks_size); |
---|
382 | memcpy(data->last_usage, data->usage, usage_size); |
---|
383 | data->last_task_count = data->task_count; |
---|
384 | |
---|
385 | /* |
---|
386 | * We need to loop again to get suitable current usage values as we need a |
---|
387 | * last sample to work. |
---|
388 | */ |
---|
389 | if (first_time) |
---|
390 | { |
---|
391 | rtems_task_wake_after(RTEMS_MILLISECONDS_TO_TICKS(500)); |
---|
392 | first_time = false; |
---|
393 | continue; |
---|
394 | } |
---|
395 | |
---|
396 | _Protected_heap_Get_information(&_Workspace_Area, &wksp); |
---|
397 | |
---|
398 | if (data->single_page) |
---|
399 | (*data->plugin.print)(data->plugin.context, |
---|
400 | "\x1b[H\x1b[J" |
---|
401 | " ENTER:Exit SPACE:Refresh" |
---|
402 | " S:Scroll A:All <>:Order +/-:Lines\n"); |
---|
403 | (*data->plugin.print)(data->plugin.context,"\n"); |
---|
404 | |
---|
405 | /* |
---|
406 | * Uptime and period of this sample. |
---|
407 | */ |
---|
408 | (*data->plugin.print)(data->plugin.context, "Uptime: "); |
---|
409 | print_time(data, &data->uptime, 20); |
---|
410 | (*data->plugin.print)(data->plugin.context, " Period: "); |
---|
411 | print_time(data, &data->period, 20); |
---|
412 | |
---|
413 | /* |
---|
414 | * Task count, load and idle levels. |
---|
415 | */ |
---|
416 | (*data->plugin.print)(data->plugin.context, "\nTasks: %4i ", data->task_count); |
---|
417 | |
---|
418 | _Timestamp_Subtract(&data->idle, &data->total, &load); |
---|
419 | _Timestamp_Divide(&load, &data->uptime, &ival, &fval); |
---|
420 | (*data->plugin.print)(data->plugin.context, |
---|
421 | "Load Average: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval); |
---|
422 | _Timestamp_Subtract(&data->current_idle, &data->current, &load); |
---|
423 | _Timestamp_Divide(&load, &data->period, &ival, &fval); |
---|
424 | (*data->plugin.print)(data->plugin.context, |
---|
425 | " Load: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval); |
---|
426 | _Timestamp_Divide(&data->current_idle, &data->period, &ival, &fval); |
---|
427 | (*data->plugin.print)(data->plugin.context, |
---|
428 | " Idle: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval); |
---|
429 | |
---|
430 | /* |
---|
431 | * Memory usage. |
---|
432 | */ |
---|
433 | if (rtems_configuration_get_unified_work_area()) |
---|
434 | { |
---|
435 | (*data->plugin.print)(data->plugin.context, "\nMem: "); |
---|
436 | print_memsize(data, wksp.Free.total, "free"); |
---|
437 | print_memsize(data, wksp.Used.total, "used"); |
---|
438 | } |
---|
439 | else |
---|
440 | { |
---|
441 | region_information_block libc_heap; |
---|
442 | malloc_info(&libc_heap); |
---|
443 | (*data->plugin.print)(data->plugin.context, "\nMem: Wksp: "); |
---|
444 | print_memsize(data, wksp.Free.total, "free"); |
---|
445 | print_memsize(data, wksp.Used.total, "used Heap: "); |
---|
446 | print_memsize(data, libc_heap.Free.total, "free"); |
---|
447 | print_memsize(data, libc_heap.Used.total, "used"); |
---|
448 | } |
---|
449 | |
---|
450 | print_memsize(data, data->stack_size, "stack\n"); |
---|
451 | |
---|
452 | (*data->plugin.print)(data->plugin.context, |
---|
453 | "\n" |
---|
454 | " ID | NAME | RPRI | CPRI | TIME | TOTAL | CURRENT\n" |
---|
455 | "-%s---------+---------------------+-%s-----%s-----+---------------------+-%s------+--%s----\n", |
---|
456 | data->sort_order == RTEMS_TOP_SORT_ID ? "^^" : "--", |
---|
457 | data->sort_order == RTEMS_TOP_SORT_REAL_PRI ? "^^" : "--", |
---|
458 | data->sort_order == RTEMS_TOP_SORT_CURRENT_PRI ? "^^" : "--", |
---|
459 | data->sort_order == RTEMS_TOP_SORT_TOTAL ? "^^" : "--", |
---|
460 | data->sort_order == RTEMS_TOP_SORT_CURRENT ? "^^" : "--" |
---|
461 | ); |
---|
462 | |
---|
463 | task_count = 0; |
---|
464 | |
---|
465 | for (i = 0; i < data->task_count; i++) |
---|
466 | { |
---|
467 | Thread_Control* thread = data->tasks[i]; |
---|
468 | Timestamp_Control usage; |
---|
469 | Timestamp_Control current_usage; |
---|
470 | |
---|
471 | if (thread == NULL) |
---|
472 | break; |
---|
473 | |
---|
474 | if (data->single_page && (data->show != 0) && (i >= data->show)) |
---|
475 | break; |
---|
476 | |
---|
477 | /* |
---|
478 | * We need to count the number displayed to clear the remainder of the |
---|
479 | * the display. |
---|
480 | */ |
---|
481 | ++task_count; |
---|
482 | |
---|
483 | /* |
---|
484 | * If the API os POSIX print the entry point. |
---|
485 | */ |
---|
486 | rtems_object_get_name(thread->Object.id, sizeof(name), name); |
---|
487 | if (name[0] == '\0') |
---|
488 | snprintf(name, sizeof(name) - 1, "(%p)", thread->Start.Entry.Kinds.Numeric.entry); |
---|
489 | |
---|
490 | (*data->plugin.print)(data->plugin.context, |
---|
491 | " 0x%08" PRIx32 " | %-19s | %3" PRId32 " | %3" PRId32 " | ", |
---|
492 | thread->Object.id, |
---|
493 | name, |
---|
494 | thread->real_priority, |
---|
495 | thread->current_priority); |
---|
496 | |
---|
497 | usage = data->usage[i]; |
---|
498 | current_usage = data->current_usage[i]; |
---|
499 | |
---|
500 | /* |
---|
501 | * Print the information |
---|
502 | */ |
---|
503 | print_time(data, &usage, 19); |
---|
504 | _Timestamp_Divide(&usage, &data->total, &ival, &fval); |
---|
505 | (*data->plugin.print)(data->plugin.context, |
---|
506 | " |%4" PRIu32 ".%03" PRIu32, ival, fval); |
---|
507 | _Timestamp_Divide(¤t_usage, &data->period, &ival, &fval); |
---|
508 | (*data->plugin.print)(data->plugin.context, |
---|
509 | " |%4" PRIu32 ".%03" PRIu32 "\n", ival, fval); |
---|
510 | } |
---|
511 | |
---|
512 | if (data->single_page && (data->show != 0) && (task_count < data->show)) |
---|
513 | { |
---|
514 | i = data->show - task_count; |
---|
515 | while (i > 0) |
---|
516 | { |
---|
517 | (*data->plugin.print)(data->plugin.context, "\x1b[K\n"); |
---|
518 | i--; |
---|
519 | } |
---|
520 | } |
---|
521 | |
---|
522 | sc = rtems_event_receive(RTEMS_EVENT_1, |
---|
523 | RTEMS_EVENT_ANY, |
---|
524 | RTEMS_MILLISECONDS_TO_TICKS (data->poll_rate_usecs), |
---|
525 | &out); |
---|
526 | if ((sc != RTEMS_SUCCESSFUL) && (sc != RTEMS_TIMEOUT)) |
---|
527 | { |
---|
528 | (*data->plugin.print)(data->plugin.context, |
---|
529 | "error: event receive: %s\n", rtems_status_text(sc)); |
---|
530 | break; |
---|
531 | } |
---|
532 | } |
---|
533 | |
---|
534 | free(data->tasks); |
---|
535 | free(data->last_tasks); |
---|
536 | free(data->last_usage); |
---|
537 | free(data->current_usage); |
---|
538 | |
---|
539 | data->thread_active = false; |
---|
540 | |
---|
541 | rtems_task_delete (RTEMS_SELF); |
---|
542 | } |
---|
543 | |
---|
544 | void rtems_cpu_usage_top_with_plugin( |
---|
545 | void *context, |
---|
546 | rtems_printk_plugin_t print |
---|
547 | ) |
---|
548 | { |
---|
549 | rtems_status_code sc; |
---|
550 | rtems_task_priority priority; |
---|
551 | rtems_name name; |
---|
552 | rtems_id id; |
---|
553 | rtems_cpu_usage_data data; |
---|
554 | int show_lines = 25; |
---|
555 | |
---|
556 | if ( !print ) |
---|
557 | return; |
---|
558 | |
---|
559 | memset(&data, 0, sizeof(data)); |
---|
560 | |
---|
561 | data.thread_run = true; |
---|
562 | data.single_page = true; |
---|
563 | data.sort_order = RTEMS_TOP_SORT_CURRENT; |
---|
564 | data.poll_rate_usecs = 3000; |
---|
565 | data.show = show_lines; |
---|
566 | data.plugin.context = context; |
---|
567 | data.plugin.print = print; |
---|
568 | |
---|
569 | sc = rtems_task_set_priority (RTEMS_SELF, RTEMS_CURRENT_PRIORITY, &priority); |
---|
570 | |
---|
571 | if (sc != RTEMS_SUCCESSFUL) |
---|
572 | { |
---|
573 | (*print)( |
---|
574 | context, |
---|
575 | "error: cannot obtain the current priority: %s\n", |
---|
576 | rtems_status_text (sc) |
---|
577 | ); |
---|
578 | return; |
---|
579 | } |
---|
580 | |
---|
581 | name = rtems_build_name('C', 'P', 'l', 't'); |
---|
582 | |
---|
583 | sc = rtems_task_create (name, priority, 4 * 1024, |
---|
584 | RTEMS_NO_FLOATING_POINT | RTEMS_LOCAL, |
---|
585 | RTEMS_PREEMPT | RTEMS_TIMESLICE | RTEMS_NO_ASR, |
---|
586 | &id); |
---|
587 | |
---|
588 | if (sc != RTEMS_SUCCESSFUL) |
---|
589 | { |
---|
590 | (*print)( |
---|
591 | context, |
---|
592 | "error: cannot create helper thread: %s\n", |
---|
593 | rtems_status_text (sc) |
---|
594 | ); |
---|
595 | return; |
---|
596 | } |
---|
597 | |
---|
598 | sc = rtems_task_start ( |
---|
599 | id, rtems_cpuusage_top_thread, (rtems_task_argument) &data |
---|
600 | ); |
---|
601 | if (sc != RTEMS_SUCCESSFUL) |
---|
602 | { |
---|
603 | (*print)( |
---|
604 | context, |
---|
605 | "error: cannot start helper thread: %s\n", |
---|
606 | rtems_status_text (sc) |
---|
607 | ); |
---|
608 | rtems_task_delete (id); |
---|
609 | return; |
---|
610 | } |
---|
611 | |
---|
612 | while (true) |
---|
613 | { |
---|
614 | int c = getchar (); |
---|
615 | |
---|
616 | if ((c == '\r') || (c == '\n') || (c == 'q') || (c == 'Q')) |
---|
617 | { |
---|
618 | int loops = 50; |
---|
619 | |
---|
620 | data.thread_run = false; |
---|
621 | |
---|
622 | rtems_event_send(id, RTEMS_EVENT_1); |
---|
623 | |
---|
624 | while (loops && data.thread_active) |
---|
625 | rtems_task_wake_after (RTEMS_MICROSECONDS_TO_TICKS (100000)); |
---|
626 | |
---|
627 | (*print)(context, "load monitoring stopped.\n"); |
---|
628 | return; |
---|
629 | } |
---|
630 | else if (c == '<') |
---|
631 | { |
---|
632 | if (data.sort_order == 0) |
---|
633 | data.sort_order = RTEMS_TOP_SORT_MAX; |
---|
634 | else |
---|
635 | --data.sort_order; |
---|
636 | rtems_event_send(id, RTEMS_EVENT_1); |
---|
637 | } |
---|
638 | else if (c == '>') |
---|
639 | { |
---|
640 | if (data.sort_order >= RTEMS_TOP_SORT_MAX) |
---|
641 | data.sort_order = 0; |
---|
642 | else |
---|
643 | ++data.sort_order; |
---|
644 | rtems_event_send(id, RTEMS_EVENT_1); |
---|
645 | } |
---|
646 | else if ((c == 's') || (c == 'S')) |
---|
647 | { |
---|
648 | data.single_page = !data.single_page; |
---|
649 | rtems_event_send(id, RTEMS_EVENT_1); |
---|
650 | } |
---|
651 | else if ((c == 'a') || (c == 'A')) |
---|
652 | { |
---|
653 | if (data.show == 0) |
---|
654 | data.show = show_lines; |
---|
655 | else |
---|
656 | data.show = 0; |
---|
657 | rtems_event_send(id, RTEMS_EVENT_1); |
---|
658 | } |
---|
659 | else if (c == '+') |
---|
660 | { |
---|
661 | ++show_lines; |
---|
662 | if (data.show != 0) |
---|
663 | data.show = show_lines; |
---|
664 | } |
---|
665 | else if (c == '-') |
---|
666 | { |
---|
667 | if (show_lines > 5) |
---|
668 | --show_lines; |
---|
669 | if (data.show != 0) |
---|
670 | data.show = show_lines; |
---|
671 | } |
---|
672 | else if (c == ' ') |
---|
673 | { |
---|
674 | rtems_event_send(id, RTEMS_EVENT_1); |
---|
675 | } |
---|
676 | } |
---|
677 | } |
---|
678 | |
---|
/*
 * Run the CPU usage top display using the default printk plugin.
 */
void rtems_cpu_usage_top( void )
{
  rtems_cpu_usage_top_with_plugin( NULL, printk_plugin );
}
---|