source: rtems/cpukit/libmisc/cpuuse/cpuusagetop.c @ d53862a

5
Last change on this file since d53862a was d53862a, checked in by Sebastian Huber <sebastian.huber@…>, on 11/08/18 at 06:56:54

rtems: Deprecate region_information_block

The region_information_block typedef has no corresponding API. It has no
proper namespace prefix. A user can do nothing with it.

Close #3591.

  • Property mode set to 100644
File size: 17.6 KB
RevLine 
[6031da4]1/**
2 * @file
3 *
4 * @brief CPU Usage Top
5 * @ingroup libmisc_cpuuse CPU Usage
6 */
7
8/*
[a8e4352]9 *  COPYRIGHT (c) 2015. Chris Johns <chrisj@rtems.org>
10 *
[6031da4]11 *  COPYRIGHT (c) 2014.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  The license and distribution terms for this file may be
15 *  found in the file LICENSE in this distribution or at
16 *  http://www.rtems.org/license/LICENSE.
17 */
18
[a8e4352]19/*
20 * Based on the old capture engine ct-load.
21 */
22
[6031da4]23#ifdef HAVE_CONFIG_H
24#include "config.h"
25#endif
26
[a8e4352]27#include <stdbool.h>
[6031da4]28#include <string.h>
29#include <stdlib.h>
30#include <stdio.h>
31#include <ctype.h>
32#include <inttypes.h>
33
34#include <rtems/cpuuse.h>
[506bfc8]35#include <rtems/printer.h>
[a8e4352]36#include <rtems/malloc.h>
[6031da4]37#include <rtems/score/objectimpl.h>
[a8e4352]38#include <rtems/score/protectedheap.h>
[6031da4]39#include <rtems/score/threadimpl.h>
40#include <rtems/score/todimpl.h>
41#include <rtems/score/watchdogimpl.h>
[a8e4352]42#include <rtems/score/wkspace.h>
[6031da4]43
[5e072f6d]44#include "cpuuseimpl.h"
45
/*
 * Use a struct for all data to allow more than one top and to support the
 * thread iterator.
 */
typedef struct
{
  volatile bool          thread_run;        /* Worker loops while true; cleared to request exit. */
  volatile bool          thread_active;     /* True while the worker thread is alive. */
  volatile bool          single_page;       /* Redraw in place with ANSI escapes instead of scrolling. */
  volatile uint32_t      sort_order;        /* One of the RTEMS_TOP_SORT_* values. */
  volatile uint32_t      poll_rate_usecs;   /* Poll rate; NOTE(review): passed to RTEMS_MILLISECONDS_TO_TICKS, so the unit appears to be milliseconds - confirm. */
  volatile uint32_t      show;              /* Maximum task lines to display; 0 means show all. */
  const rtems_printer*   printer;           /* Sink for all report output. */
  Timestamp_Control      zero;              /* Constant zero timestamp used in comparisons. */
  Timestamp_Control      uptime;            /* Uptime at this sample (since last usage reset). */
  Timestamp_Control      last_uptime;       /* Uptime at the previous sample. */
  Timestamp_Control      period;            /* Time between this sample and the last one. */
  int                    task_count;        /* Number of tasks. */
  int                    last_task_count;   /* Number of tasks in the previous sample. */
  int                    task_size;         /* The size of the arrays */
  Thread_Control**       tasks;             /* List of tasks in this sample. */
  Thread_Control**       last_tasks;        /* List of tasks in the last sample. */
  Timestamp_Control*     usage;             /* Usage of task's in this sample. */
  Timestamp_Control*     last_usage;        /* Usage of task's in the last sample. */
  Timestamp_Control*     current_usage;     /* Current usage for this sample. */
  Timestamp_Control      total;             /* Total run run, should equal the uptime. */
  Timestamp_Control      idle;              /* Time spent in idle. */
  Timestamp_Control      current;           /* Current time run in this period. */
  Timestamp_Control      current_idle;      /* Current time in idle this period. */
  uint32_t               stack_size;        /* Size of stack allocated. */
} rtems_cpu_usage_data;
[6031da4]77
/*
 * Sort orders.
 */
#define RTEMS_TOP_SORT_ID            (0)  /* Sort by object id. */
#define RTEMS_TOP_SORT_REAL_PRI      (1)  /* Sort by real (base) priority. */
#define RTEMS_TOP_SORT_CURRENT_PRI   (2)  /* Sort by current priority. */
#define RTEMS_TOP_SORT_TOTAL         (3)  /* Sort by total CPU usage. */
#define RTEMS_TOP_SORT_CURRENT       (4)  /* Sort by usage in the current period. */
#define RTEMS_TOP_SORT_MAX           (4)  /* Highest valid sort order value. */
87
/*
 * Return true when the two pointed-to values are equal.
 *
 * The verbose if/else returning true/false is replaced by returning the
 * boolean expression directly.
 */
static inline bool equal_to_uint32_t( uint32_t * lhs, uint32_t * rhs )
{
   return *lhs == *rhs;
}
95
/*
 * Return true when *lhs is strictly less than *rhs.
 *
 * The verbose if/else returning true/false is replaced by returning the
 * boolean expression directly.
 */
static inline bool less_than_uint32_t( uint32_t * lhs, uint32_t * rhs )
{
   return *lhs < *rhs;
}
103
/* Thin timestamp-comparison wrappers used by the sort logic in task_usage(). */
#define CPU_usage_Equal_to( _lhs, _rhs )  _Timestamp_Equal_to( _lhs, _rhs )
#define CPU_usage_Set_to_zero( _time )    _Timestamp_Set_to_zero( _time )
#define CPU_usage_Less_than( _lhs, _rhs ) _Timestamp_Less_than( _lhs, _rhs )
[4123e244]107
[a8e4352]108static void
[93934f88]109print_memsize(rtems_cpu_usage_data* data, const uintptr_t size, const char* label)
[a8e4352]110{
111  if (size > (1024 * 1024))
[93934f88]112    rtems_printf(data->printer, "%4" PRIuPTR "M %s", size / (1024 * 1024), label);
[a8e4352]113  else if (size > 1024)
[93934f88]114    rtems_printf(data->printer, "%4" PRIuPTR "K %s", size / 1024, label);
[a8e4352]115  else
[93934f88]116    rtems_printf(data->printer, "%4" PRIuPTR " %s", size, label);
[a8e4352]117}
118
119static int
120print_time(rtems_cpu_usage_data*    data,
121           const Timestamp_Control* time,
122           const int                length)
123{
124  uint32_t secs = _Timestamp_Get_seconds( time );
125  uint32_t usecs = _Timestamp_Get_nanoseconds( time ) / TOD_NANOSECONDS_PER_MICROSECOND;
126  int      len = 0;
127
128  if (secs > 60)
129  {
130    uint32_t mins = secs / 60;
131    if (mins > 60)
132    {
133      uint32_t hours = mins / 60;
134      if (hours > 24)
135      {
[24d0ee57]136        len += rtems_printf(data->printer, "%" PRIu32 "d", hours / 24);
[a8e4352]137        hours %= 24;
138      }
[24d0ee57]139      len += rtems_printf(data->printer, "%" PRIu32 "hr", hours);
[a8e4352]140      mins %= 60;
141    }
[24d0ee57]142    len += rtems_printf(data->printer, "%" PRIu32 "m", mins);
[a8e4352]143    secs %= 60;
144  }
[24d0ee57]145  len += rtems_printf(data->printer, "%" PRIu32 ".%06" PRIu32, secs, usecs);
[a8e4352]146
147  if (len < length)
[24d0ee57]148    rtems_printf(data->printer, "%*c", length - len, ' ');
[a8e4352]149
150  return len;
151}
152
153/*
154 * Count the number of tasks.
155 */
[d271c3bb]156static bool
[a8e4352]157task_counter(Thread_Control *thrad, void* arg)
158{
159  rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg;
160  ++data->task_count;
[d271c3bb]161
162  return false;
[a8e4352]163}
164
/*
 * Create the sorted table with the current and total usage.
 *
 * Thread iterator visitor: accumulates each thread's total and per-period
 * CPU usage and inserts the thread into data->tasks at the position chosen
 * by the active sort order. Always returns false so iteration continues.
 */
static bool
task_usage(Thread_Control* thread, void* arg)
{
  rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg;
  Timestamp_Control     usage;
  Timestamp_Control     current = data->zero;
  int                   j;

  data->stack_size += thread->Start.Initial_stack.size;

  _Thread_Get_CPU_time_used(thread, &usage);

  /* Usage in this period = usage now minus usage at the previous sample. */
  for (j = 0; j < data->last_task_count; j++)
  {
    if (thread == data->last_tasks[j])
    {
      _Timestamp_Subtract(&data->last_usage[j], &usage, &current);
      break;
    }
  }

  /*
   * When not using nanosecond CPU usage resolution, we have to count the
   * number of "ticks" we gave credit for to give the user a rough guideline as
   * to what each number means proportionally.
   */
  _Timestamp_Add_to(&data->total, &usage);
  _Timestamp_Add_to(&data->current, &current);

  /*
   * NOTE(review): hard-coded object id assumed to be the IDLE task - verify
   * this holds for all configurations (e.g. SMP with several idle threads).
   */
  if (thread->Object.id == 0x09010001)
  {
    data->idle = usage;
    data->current_idle = current;
  }

  /*
   * Create the tasks to display sorting as we create.
   */
  for (j = 0; j < data->task_count; j++)
  {
    if (data->tasks[j])
    {
      int k;

      /*
       * Sort on the current load.
       *
       * The switch cases below have no breaks; each later comparison keeps
       * filtering after the primary key - apparently intentional, acting as
       * tie breakers.
       */
      switch (data->sort_order)
      {
        default:
          data->sort_order = RTEMS_TOP_SORT_CURRENT;
          /* drop through */
        case RTEMS_TOP_SORT_CURRENT:
          if (CPU_usage_Equal_to(&current, &data->zero) ||
              CPU_usage_Less_than(&current, &data->current_usage[j]))
            continue;
          /* fall through */
        case RTEMS_TOP_SORT_TOTAL:
          if (CPU_usage_Equal_to(&usage, &data->zero) ||
              CPU_usage_Less_than(&usage, &data->usage[j]))
            continue;
          /* fall through */
        case RTEMS_TOP_SORT_REAL_PRI:
          if (thread->Real_priority.priority > data->tasks[j]->Real_priority.priority)
            continue;
          /* fall through */
        case RTEMS_TOP_SORT_CURRENT_PRI:
          if (
            _Thread_Get_priority( thread )
              > _Thread_Get_priority( data->tasks[j] )
          ) {
            continue;
          }
          /* fall through */
        case RTEMS_TOP_SORT_ID:
          if (thread->Object.id < data->tasks[j]->Object.id)
            continue;
      }

      /* Shift the remaining entries down to open slot j for this thread. */
      for (k = (data->task_count - 1); k >= j; k--)
      {
        data->tasks[k + 1] = data->tasks[k];
        data->usage[k + 1]  = data->usage[k];
        data->current_usage[k + 1]  = data->current_usage[k];
      }
    }
    data->tasks[j] = thread;
    data->usage[j] = usage;
    data->current_usage[j] = current;
    break;
  }

  return false;
}
258
[6031da4]259/*
260 * rtems_cpuusage_top_thread
261 *
262 * This function displays the load of the tasks on an ANSI terminal.
263 */
264
265static void
266rtems_cpuusage_top_thread (rtems_task_argument arg)
267{
[a8e4352]268  rtems_cpu_usage_data*  data = (rtems_cpu_usage_data*) arg;
269  char                   name[13];
270  int                    i;
271  Heap_Information_block wksp;
272  uint32_t               ival, fval;
273  int                    task_count;
274  rtems_event_set        out;
275  rtems_status_code      sc;
276  bool                   first_time = true;
[6031da4]277
[a8e4352]278  data->thread_active = true;
[6031da4]279
[a8e4352]280  _TOD_Get_uptime(&data->last_uptime);
[6031da4]281
[a8e4352]282  CPU_usage_Set_to_zero(&data->zero);
283
284  while (data->thread_run)
285  {
286    Timestamp_Control uptime_at_last_reset = CPU_usage_Uptime_at_last_reset;
287    size_t            tasks_size;
288    size_t            usage_size;
289    Timestamp_Control load;
290
291    data->task_count = 0;
[d271c3bb]292    _Thread_Iterate(task_counter, data);
[a8e4352]293
294    tasks_size = sizeof(Thread_Control*) * (data->task_count + 1);
[d297c81d]295    usage_size = sizeof(Timestamp_Control) * (data->task_count + 1);
[a8e4352]296
297    if (data->task_count > data->task_size)
298    {
299      data->tasks = realloc(data->tasks, tasks_size);
300      data->usage = realloc(data->usage, usage_size);
301      data->current_usage = realloc(data->current_usage, usage_size);
302      if ((data->tasks == NULL) || (data->usage == NULL) || (data->current_usage == NULL))
303      {
[24d0ee57]304        rtems_printf(data->printer, "top worker: error: no memory\n");
[a8e4352]305        data->thread_run = false;
306        break;
[6031da4]307      }
308    }
309
[a8e4352]310    memset(data->tasks, 0, tasks_size);
311    memset(data->usage, 0, usage_size);
312    memset(data->current_usage, 0, usage_size);
[6031da4]313
[a8e4352]314    _Timestamp_Set_to_zero(&data->total);
315    _Timestamp_Set_to_zero(&data->current);
316    data->stack_size = 0;
317
318    _TOD_Get_uptime(&data->uptime);
319    _Timestamp_Subtract(&uptime_at_last_reset, &data->uptime, &data->uptime);
320    _Timestamp_Subtract(&data->last_uptime, &data->uptime, &data->period);
321    data->last_uptime = data->uptime;
322
[d271c3bb]323    _Thread_Iterate(task_usage, data);
[a8e4352]324
325    if (data->task_count > data->task_size)
326    {
327      data->last_tasks = realloc(data->last_tasks, tasks_size);
328      data->last_usage = realloc(data->last_usage, usage_size);
329      if ((data->last_tasks == NULL) || (data->last_usage == NULL))
330      {
[24d0ee57]331        rtems_printf(data->printer, "top worker: error: no memory\n");
[a8e4352]332        data->thread_run = false;
333        break;
334      }
335      data->task_size = data->task_count;
336    }
337
338    memcpy(data->last_tasks, data->tasks, tasks_size);
339    memcpy(data->last_usage, data->usage, usage_size);
340    data->last_task_count = data->task_count;
341
342    /*
343     * We need to loop again to get suitable current usage values as we need a
344     * last sample to work.
345     */
346    if (first_time)
347    {
348      rtems_task_wake_after(RTEMS_MILLISECONDS_TO_TICKS(500));
349      first_time = false;
350      continue;
351    }
352
353    _Protected_heap_Get_information(&_Workspace_Area, &wksp);
354
355    if (data->single_page)
[24d0ee57]356      rtems_printf(data->printer,
357                   "\x1b[H\x1b[J"
358                   " ENTER:Exit  SPACE:Refresh"
359                   "  S:Scroll  A:All  <>:Order  +/-:Lines\n");
360    rtems_printf(data->printer, "\n");
[a8e4352]361
362    /*
363     * Uptime and period of this sample.
364     */
[24d0ee57]365    rtems_printf(data->printer, "Uptime: ");
[a8e4352]366    print_time(data, &data->uptime, 20);
[24d0ee57]367    rtems_printf(data->printer, " Period: ");
[a8e4352]368    print_time(data, &data->period, 20);
369
370    /*
371     * Task count, load and idle levels.
372     */
[24d0ee57]373    rtems_printf(data->printer, "\nTasks: %4i  ", data->task_count);
[a8e4352]374
375    _Timestamp_Subtract(&data->idle, &data->total, &load);
376    _Timestamp_Divide(&load, &data->uptime, &ival, &fval);
[24d0ee57]377    rtems_printf(data->printer,
378                 "Load Average: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
[a8e4352]379    _Timestamp_Subtract(&data->current_idle, &data->current, &load);
380    _Timestamp_Divide(&load, &data->period, &ival, &fval);
[24d0ee57]381    rtems_printf(data->printer,
382                 "  Load: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
[a8e4352]383    _Timestamp_Divide(&data->current_idle, &data->period, &ival, &fval);
[24d0ee57]384    rtems_printf(data->printer,
385                 "  Idle: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
[a8e4352]386
387    /*
388     * Memory usage.
389     */
390    if (rtems_configuration_get_unified_work_area())
391    {
[24d0ee57]392      rtems_printf(data->printer, "\nMem: ");
[a8e4352]393      print_memsize(data, wksp.Free.total, "free");
394      print_memsize(data, wksp.Used.total, "used");
395    }
396    else
397    {
[d53862a]398      Heap_Information_block libc_heap;
[a8e4352]399      malloc_info(&libc_heap);
[24d0ee57]400      rtems_printf(data->printer, "\nMem: Wksp: ");
[a8e4352]401      print_memsize(data, wksp.Free.total, "free");
402      print_memsize(data, wksp.Used.total, "used  Heap: ");
403      print_memsize(data, libc_heap.Free.total, "free");
404      print_memsize(data, libc_heap.Used.total, "used");
405    }
[6031da4]406
[a8e4352]407    print_memsize(data, data->stack_size, "stack\n");
408
[24d0ee57]409    rtems_printf(data->printer,
[a8e4352]410       "\n"
411        " ID         | NAME                | RPRI | CPRI   | TIME                | TOTAL   | CURRENT\n"
412        "-%s---------+---------------------+-%s-----%s-----+---------------------+-%s------+--%s----\n",
413       data->sort_order == RTEMS_TOP_SORT_ID ? "^^" : "--",
414       data->sort_order == RTEMS_TOP_SORT_REAL_PRI ? "^^" : "--",
415       data->sort_order == RTEMS_TOP_SORT_CURRENT_PRI ? "^^" : "--",
416                          data->sort_order == RTEMS_TOP_SORT_TOTAL ? "^^" : "--",
417       data->sort_order == RTEMS_TOP_SORT_CURRENT ? "^^" : "--"
[6031da4]418    );
419
[a8e4352]420    task_count = 0;
421
422    for (i = 0; i < data->task_count; i++)
423    {
424      Thread_Control*   thread = data->tasks[i];
425      Timestamp_Control usage;
426      Timestamp_Control current_usage;
427
428      if (thread == NULL)
429        break;
[6031da4]430
[a8e4352]431      if (data->single_page && (data->show != 0) && (i >= data->show))
[6031da4]432        break;
433
434      /*
[a8e4352]435       * We need to count the number displayed to clear the remainder of the
436       * the display.
[6031da4]437       */
[a8e4352]438      ++task_count;
[6031da4]439
[a8e4352]440      /*
441       * If the API os POSIX print the entry point.
442       */
443      rtems_object_get_name(thread->Object.id, sizeof(name), name);
444      if (name[0] == '\0')
[ccd5434]445        snprintf(name, sizeof(name) - 1, "(%p)", thread->Start.Entry.Kinds.Numeric.entry);
[a8e4352]446
[24d0ee57]447      rtems_printf(data->printer,
[300f6a48]448                   " 0x%08" PRIx32 " | %-19s |  %3" PRId64 " |  %3" PRId64 "   | ",
[24d0ee57]449                   thread->Object.id,
450                   name,
[300f6a48]451                   thread->Real_priority.priority,
[b20b736]452                   _Thread_Get_priority(thread));
[a8e4352]453
454      usage = data->usage[i];
455      current_usage = data->current_usage[i];
456
457      /*
458       * Print the information
459       */
460      print_time(data, &usage, 19);
461      _Timestamp_Divide(&usage, &data->total, &ival, &fval);
[24d0ee57]462      rtems_printf(data->printer,
463                   " |%4" PRIu32 ".%03" PRIu32, ival, fval);
[a8e4352]464      _Timestamp_Divide(&current_usage, &data->period, &ival, &fval);
[24d0ee57]465      rtems_printf(data->printer,
466                   " |%4" PRIu32 ".%03" PRIu32 "\n", ival, fval);
[6031da4]467    }
468
[a8e4352]469    if (data->single_page && (data->show != 0) && (task_count < data->show))
[6031da4]470    {
[a8e4352]471      i = data->show - task_count;
472      while (i > 0)
[6031da4]473      {
[24d0ee57]474        rtems_printf(data->printer, "\x1b[K\n");
[a8e4352]475        i--;
[6031da4]476      }
477    }
478
[a8e4352]479    sc = rtems_event_receive(RTEMS_EVENT_1,
480                             RTEMS_EVENT_ANY,
481                             RTEMS_MILLISECONDS_TO_TICKS (data->poll_rate_usecs),
482                             &out);
483    if ((sc != RTEMS_SUCCESSFUL) && (sc != RTEMS_TIMEOUT))
484    {
[24d0ee57]485      rtems_printf(data->printer,
486                   "error: event receive: %s\n", rtems_status_text(sc));
[a8e4352]487      break;
488    }
[6031da4]489  }
[a8e4352]490
491  free(data->tasks);
492  free(data->last_tasks);
493  free(data->last_usage);
494  free(data->current_usage);
495
496  data->thread_active = false;
497
[f004b2b8]498  rtems_task_exit();
[6031da4]499}
500
501void rtems_cpu_usage_top_with_plugin(
[24d0ee57]502  const rtems_printer *printer
[6031da4]503)
504{
[a8e4352]505  rtems_status_code      sc;
506  rtems_task_priority    priority;
507  rtems_name             name;
508  rtems_id               id;
509  rtems_cpu_usage_data   data;
510  int                    show_lines = 25;
[6031da4]511
[a8e4352]512  memset(&data, 0, sizeof(data));
513
514  data.thread_run = true;
515  data.single_page = true;
516  data.sort_order = RTEMS_TOP_SORT_CURRENT;
517  data.poll_rate_usecs = 3000;
518  data.show = show_lines;
[24d0ee57]519  data.printer = printer;
[6031da4]520
521  sc = rtems_task_set_priority (RTEMS_SELF, RTEMS_CURRENT_PRIORITY, &priority);
522
523  if (sc != RTEMS_SUCCESSFUL)
524  {
[24d0ee57]525    rtems_printf (printer,
526                  "error: cannot obtain the current priority: %s\n", rtems_status_text (sc));
[6031da4]527    return;
528  }
529
530  name = rtems_build_name('C', 'P', 'l', 't');
531
532  sc = rtems_task_create (name, priority, 4 * 1024,
533                          RTEMS_NO_FLOATING_POINT | RTEMS_LOCAL,
534                          RTEMS_PREEMPT | RTEMS_TIMESLICE | RTEMS_NO_ASR,
535                          &id);
536
537  if (sc != RTEMS_SUCCESSFUL)
538  {
[24d0ee57]539    rtems_printf (printer,
540                  "error: cannot create helper thread: %s\n", rtems_status_text (sc));
[6031da4]541    return;
542  }
543
[24d0ee57]544  sc = rtems_task_start (id, rtems_cpuusage_top_thread, (rtems_task_argument) &data);
[6031da4]545  if (sc != RTEMS_SUCCESSFUL)
546  {
[24d0ee57]547    rtems_printf (printer,
548                  "error: cannot start helper thread: %s\n", rtems_status_text (sc));
[6031da4]549    rtems_task_delete (id);
550    return;
551  }
552
[a8e4352]553  while (true)
[6031da4]554  {
555    int c = getchar ();
556
[a8e4352]557    if ((c == '\r') || (c == '\n') || (c == 'q') || (c == 'Q'))
[6031da4]558    {
[a8e4352]559      int loops = 50;
[6031da4]560
[a8e4352]561      data.thread_run = false;
[6031da4]562
[a8e4352]563      rtems_event_send(id, RTEMS_EVENT_1);
564
565      while (loops && data.thread_active)
566        rtems_task_wake_after (RTEMS_MICROSECONDS_TO_TICKS (100000));
[6031da4]567
[24d0ee57]568      rtems_printf (printer, "load monitoring stopped.\n");
[6031da4]569      return;
570    }
[a8e4352]571    else if (c == '<')
572    {
573      if (data.sort_order == 0)
574        data.sort_order = RTEMS_TOP_SORT_MAX;
575      else
576        --data.sort_order;
577      rtems_event_send(id, RTEMS_EVENT_1);
578    }
579    else if (c == '>')
580    {
581      if (data.sort_order >= RTEMS_TOP_SORT_MAX)
582        data.sort_order = 0;
583      else
584        ++data.sort_order;
585      rtems_event_send(id, RTEMS_EVENT_1);
586    }
587    else if ((c == 's') || (c == 'S'))
588    {
589      data.single_page = !data.single_page;
590      rtems_event_send(id, RTEMS_EVENT_1);
591    }
592    else if ((c == 'a') || (c == 'A'))
593    {
594      if (data.show == 0)
595        data.show = show_lines;
596      else
597        data.show = 0;
598      rtems_event_send(id, RTEMS_EVENT_1);
599    }
600    else if (c == '+')
601    {
602      ++show_lines;
603      if (data.show != 0)
604        data.show = show_lines;
605    }
606    else if (c == '-')
607    {
608      if (show_lines > 5)
609        --show_lines;
610      if (data.show != 0)
611        data.show = show_lines;
612    }
613    else if (c == ' ')
614    {
615      rtems_event_send(id, RTEMS_EVENT_1);
616    }
[6031da4]617  }
618}
619
[24d0ee57]620void rtems_cpu_usage_top (void)
[6031da4]621{
[24d0ee57]622  rtems_printer printer;
623  rtems_print_printer_printk (&printer);
624  rtems_cpu_usage_top_with_plugin (&printer);
[6031da4]625}
Note: See TracBrowser for help on using the repository browser.