source: rtems/cpukit/libmisc/cpuuse/cpuusagetop.c @ a8e4352 (branch 4.11)

Last change on this file since a8e4352 was a8e4352, checked in by Chris Johns <chrisj@…>, on Apr 29, 2015 at 5:24:00 AM

libmisc/cpuuse: Top support for current load.

The cpuuse top command now supports a current load view, where the list of
tasks is ordered by the current load rather than the total cpu usage.
This lets you see what is using the processor at any given instant.

The ability to sort on a range of thread values is now supported.

Added memory usage stats for unified and separate workspace and C heaps, as
well as the allocated stack space.

Added a few more command keys to refresh the display, show all tasks in the
system, control the number of lines displayed, and a scrolling mode that does
not clear the display on each refresh.

Removed support for tick kernel builds. The tick support in the kernel is to
be removed.
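
As a usage illustration (not part of the commit itself), here is a minimal
sketch of how the top display might be started from an application task. The
plugin my_print is hypothetical; it only has to match rtems_printk_plugin_t,
and rtems_cpu_usage_top_with_plugin() is assumed to be declared by
<rtems/cpuuse.h>. The call blocks reading command keys from stdin until ENTER
or q is pressed.

#include <stdarg.h>
#include <stdio.h>
#include <rtems/cpuuse.h>

/* Hypothetical print plugin: forward the top output to stdout. */
static int my_print(void* context, const char* format, ...)
{
  va_list args;
  int     len;
  (void) context;
  va_start(args, format);
  len = vprintf(format, args);
  va_end(args);
  return len;
}

static void show_top(void)
{
  /* rtems_cpu_usage_top() would use the default printk output instead. */
  rtems_cpu_usage_top_with_plugin(NULL, my_print);
}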

/**
 * @file
 *
 * @brief CPU Usage Top
 * @ingroup libmisc_cpuuse CPU Usage
 */

/*
 *  COPYRIGHT (c) 2015. Chris Johns <chrisj@rtems.org>
 *
 *  COPYRIGHT (c) 2014.
 *  On-Line Applications Research Corporation (OAR).
 *
 *  The license and distribution terms for this file may be
 *  found in the file LICENSE in this distribution or at
 *  http://www.rtems.org/license/LICENSE.
 */

/*
 * Based on the old capture engine ct-load.
 */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdbool.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <ctype.h>
#include <inttypes.h>

#include <rtems/cpuuse.h>
#include <rtems/malloc.h>
#include <rtems/score/objectimpl.h>
#include <rtems/score/protectedheap.h>
#include <rtems/score/threadimpl.h>
#include <rtems/score/todimpl.h>
#include <rtems/score/watchdogimpl.h>
#include <rtems/score/wkspace.h>

/*
 * Common variable to sync the load monitor task.
 */
typedef struct
{
  void*                  context;
  rtems_printk_plugin_t  print;
} rtems_cpu_usage_plugin;

/*
 * Use a struct for all data to allow more than one top and to support the
 * thread iterator.
 */
typedef struct
{
  volatile bool          thread_run;
  volatile bool          thread_active;
  volatile bool          single_page;
  volatile uint32_t      sort_order;
  volatile uint32_t      poll_rate_usecs;
  volatile uint32_t      show;
  rtems_cpu_usage_plugin plugin;
  Thread_CPU_usage_t     zero;
  Timestamp_Control      uptime;
  Timestamp_Control      last_uptime;
  Timestamp_Control      period;
  int                    task_count;        /* Number of tasks. */
  int                    last_task_count;   /* Number of tasks in the previous sample. */
  int                    task_size;         /* The size of the arrays. */
  Thread_Control**       tasks;             /* List of tasks in this sample. */
  Thread_Control**       last_tasks;        /* List of tasks in the last sample. */
  Thread_CPU_usage_t*    usage;             /* Usage of tasks in this sample. */
  Thread_CPU_usage_t*    last_usage;        /* Usage of tasks in the last sample. */
  Thread_CPU_usage_t*    current_usage;     /* Current usage for this sample. */
  Timestamp_Control      total;             /* Total run time, should equal the uptime. */
  Timestamp_Control      idle;              /* Time spent in idle. */
  Timestamp_Control      current;           /* Current time run in this period. */
  Timestamp_Control      current_idle;      /* Current time in idle this period. */
  uint32_t               stack_size;        /* Size of stack allocated. */
} rtems_cpu_usage_data;

/*
 * Sort orders.
 */
#define RTEMS_TOP_SORT_ID            (0)
#define RTEMS_TOP_SORT_REAL_PRI      (1)
#define RTEMS_TOP_SORT_CURRENT_PRI   (2)
#define RTEMS_TOP_SORT_TOTAL         (3)
#define RTEMS_TOP_SORT_CURRENT       (4)
#define RTEMS_TOP_SORT_MAX           (4)
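/*
 * The '<' and '>' command keys cycle the sort order through these values,
 * wrapping at RTEMS_TOP_SORT_MAX; RTEMS_TOP_SORT_CURRENT is the default.
 */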

/*
 * Private version of the iterator with an arg. This will be moved
 * to the public version in 5.0.
 */

typedef void (*rtems_per_thread_routine_2)( Thread_Control *, void* );

void rtems_iterate_over_all_threads_2(rtems_per_thread_routine_2 routine,
                                      void*                      arg);

void rtems_iterate_over_all_threads_2(rtems_per_thread_routine_2 routine,
                                      void*                      arg)
{
  uint32_t             i;
  uint32_t             api_index;
  Thread_Control      *the_thread;
  Objects_Information *information;

  if ( !routine )
    return;

  for ( api_index = 1 ; api_index <= OBJECTS_APIS_LAST ; api_index++ ) {
    #if !defined(RTEMS_POSIX_API) || defined(RTEMS_DEBUG)
      if ( !_Objects_Information_table[ api_index ] )
        continue;
    #endif
    information = _Objects_Information_table[ api_index ][ 1 ];
    if ( information ) {
      for ( i=1 ; i <= information->maximum ; i++ ) {
        the_thread = (Thread_Control *)information->local_table[ i ];
        if ( the_thread )
          (*routine)(the_thread, arg);
      }
    }
  }
}

static inline bool equal_to_uint32_t( uint32_t * lhs, uint32_t * rhs )
{
   if ( *lhs == *rhs )
     return true;
   else
     return false;
}

static inline bool less_than_uint32_t( uint32_t * lhs, uint32_t * rhs )
{
   if ( *lhs < *rhs )
    return true;
   else
    return false;
}

#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
  #define CPU_usage_Equal_to( _lhs, _rhs ) \
          _Timestamp_Equal_to( _lhs, _rhs )
#else
  #define CPU_usage_Equal_to( _lhs, _rhs ) \
          equal_to_uint32_t( _lhs, _rhs )
#endif

#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
  #define CPU_usage_Set_to_zero( _time ) \
         _Timestamp_Set_to_zero( _time )
#else
  #define CPU_usage_Set_to_zero( _time ) \
       do { \
         *_time = 0; \
       } while (0)
#endif

#ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
  #define CPU_usage_Less_than( _lhs, _rhs ) \
        _Timestamp_Less_than( _lhs, _rhs )
#else
  #define CPU_usage_Less_than( _lhs, _rhs ) \
         less_than_uint32_t( _lhs, _rhs )
#endif

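/*
 * Print a size with a rough human readable scale, for example 2097152
 * prints as "   2M" and 2048 as "   2K", followed by the label.
 */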
static void
print_memsize(rtems_cpu_usage_data* data, const uint32_t size, const char* label)
{
  if (size > (1024 * 1024))
    (*data->plugin.print)(data->plugin.context, "%4" PRIu32 "M %s",
                          size / (1024 * 1024), label);
  else if (size > 1024)
    (*data->plugin.print)(data->plugin.context, "%4" PRIu32 "K %s",
                          size / 1024, label);
  else
    (*data->plugin.print)(data->plugin.context, "%4" PRIu32 " %s",
                          size, label);
}

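/*
 * Print a timestamp as [days]d[hours]hr[mins]m followed by seconds and
 * microseconds, for example "1d1hr1m1.000000", padded with spaces out to
 * the requested field length.
 */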
static int
print_time(rtems_cpu_usage_data*    data,
           const Timestamp_Control* time,
           const int                length)
{
  uint32_t secs = _Timestamp_Get_seconds( time );
  uint32_t usecs = _Timestamp_Get_nanoseconds( time ) / TOD_NANOSECONDS_PER_MICROSECOND;
  int      len = 0;

  if (secs > 60)
  {
    uint32_t mins = secs / 60;
    if (mins > 60)
    {
      uint32_t hours = mins / 60;
      if (hours > 24)
      {
        len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 "d", hours / 24);
        hours %= 24;
      }
      len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 "hr", hours);
      mins %= 60;
    }
    len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 "m", mins);
    secs %= 60;
  }
  len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 ".%06" PRIu32, secs, usecs);

  if (len < length)
    (*data->plugin.print)(data->plugin.context, "%*c", length - len, ' ');

  return len;
}

/*
 * Count the number of tasks.
 */
static void
task_counter(Thread_Control *thread, void* arg)
{
  rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg;
  ++data->task_count;
}

/*
 * Create the sorted table with the current and total usage.
 */
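/*
 * The current usage is the time accumulated since the previous sample, found
 * by matching the thread against the last_* arrays. The hard coded id
 * 0x09010001 identifies the IDLE thread, whose usage is recorded separately.
 * Each thread is insertion sorted into the table before the first entry it
 * outranks; the switch cases fall through, so the comparisons for the less
 * significant keys act as tie breakers.
 */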
static void
task_usage(Thread_Control* thread, void* arg)
{
  rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg;
  Thread_CPU_usage_t    usage = thread->cpu_time_used;
  Thread_CPU_usage_t    current = data->zero;
  int                   j;

  data->stack_size += thread->Start.Initial_stack.size;

  for (j = 0; j < data->last_task_count; j++)
  {
    if (thread == data->last_tasks[j])
    {
      _Timestamp_Subtract(&data->last_usage[j], &usage, &current);
      break;
    }
  }

  /*
   * When not using nanosecond CPU usage resolution, we have to count the
   * number of "ticks" we gave credit for to give the user a rough guideline as
   * to what each number means proportionally.
   */
  _Timestamp_Add_to(&data->total, &usage);
  _Timestamp_Add_to(&data->current, &current);

  if (thread->Object.id == 0x09010001)
  {
    data->idle = usage;
    data->current_idle = current;
  }

  /*
   * Create the tasks to display, sorting as we create.
   */
  for (j = 0; j < data->task_count; j++)
  {
    if (data->tasks[j])
    {
      int k;

      /*
       * Sort on the selected sort order.
       */
      switch (data->sort_order)
      {
        default:
          data->sort_order = RTEMS_TOP_SORT_CURRENT;
          /* drop through */
        case RTEMS_TOP_SORT_CURRENT:
          if (CPU_usage_Equal_to(&current, &data->zero) ||
              CPU_usage_Less_than(&current, &data->current_usage[j]))
            continue;
        case RTEMS_TOP_SORT_TOTAL:
          if (CPU_usage_Equal_to(&usage, &data->zero) ||
              CPU_usage_Less_than(&usage, &data->usage[j]))
            continue;
        case RTEMS_TOP_SORT_REAL_PRI:
          if (thread->real_priority > data->tasks[j]->real_priority)
            continue;
        case RTEMS_TOP_SORT_CURRENT_PRI:
          if (thread->current_priority > data->tasks[j]->current_priority)
            continue;
        case RTEMS_TOP_SORT_ID:
          if (thread->Object.id < data->tasks[j]->Object.id)
            continue;
      }

      for (k = (data->task_count - 1); k >= j; k--)
      {
        data->tasks[k + 1] = data->tasks[k];
        data->usage[k + 1]  = data->usage[k];
        data->current_usage[k + 1]  = data->current_usage[k];
      }
    }
    data->tasks[j] = thread;
    data->usage[j] = usage;
    data->current_usage[j] = current;
    break;
  }
}

/*
 * rtems_cpuusage_top_thread
 *
 * This function displays the load of the tasks on an ANSI terminal.
 */

static void
rtems_cpuusage_top_thread (rtems_task_argument arg)
{
  rtems_cpu_usage_data*  data = (rtems_cpu_usage_data*) arg;
  char                   name[13];
  int                    i;
  Heap_Information_block wksp;
  uint32_t               ival, fval;
  int                    task_count;
  rtems_event_set        out;
  rtems_status_code      sc;
  bool                   first_time = true;

  data->thread_active = true;

  _TOD_Get_uptime(&data->last_uptime);

  CPU_usage_Set_to_zero(&data->zero);

  while (data->thread_run)
  {
    Timestamp_Control uptime_at_last_reset = CPU_usage_Uptime_at_last_reset;
    size_t            tasks_size;
    size_t            usage_size;
    Timestamp_Control load;

    data->task_count = 0;
    rtems_iterate_over_all_threads_2(task_counter, data);

    tasks_size = sizeof(Thread_Control*) * (data->task_count + 1);
    usage_size = sizeof(Thread_CPU_usage_t) * (data->task_count + 1);

    if (data->task_count > data->task_size)
    {
      data->tasks = realloc(data->tasks, tasks_size);
      data->usage = realloc(data->usage, usage_size);
      data->current_usage = realloc(data->current_usage, usage_size);
      if ((data->tasks == NULL) || (data->usage == NULL) || (data->current_usage == NULL))
      {
        (*data->plugin.print)(data->plugin.context, "top worker: error: no memory\n");
        data->thread_run = false;
        break;
      }
    }

    memset(data->tasks, 0, tasks_size);
    memset(data->usage, 0, usage_size);
    memset(data->current_usage, 0, usage_size);

    _Timestamp_Set_to_zero(&data->total);
    _Timestamp_Set_to_zero(&data->current);
    data->stack_size = 0;

    _TOD_Get_uptime(&data->uptime);
    _Timestamp_Subtract(&uptime_at_last_reset, &data->uptime, &data->uptime);
    _Timestamp_Subtract(&data->last_uptime, &data->uptime, &data->period);
    data->last_uptime = data->uptime;

    rtems_iterate_over_all_threads_2(task_usage, data);

    if (data->task_count > data->task_size)
    {
      data->last_tasks = realloc(data->last_tasks, tasks_size);
      data->last_usage = realloc(data->last_usage, usage_size);
      if ((data->last_tasks == NULL) || (data->last_usage == NULL))
      {
        (*data->plugin.print)(data->plugin.context, "top worker: error: no memory\n");
        data->thread_run = false;
        break;
      }
      data->task_size = data->task_count;
    }

    memcpy(data->last_tasks, data->tasks, tasks_size);
    memcpy(data->last_usage, data->usage, usage_size);
    data->last_task_count = data->task_count;

    /*
     * We need to loop again to get suitable current usage values as we need a
     * last sample to work with.
     */
    if (first_time)
    {
      rtems_task_wake_after(RTEMS_MILLISECONDS_TO_TICKS(500));
      first_time = false;
      continue;
    }

    _Protected_heap_Get_information(&_Workspace_Area, &wksp);

    if (data->single_page)
      (*data->plugin.print)(data->plugin.context,
                            "\x1b[H\x1b[J"
                            " ENTER:Exit  SPACE:Refresh"
                            "  S:Scroll  A:All  <>:Order  +/-:Lines\n");
    (*data->plugin.print)(data->plugin.context,"\n");

    /*
     * Uptime and period of this sample.
     */
    (*data->plugin.print)(data->plugin.context, "Uptime: ");
    print_time(data, &data->uptime, 20);
    (*data->plugin.print)(data->plugin.context, " Period: ");
    print_time(data, &data->period, 20);

    /*
     * Task count, load and idle levels.
     */
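    /*
     * Load Average is (total - idle) over the uptime since the last reset,
     * Load is (current - current_idle) over this sample period and Idle is
     * current_idle over the period, each shown as a percentage with three
     * fractional digits.
     */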
    (*data->plugin.print)(data->plugin.context, "\nTasks: %4i  ", data->task_count);

    _Timestamp_Subtract(&data->idle, &data->total, &load);
    _Timestamp_Divide(&load, &data->uptime, &ival, &fval);
    (*data->plugin.print)(data->plugin.context,
                          "Load Average: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
    _Timestamp_Subtract(&data->current_idle, &data->current, &load);
    _Timestamp_Divide(&load, &data->period, &ival, &fval);
    (*data->plugin.print)(data->plugin.context,
                          "  Load: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
    _Timestamp_Divide(&data->current_idle, &data->period, &ival, &fval);
    (*data->plugin.print)(data->plugin.context,
                          "  Idle: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);

    /*
     * Memory usage.
     */
    if (rtems_configuration_get_unified_work_area())
    {
      (*data->plugin.print)(data->plugin.context, "\nMem: ");
      print_memsize(data, wksp.Free.total, "free");
      print_memsize(data, wksp.Used.total, "used");
    }
    else
    {
      region_information_block libc_heap;
      malloc_info(&libc_heap);
      (*data->plugin.print)(data->plugin.context, "\nMem: Wksp: ");
      print_memsize(data, wksp.Free.total, "free");
      print_memsize(data, wksp.Used.total, "used  Heap: ");
      print_memsize(data, libc_heap.Free.total, "free");
      print_memsize(data, libc_heap.Used.total, "used");
    }

    print_memsize(data, data->stack_size, "stack\n");

    (*data->plugin.print)(data->plugin.context,
       "\n"
        " ID         | NAME                | RPRI | CPRI   | TIME                | TOTAL   | CURRENT\n"
        "-%s---------+---------------------+-%s-----%s-----+---------------------+-%s------+--%s----\n",
       data->sort_order == RTEMS_TOP_SORT_ID ? "^^" : "--",
       data->sort_order == RTEMS_TOP_SORT_REAL_PRI ? "^^" : "--",
       data->sort_order == RTEMS_TOP_SORT_CURRENT_PRI ? "^^" : "--",
       data->sort_order == RTEMS_TOP_SORT_TOTAL ? "^^" : "--",
       data->sort_order == RTEMS_TOP_SORT_CURRENT ? "^^" : "--"
    );

    task_count = 0;

    for (i = 0; i < data->task_count; i++)
    {
      Thread_Control*   thread = data->tasks[i];
      Timestamp_Control last;
      Timestamp_Control usage;
      Timestamp_Control current_usage;

      if (thread == NULL)
        break;

      if (data->single_page && (data->show != 0) && (i >= data->show))
        break;

      /*
       * We need to count the number displayed to clear the remainder of
       * the display.
       */
      ++task_count;

      /*
       * If the API is POSIX print the entry point.
       */
      rtems_object_get_name(thread->Object.id, sizeof(name), name);
      if (name[0] == '\0')
        snprintf(name, sizeof(name) - 1, "(%p)", thread->Start.entry_point);

      (*data->plugin.print)(data->plugin.context,
                            " 0x%08" PRIx32 " | %-19s |  %3" PRId32 " |  %3" PRId32 "   | ",
                            thread->Object.id,
                            name,
                            thread->real_priority,
                            thread->current_priority);

      usage = data->usage[i];
      current_usage = data->current_usage[i];

      /*
       * If this is the currently executing thread, account for time since
       * the last context switch.
       */
      if (_Thread_Get_time_of_last_context_switch(thread, &last))
      {
        Timestamp_Control used;
        Timestamp_Control now;

        /*
         * Get the current uptime and assume we are not pre-empted to
         * measure the time from the last switch of this thread until now.
         */
        _TOD_Get_uptime(&now);
        _Timestamp_Subtract(&last, &now, &used);
        _Timestamp_Add_to(&usage, &used);
        _Timestamp_Add_to(&current_usage, &used);
      }

      /*
       * Print the information
       */
      print_time(data, &usage, 19);
      _Timestamp_Divide(&usage, &data->total, &ival, &fval);
      (*data->plugin.print)(data->plugin.context,
                            " |%4" PRIu32 ".%03" PRIu32, ival, fval);
      _Timestamp_Divide(&current_usage, &data->period, &ival, &fval);
      (*data->plugin.print)(data->plugin.context,
                            " |%4" PRIu32 ".%03" PRIu32 "\n", ival, fval);
    }

    if (data->single_page && (data->show != 0) && (task_count < data->show))
    {
      i = data->show - task_count;
      while (i > 0)
      {
        (*data->plugin.print)(data->plugin.context, "\x1b[K\n");
        i--;
      }
    }

    sc = rtems_event_receive(RTEMS_EVENT_1,
                             RTEMS_EVENT_ANY,
                             RTEMS_MILLISECONDS_TO_TICKS (data->poll_rate_usecs),
                             &out);
    if ((sc != RTEMS_SUCCESSFUL) && (sc != RTEMS_TIMEOUT))
    {
      (*data->plugin.print)(data->plugin.context,
                            "error: event receive: %s\n", rtems_status_text(sc));
      break;
    }
  }

  free(data->tasks);
  free(data->last_tasks);
  free(data->usage);
  free(data->last_usage);
  free(data->current_usage);

  data->thread_active = false;

  rtems_task_delete (RTEMS_SELF);
}

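/*
 * Start the top display. A helper task created at the caller's priority
 * renders the output while this function blocks reading command keys from
 * stdin; RTEMS_EVENT_1 wakes the helper for an immediate refresh.
 */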
void rtems_cpu_usage_top_with_plugin(
  void                  *context,
  rtems_printk_plugin_t  print
)
{
#ifdef __RTEMS_USE_TICKS_FOR_STATISTICS__
  if ( !print )
    return;
  (*print)(context, "error: tick kernels not supported\n");
#else
  rtems_status_code      sc;
  rtems_task_priority    priority;
  rtems_name             name;
  rtems_id               id;
  rtems_cpu_usage_data   data;
  int                    show_lines = 25;

  if ( !print )
    return;

  memset(&data, 0, sizeof(data));

  data.thread_run = true;
  data.single_page = true;
  data.sort_order = RTEMS_TOP_SORT_CURRENT;
  data.poll_rate_usecs = 3000;
  data.show = show_lines;
  data.plugin.context = context;
  data.plugin.print = print;

  sc = rtems_task_set_priority (RTEMS_SELF, RTEMS_CURRENT_PRIORITY, &priority);

  if (sc != RTEMS_SUCCESSFUL)
  {
    (*print)(
       context,
       "error: cannot obtain the current priority: %s\n",
       rtems_status_text (sc)
    );
    return;
  }

  name = rtems_build_name('C', 'P', 'l', 't');

  sc = rtems_task_create (name, priority, 4 * 1024,
                          RTEMS_PREEMPT | RTEMS_TIMESLICE | RTEMS_NO_ASR,
                          RTEMS_NO_FLOATING_POINT | RTEMS_LOCAL,
                          &id);

  if (sc != RTEMS_SUCCESSFUL)
  {
    (*print)(
       context,
       "error: cannot create helper thread: %s\n",
       rtems_status_text (sc)
    );
    return;
  }

  sc = rtems_task_start (
    id, rtems_cpuusage_top_thread, (rtems_task_argument) &data
  );
  if (sc != RTEMS_SUCCESSFUL)
  {
    (*print)(
       context,
       "error: cannot start helper thread: %s\n",
       rtems_status_text (sc)
    );
    rtems_task_delete (id);
    return;
  }

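  /*
   * Keyboard loop: ENTER or q quits, < and > change the sort order, s
   * toggles the scrolling mode, a toggles showing all tasks, + and -
   * adjust the number of lines shown and SPACE forces a refresh.
   */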
  while (true)
  {
    int c = getchar ();

    if ((c == '\r') || (c == '\n') || (c == 'q') || (c == 'Q'))
    {
      int loops = 50;

      data.thread_run = false;

      rtems_event_send(id, RTEMS_EVENT_1);

      while (loops-- && data.thread_active)
        rtems_task_wake_after (RTEMS_MICROSECONDS_TO_TICKS (100000));

      (*print)(context, "load monitoring stopped.\n");
      return;
    }
    else if (c == '<')
    {
      if (data.sort_order == 0)
        data.sort_order = RTEMS_TOP_SORT_MAX;
      else
        --data.sort_order;
      rtems_event_send(id, RTEMS_EVENT_1);
    }
    else if (c == '>')
    {
      if (data.sort_order >= RTEMS_TOP_SORT_MAX)
        data.sort_order = 0;
      else
        ++data.sort_order;
      rtems_event_send(id, RTEMS_EVENT_1);
    }
    else if ((c == 's') || (c == 'S'))
    {
      data.single_page = !data.single_page;
      rtems_event_send(id, RTEMS_EVENT_1);
    }
    else if ((c == 'a') || (c == 'A'))
    {
      if (data.show == 0)
        data.show = show_lines;
      else
        data.show = 0;
      rtems_event_send(id, RTEMS_EVENT_1);
    }
    else if (c == '+')
    {
      ++show_lines;
      if (data.show != 0)
        data.show = show_lines;
    }
    else if (c == '-')
    {
      if (show_lines > 5)
        --show_lines;
      if (data.show != 0)
        data.show = show_lines;
    }
    else if (c == ' ')
    {
      rtems_event_send(id, RTEMS_EVENT_1);
    }
  }
#endif
}

void rtems_cpu_usage_top( void )
{
  rtems_cpu_usage_top_with_plugin( NULL, printk_plugin );
}