source: rtems/cpukit/libmisc/cpuuse/cpuusagetop.c @ e6b31b27

4.11
Last change on this file since e6b31b27 was e6b31b27, checked in by Joel Sherrill <joel.sherrill@…>, on May 27, 2015 at 3:13:58 PM

Remove use ticks for statistics configure option.

This was obsolete and broken based upon recent time keeping changes.

This build option was previously enabled by adding
USE_TICKS_FOR_STATISTICS=1 to the configure command line.

This propagated into the code as preprocessor conditionals
using the RTEMS_USE_TICKS_FOR_STATISTICS conditional.

  • Property mode set to 100644
File size: 20.1 KB
Line 
1/**
2 * @file
3 *
4 * @brief CPU Usage Top
5 * @ingroup libmisc_cpuuse CPU Usage
6 */
7
8/*
9 *  COPYRIGHT (c) 2015. Chris Johns <chrisj@rtems.org>
10 *
11 *  COPYRIGHT (c) 2014.
12 *  On-Line Applications Research Corporation (OAR).
13 *
14 *  The license and distribution terms for this file may be
15 *  found in the file LICENSE in this distribution or at
16 *  http://www.rtems.org/license/LICENSE.
17 */
18
19/*
20 * Based on the old capture engine ct-load.
21 */
22
23#ifdef HAVE_CONFIG_H
24#include "config.h"
25#endif
26
27#include <stdbool.h>
28#include <string.h>
29#include <stdlib.h>
30#include <stdio.h>
31#include <ctype.h>
32#include <inttypes.h>
33
34#include <rtems/cpuuse.h>
35#include <rtems/malloc.h>
36#include <rtems/score/objectimpl.h>
37#include <rtems/score/protectedheap.h>
38#include <rtems/score/threadimpl.h>
39#include <rtems/score/todimpl.h>
40#include <rtems/score/watchdogimpl.h>
41#include <rtems/score/wkspace.h>
42
/*
 * Output plugin used by the top display: an opaque context plus a
 * printf-style print function supplied by the caller.
 */
typedef struct
{
  void*                  context;
  rtems_printk_plugin_t  print;
} rtems_cpu_usage_plugin;
51
/*
 * Use a struct for all data to allow more than one top and to support the
 * thread iterator.
 */
typedef struct
{
  volatile bool          thread_run;        /* Cleared by the front end to stop the worker. */
  volatile bool          thread_active;     /* Set while the worker thread is running. */
  volatile bool          single_page;       /* Redraw in place with ANSI codes, else scroll. */
  volatile uint32_t      sort_order;        /* One of the RTEMS_TOP_SORT_* values. */
  volatile uint32_t      poll_rate_usecs;   /* Refresh period; NOTE(review): passed to a
                                               milliseconds conversion — name and unit disagree. */
  volatile uint32_t      show;              /* Maximum lines to display; 0 shows all tasks. */
  rtems_cpu_usage_plugin plugin;            /* Output printer and its context. */
  Thread_CPU_usage_t     zero;              /* Zeroed usage value used for comparisons. */
  Timestamp_Control      uptime;            /* Uptime at this sample. */
  Timestamp_Control      last_uptime;       /* Uptime at the previous sample. */
  Timestamp_Control      period;            /* Length of this sample period. */
  int                    task_count;        /* Number of tasks. */
  int                    last_task_count;   /* Number of tasks in the previous sample. */
  int                    task_size;         /* The size of the arrays */
  Thread_Control**       tasks;             /* List of tasks in this sample. */
  Thread_Control**       last_tasks;        /* List of tasks in the last sample. */
  Thread_CPU_usage_t*    usage;             /* Usage of task's in this sample. */
  Thread_CPU_usage_t*    last_usage;        /* Usage of task's in the last sample. */
  Thread_CPU_usage_t*    current_usage;     /* Current usage for this sample. */
  Timestamp_Control      total;             /* Total run run, should equal the uptime. */
  Timestamp_Control      idle;              /* Time spent in idle. */
  Timestamp_Control      current;           /* Current time run in this period. */
  Timestamp_Control      current_idle;      /* Current time in idle this period. */
  uint32_t               stack_size;        /* Size of stack allocated. */
} rtems_cpu_usage_data;
83
/*
 * Sort orders for the task table, cycled with the '<' and '>' keys.
 */
#define RTEMS_TOP_SORT_ID            (0)   /* Sort by object id. */
#define RTEMS_TOP_SORT_REAL_PRI      (1)   /* Sort by real priority. */
#define RTEMS_TOP_SORT_CURRENT_PRI   (2)   /* Sort by current priority. */
#define RTEMS_TOP_SORT_TOTAL         (3)   /* Sort by total CPU usage. */
#define RTEMS_TOP_SORT_CURRENT       (4)   /* Sort by usage in the last period. */
#define RTEMS_TOP_SORT_MAX           (4)   /* Highest valid sort order value. */
93
/*
 * Private version of the iterator with an arg. This will be moved
 * to the public version in 5.0.
 */

/* Callback invoked once per thread, together with the caller's argument. */
typedef void (*rtems_per_thread_routine_2)( Thread_Control *, void* );

void rtems_iterate_over_all_threads_2(rtems_per_thread_routine_2 routine,
                                      void*                      arg);
103
/*
 * Visit every thread of every object API and call the routine with the
 * thread and the caller supplied argument.  A NULL routine is a no-op.
 */
void rtems_iterate_over_all_threads_2(rtems_per_thread_routine_2 routine,
                                      void*                      arg)
{
  uint32_t             i;
  uint32_t             api_index;
  Thread_Control      *the_thread;
  Objects_Information *information;

  if ( !routine )
    return;

  for ( api_index = 1 ; api_index <= OBJECTS_APIS_LAST ; api_index++ ) {
    #if !defined(RTEMS_POSIX_API) || defined(RTEMS_DEBUG)
      /* Some APIs may be absent from the build; skip empty table slots. */
      if ( !_Objects_Information_table[ api_index ] )
        continue;
    #endif
    /* NOTE(review): index 1 is presumably the thread object class of each
     * API — confirm against the Objects_Information table layout. */
    information = _Objects_Information_table[ api_index ][ 1 ];
    if ( information ) {
      /* Local object indices are 1-based; slots may be NULL. */
      for ( i=1 ; i <= information->maximum ; i++ ) {
        the_thread = (Thread_Control *)information->local_table[ i ];
        if ( the_thread )
          (*routine)(the_thread, arg);
      }
    }
  }
}
130
/* True when the two pointed-to values compare equal. */
static inline bool equal_to_uint32_t( uint32_t * lhs, uint32_t * rhs )
{
  return *lhs == *rhs;
}
138
/* True when the value at lhs is strictly below the value at rhs. */
static inline bool less_than_uint32_t( uint32_t * lhs, uint32_t * rhs )
{
  return *lhs < *rhs;
}
146
/* Thin aliases of the score timestamp operations used for CPU usage
 * comparison and reset. */
#define CPU_usage_Equal_to( _lhs, _rhs ) \
        _Timestamp_Equal_to( _lhs, _rhs )

#define CPU_usage_Set_to_zero( _time ) \
       _Timestamp_Set_to_zero( _time )

#define CPU_usage_Less_than( _lhs, _rhs ) \
      _Timestamp_Less_than( _lhs, _rhs )
155
156static void
157print_memsize(rtems_cpu_usage_data* data, const uint32_t size, const char* label)
158{
159  if (size > (1024 * 1024))
160    (*data->plugin.print)(data->plugin.context, "%4" PRIu32 "M %s",
161                          size / (1024 * 1024), label);
162  else if (size > 1024)
163    (*data->plugin.print)(data->plugin.context, "%4" PRIu32 "K %s",
164                          size / 1024, label);
165  else
166    (*data->plugin.print)(data->plugin.context, "%4" PRIu32 " %s",
167                          size, label);
168}
169
170static int
171print_time(rtems_cpu_usage_data*    data,
172           const Timestamp_Control* time,
173           const int                length)
174{
175  uint32_t secs = _Timestamp_Get_seconds( time );
176  uint32_t usecs = _Timestamp_Get_nanoseconds( time ) / TOD_NANOSECONDS_PER_MICROSECOND;
177  int      len = 0;
178
179  if (secs > 60)
180  {
181    uint32_t mins = secs / 60;
182    if (mins > 60)
183    {
184      uint32_t hours = mins / 60;
185      if (hours > 24)
186      {
187        len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 "d", hours / 24);
188        hours %= 24;
189      }
190      len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 "hr", hours);
191      mins %= 60;
192    }
193    len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 "m", mins);
194    secs %= 60;
195  }
196  len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 ".%06" PRIu32, secs, usecs);
197
198  if (len < length)
199    (*data->plugin.print)(data->plugin.context, "%*c", length - len, ' ');
200
201  return len;
202}
203
204/*
205 * Count the number of tasks.
206 */
207static void
208task_counter(Thread_Control *thrad, void* arg)
209{
210  rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg;
211  ++data->task_count;
212}
213
/*
 * Create the sorted table with the current and total usage.
 *
 * Thread iterator callback: computes this thread's usage for the current
 * period (total minus the previous sample), accumulates the grand totals,
 * and inserts the thread into data->tasks at its sorted position.
 */
static void
task_usage(Thread_Control* thread, void* arg)
{
  rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg;
  Thread_CPU_usage_t    usage = thread->cpu_time_used;
  Thread_CPU_usage_t    current = data->zero;
  int                   j;

  data->stack_size += thread->Start.Initial_stack.size;

  /* Usage accrued this period = total usage minus the previous sample
   * (zero if the thread was not present in the last sample). */
  for (j = 0; j < data->last_task_count; j++)
  {
    if (thread == data->last_tasks[j])
    {
      _Timestamp_Subtract(&data->last_usage[j], &usage, &current);
      break;
    }
  }

  /*
   * Accumulate the totals used for the load and idle percentages.
   */
  _Timestamp_Add_to(&data->total, &usage);
  _Timestamp_Add_to(&data->current, &current);

  /* NOTE(review): hard-coded object id, presumably the internal idle
   * thread — confirm this holds for the target configuration (SMP,
   * different CPU counts). */
  if (thread->Object.id == 0x09010001)
  {
    data->idle = usage;
    data->current_idle = current;
  }

  /*
   * Create the tasks to display sorting as we create.
   */
  for (j = 0; j < data->task_count; j++)
  {
    if (data->tasks[j])
    {
      int k;

      /*
       * Sort on the current load.
       *
       * NOTE(review): the cases below have no break, so each sort order
       * also applies every comparison that follows it — confirm the
       * fall-through is intentional tie-breaking and not missing breaks.
       */
      switch (data->sort_order)
      {
        default:
          data->sort_order = RTEMS_TOP_SORT_CURRENT;
          /* drop through */
        case RTEMS_TOP_SORT_CURRENT:
          if (CPU_usage_Equal_to(&current, &data->zero) ||
              CPU_usage_Less_than(&current, &data->current_usage[j]))
            continue;
        case RTEMS_TOP_SORT_TOTAL:
          if (CPU_usage_Equal_to(&usage, &data->zero) ||
              CPU_usage_Less_than(&usage, &data->usage[j]))
            continue;
        case RTEMS_TOP_SORT_REAL_PRI:
          if (thread->real_priority > data->tasks[j]->real_priority)
            continue;
        case RTEMS_TOP_SORT_CURRENT_PRI:
          if (thread->current_priority > data->tasks[j]->current_priority)
            continue;
        case RTEMS_TOP_SORT_ID:
          if (thread->Object.id < data->tasks[j]->Object.id)
            continue;
      }

      /* Shift the remaining entries down to open slot j; the arrays are
       * allocated with task_count + 1 elements so k + 1 stays in bounds. */
      for (k = (data->task_count - 1); k >= j; k--)
      {
        data->tasks[k + 1] = data->tasks[k];
        data->usage[k + 1]  = data->usage[k];
        data->current_usage[k + 1]  = data->current_usage[k];
      }
    }
    data->tasks[j] = thread;
    data->usage[j] = usage;
    data->current_usage[j] = current;
    break;
  }
}
299
300/*
301 * rtems_cpuusage_top_thread
302 *
303 * This function displays the load of the tasks on an ANSI terminal.
304 */
305
306static void
307rtems_cpuusage_top_thread (rtems_task_argument arg)
308{
309  rtems_cpu_usage_data*  data = (rtems_cpu_usage_data*) arg;
310  char                   name[13];
311  int                    i;
312  Heap_Information_block wksp;
313  uint32_t               ival, fval;
314  int                    task_count;
315  rtems_event_set        out;
316  rtems_status_code      sc;
317  bool                   first_time = true;
318
319  data->thread_active = true;
320
321  _TOD_Get_uptime(&data->last_uptime);
322
323  CPU_usage_Set_to_zero(&data->zero);
324
325  while (data->thread_run)
326  {
327    Timestamp_Control uptime_at_last_reset = CPU_usage_Uptime_at_last_reset;
328    size_t            tasks_size;
329    size_t            usage_size;
330    Timestamp_Control load;
331
332    data->task_count = 0;
333    rtems_iterate_over_all_threads_2(task_counter, data);
334
335    tasks_size = sizeof(Thread_Control*) * (data->task_count + 1);
336    usage_size = sizeof(Thread_CPU_usage_t) * (data->task_count + 1);
337
338    if (data->task_count > data->task_size)
339    {
340      data->tasks = realloc(data->tasks, tasks_size);
341      data->usage = realloc(data->usage, usage_size);
342      data->current_usage = realloc(data->current_usage, usage_size);
343      if ((data->tasks == NULL) || (data->usage == NULL) || (data->current_usage == NULL))
344      {
345        (*data->plugin.print)(data->plugin.context, "top worker: error: no memory\n");
346        data->thread_run = false;
347        break;
348      }
349    }
350
351    memset(data->tasks, 0, tasks_size);
352    memset(data->usage, 0, usage_size);
353    memset(data->current_usage, 0, usage_size);
354
355    _Timestamp_Set_to_zero(&data->total);
356    _Timestamp_Set_to_zero(&data->current);
357    data->stack_size = 0;
358
359    _TOD_Get_uptime(&data->uptime);
360    _Timestamp_Subtract(&uptime_at_last_reset, &data->uptime, &data->uptime);
361    _Timestamp_Subtract(&data->last_uptime, &data->uptime, &data->period);
362    data->last_uptime = data->uptime;
363
364    rtems_iterate_over_all_threads_2(task_usage, data);
365
366    if (data->task_count > data->task_size)
367    {
368      data->last_tasks = realloc(data->last_tasks, tasks_size);
369      data->last_usage = realloc(data->last_usage, usage_size);
370      if ((data->last_tasks == NULL) || (data->last_usage == NULL))
371      {
372        (*data->plugin.print)(data->plugin.context, "top worker: error: no memory\n");
373        data->thread_run = false;
374        break;
375      }
376      data->task_size = data->task_count;
377    }
378
379    memcpy(data->last_tasks, data->tasks, tasks_size);
380    memcpy(data->last_usage, data->usage, usage_size);
381    data->last_task_count = data->task_count;
382
383    /*
384     * We need to loop again to get suitable current usage values as we need a
385     * last sample to work.
386     */
387    if (first_time)
388    {
389      rtems_task_wake_after(RTEMS_MILLISECONDS_TO_TICKS(500));
390      first_time = false;
391      continue;
392    }
393
394    _Protected_heap_Get_information(&_Workspace_Area, &wksp);
395
396    if (data->single_page)
397      (*data->plugin.print)(data->plugin.context,
398                            "\x1b[H\x1b[J"
399                            " ENTER:Exit  SPACE:Refresh"
400                            "  S:Scroll  A:All  <>:Order  +/-:Lines\n");
401    (*data->plugin.print)(data->plugin.context,"\n");
402
403    /*
404     * Uptime and period of this sample.
405     */
406    (*data->plugin.print)(data->plugin.context, "Uptime: ");
407    print_time(data, &data->uptime, 20);
408    (*data->plugin.print)(data->plugin.context, " Period: ");
409    print_time(data, &data->period, 20);
410
411    /*
412     * Task count, load and idle levels.
413     */
414    (*data->plugin.print)(data->plugin.context, "\nTasks: %4i  ", data->task_count);
415
416    _Timestamp_Subtract(&data->idle, &data->total, &load);
417    _Timestamp_Divide(&load, &data->uptime, &ival, &fval);
418    (*data->plugin.print)(data->plugin.context,
419                          "Load Average: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
420    _Timestamp_Subtract(&data->current_idle, &data->current, &load);
421    _Timestamp_Divide(&load, &data->period, &ival, &fval);
422    (*data->plugin.print)(data->plugin.context,
423                          "  Load: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
424    _Timestamp_Divide(&data->current_idle, &data->period, &ival, &fval);
425    (*data->plugin.print)(data->plugin.context,
426                          "  Idle: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
427
428    /*
429     * Memory usage.
430     */
431    if (rtems_configuration_get_unified_work_area())
432    {
433      (*data->plugin.print)(data->plugin.context, "\nMem: ");
434      print_memsize(data, wksp.Free.total, "free");
435      print_memsize(data, wksp.Used.total, "used");
436    }
437    else
438    {
439      region_information_block libc_heap;
440      malloc_info(&libc_heap);
441      (*data->plugin.print)(data->plugin.context, "\nMem: Wksp: ");
442      print_memsize(data, wksp.Free.total, "free");
443      print_memsize(data, wksp.Used.total, "used  Heap: ");
444      print_memsize(data, libc_heap.Free.total, "free");
445      print_memsize(data, libc_heap.Used.total, "used");
446    }
447
448    print_memsize(data, data->stack_size, "stack\n");
449
450    (*data->plugin.print)(data->plugin.context,
451       "\n"
452        " ID         | NAME                | RPRI | CPRI   | TIME                | TOTAL   | CURRENT\n"
453        "-%s---------+---------------------+-%s-----%s-----+---------------------+-%s------+--%s----\n",
454       data->sort_order == RTEMS_TOP_SORT_ID ? "^^" : "--",
455       data->sort_order == RTEMS_TOP_SORT_REAL_PRI ? "^^" : "--",
456       data->sort_order == RTEMS_TOP_SORT_CURRENT_PRI ? "^^" : "--",
457                          data->sort_order == RTEMS_TOP_SORT_TOTAL ? "^^" : "--",
458       data->sort_order == RTEMS_TOP_SORT_CURRENT ? "^^" : "--"
459    );
460
461    task_count = 0;
462
463    for (i = 0; i < data->task_count; i++)
464    {
465      Thread_Control*   thread = data->tasks[i];
466      Timestamp_Control last;
467      Timestamp_Control usage;
468      Timestamp_Control current_usage;
469
470      if (thread == NULL)
471        break;
472
473      if (data->single_page && (data->show != 0) && (i >= data->show))
474        break;
475
476      /*
477       * We need to count the number displayed to clear the remainder of the
478       * the display.
479       */
480      ++task_count;
481
482      /*
483       * If the API os POSIX print the entry point.
484       */
485      rtems_object_get_name(thread->Object.id, sizeof(name), name);
486      if (name[0] == '\0')
487        snprintf(name, sizeof(name) - 1, "(%p)", thread->Start.entry_point);
488
489      (*data->plugin.print)(data->plugin.context,
490                            " 0x%08" PRIx32 " | %-19s |  %3" PRId32 " |  %3" PRId32 "   | ",
491                            thread->Object.id,
492                            name,
493                            thread->real_priority,
494                            thread->current_priority);
495
496      usage = data->usage[i];
497      current_usage = data->current_usage[i];
498
499      /*
500       * If this is the currently executing thread, account for time since
501       * the last context switch.
502       */
503      if (_Thread_Get_time_of_last_context_switch(thread, &last))
504      {
505        Timestamp_Control used;
506        Timestamp_Control now;
507
508        /*
509         * Get the current uptime and assume we are not pre-empted to
510         * measure the time from the last switch this thread and now.
511         */
512        _TOD_Get_uptime(&now);
513        _Timestamp_Subtract(&last, &now, &used);
514        _Timestamp_Add_to(&usage, &used);
515        _Timestamp_Add_to(&current_usage, &used);
516      }
517
518      /*
519       * Print the information
520       */
521      print_time(data, &usage, 19);
522      _Timestamp_Divide(&usage, &data->total, &ival, &fval);
523      (*data->plugin.print)(data->plugin.context,
524                            " |%4" PRIu32 ".%03" PRIu32, ival, fval);
525      _Timestamp_Divide(&current_usage, &data->period, &ival, &fval);
526      (*data->plugin.print)(data->plugin.context,
527                            " |%4" PRIu32 ".%03" PRIu32 "\n", ival, fval);
528    }
529
530    if (data->single_page && (data->show != 0) && (task_count < data->show))
531    {
532      i = data->show - task_count;
533      while (i > 0)
534      {
535        (*data->plugin.print)(data->plugin.context, "\x1b[K\n");
536        i--;
537      }
538    }
539
540    sc = rtems_event_receive(RTEMS_EVENT_1,
541                             RTEMS_EVENT_ANY,
542                             RTEMS_MILLISECONDS_TO_TICKS (data->poll_rate_usecs),
543                             &out);
544    if ((sc != RTEMS_SUCCESSFUL) && (sc != RTEMS_TIMEOUT))
545    {
546      (*data->plugin.print)(data->plugin.context,
547                            "error: event receive: %s\n", rtems_status_text(sc));
548      break;
549    }
550  }
551
552  free(data->tasks);
553  free(data->last_tasks);
554  free(data->last_usage);
555  free(data->current_usage);
556
557  data->thread_active = false;
558
559  rtems_task_delete (RTEMS_SELF);
560}
561
562void rtems_cpu_usage_top_with_plugin(
563  void                  *context,
564  rtems_printk_plugin_t  print
565)
566{
567  rtems_status_code      sc;
568  rtems_task_priority    priority;
569  rtems_name             name;
570  rtems_id               id;
571  rtems_cpu_usage_data   data;
572  int                    show_lines = 25;
573
574  if ( !print )
575    return;
576
577  memset(&data, 0, sizeof(data));
578
579  data.thread_run = true;
580  data.single_page = true;
581  data.sort_order = RTEMS_TOP_SORT_CURRENT;
582  data.poll_rate_usecs = 3000;
583  data.show = show_lines;
584  data.plugin.context = context;
585  data.plugin.print = print;
586
587  sc = rtems_task_set_priority (RTEMS_SELF, RTEMS_CURRENT_PRIORITY, &priority);
588
589  if (sc != RTEMS_SUCCESSFUL)
590  {
591    (*print)(
592       context,
593       "error: cannot obtain the current priority: %s\n",
594       rtems_status_text (sc)
595    );
596    return;
597  }
598
599  name = rtems_build_name('C', 'P', 'l', 't');
600
601  sc = rtems_task_create (name, priority, 4 * 1024,
602                          RTEMS_NO_FLOATING_POINT | RTEMS_LOCAL,
603                          RTEMS_PREEMPT | RTEMS_TIMESLICE | RTEMS_NO_ASR,
604                          &id);
605
606  if (sc != RTEMS_SUCCESSFUL)
607  {
608    (*print)(
609       context,
610       "error: cannot create helper thread: %s\n",
611       rtems_status_text (sc)
612    );
613    return;
614  }
615
616  sc = rtems_task_start (
617    id, rtems_cpuusage_top_thread, (rtems_task_argument) &data
618  );
619  if (sc != RTEMS_SUCCESSFUL)
620  {
621    (*print)(
622       context,
623       "error: cannot start helper thread: %s\n",
624       rtems_status_text (sc)
625    );
626    rtems_task_delete (id);
627    return;
628  }
629
630  while (true)
631  {
632    int c = getchar ();
633
634    if ((c == '\r') || (c == '\n') || (c == 'q') || (c == 'Q'))
635    {
636      int loops = 50;
637
638      data.thread_run = false;
639
640      rtems_event_send(id, RTEMS_EVENT_1);
641
642      while (loops && data.thread_active)
643        rtems_task_wake_after (RTEMS_MICROSECONDS_TO_TICKS (100000));
644
645      (*print)(context, "load monitoring stopped.\n");
646      return;
647    }
648    else if (c == '<')
649    {
650      if (data.sort_order == 0)
651        data.sort_order = RTEMS_TOP_SORT_MAX;
652      else
653        --data.sort_order;
654      rtems_event_send(id, RTEMS_EVENT_1);
655    }
656    else if (c == '>')
657    {
658      if (data.sort_order >= RTEMS_TOP_SORT_MAX)
659        data.sort_order = 0;
660      else
661        ++data.sort_order;
662      rtems_event_send(id, RTEMS_EVENT_1);
663    }
664    else if ((c == 's') || (c == 'S'))
665    {
666      data.single_page = !data.single_page;
667      rtems_event_send(id, RTEMS_EVENT_1);
668    }
669    else if ((c == 'a') || (c == 'A'))
670    {
671      if (data.show == 0)
672        data.show = show_lines;
673      else
674        data.show = 0;
675      rtems_event_send(id, RTEMS_EVENT_1);
676    }
677    else if (c == '+')
678    {
679      ++show_lines;
680      if (data.show != 0)
681        data.show = show_lines;
682    }
683    else if (c == '-')
684    {
685      if (show_lines > 5)
686        --show_lines;
687      if (data.show != 0)
688        data.show = show_lines;
689    }
690    else if (c == ' ')
691    {
692      rtems_event_send(id, RTEMS_EVENT_1);
693    }
694  }
695}
696
/*
 * Run the top display using the default kernel printer (printk).
 */
void rtems_cpu_usage_top( void )
{
  rtems_cpu_usage_top_with_plugin( NULL, printk_plugin );
}
Note: See TracBrowser for help on using the repository browser.