Timestamp:
Apr 29, 2015, 5:24:00 AM
Author:
Chris Johns <chrisj@…>
Branches:
4.11, master
Children:
c639cf2
Parents:
40d24d5
Message:

libmisc/cpuuse: Top support for current load.

The cpuuse top command now supports a current-load view, where the list of
tasks is ordered by the current load rather than by total CPU usage.
This lets you see what is using the processor at any given instant.

The ability to sort on a range of thread values is now supported.

Added memory usage statistics for both unified and separate workspace and C
heap configurations, as well as a display of the allocated stack space.
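
The workspace and heap figures come from the kernel's heap interfaces. Below is
a minimal sketch of gathering them, mirroring the calls the new code uses; note
that _Protected_heap_Get_information and _Workspace_Area are internal score
interfaces, not a stable public API, and the printf output format here is
illustrative only:

    #include <stdio.h>
    #include <rtems.h>
    #include <rtems/malloc.h>
    #include <rtems/score/protectedheap.h>
    #include <rtems/score/wkspace.h>

    static void show_mem_usage(void)
    {
      Heap_Information_block wksp;

      /* Workspace usage via the protected heap interface. */
      _Protected_heap_Get_information(&_Workspace_Area, &wksp);
      printf("Wksp: %zu free, %zu used\n",
             (size_t) wksp.Free.total, (size_t) wksp.Used.total);

      /* With separate heaps, the C heap is reported by malloc_info(). */
      if (!rtems_configuration_get_unified_work_area())
      {
        region_information_block libc_heap;
        malloc_info(&libc_heap);
        printf("Heap: %zu free, %zu used\n",
               (size_t) libc_heap.Free.total, (size_t) libc_heap.Used.total);
      }
    }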

Added a few more command keys: refresh the display, show all tasks in the
system, control the number of lines displayed, and toggle a scrolling mode
that does not clear the display on each refresh.

Removed support for tick kernel builds. The tick support in the kernel is to
be removed.
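
As a usage sketch, the monitor can be started from application code via the
entry points declared in <rtems/cpuuse.h>. It is assumed here that
rtems_cpu_usage_top() uses the default print plugin; the call blocks reading
command keys from stdin until the exit key stops the monitor:

    #include <rtems.h>
    #include <rtems/cpuuse.h>

    void start_load_monitor(void)
    {
      /*
       * Spawns the top worker task, then reads command keys from stdin;
       * returns when ENTER (or q/Q) stops the monitor.
       */
      rtems_cpu_usage_top();
    }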

File:
1 edited

Legend: unmodified lines are shown with a leading space, added lines with
'+', removed lines with '-'; '…' marks runs of unchanged lines elided by
the viewer.
  • cpukit/libmisc/cpuuse/cpuusagetop.c

r40d24d5 → ra8e4352

 /*
+ *  COPYRIGHT (c) 2015. Chris Johns <chrisj@rtems.org>
+ *
  *  COPYRIGHT (c) 2014.
  *  On-Line Applications Research Corporation (OAR).
…
  */

+/*
+ * Based on the old capture engine ct-load.
+ */
+
 #ifdef HAVE_CONFIG_H
 #include "config.h"
 #endif

+#include <stdbool.h>
 #include <string.h>
 #include <stdlib.h>
…

 #include <rtems/cpuuse.h>
+#include <rtems/malloc.h>
 #include <rtems/score/objectimpl.h>
+#include <rtems/score/protectedheap.h>
 #include <rtems/score/threadimpl.h>
 #include <rtems/score/todimpl.h>
 #include <rtems/score/watchdogimpl.h>
+#include <rtems/score/wkspace.h>

 /*
  * Common variable to sync the load monitor task.
  */
-static volatile int rtems_cpuusage_top_thread_active;
-
-typedef struct {
-  void                  *context;
+typedef struct
+{
+  void*                  context;
   rtems_printk_plugin_t  print;
-}rtems_cpu_usage_plugin_t;
-
-#define RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS (20)
-
+} rtems_cpu_usage_plugin;
+
+/*
+ * Use a struct for all data to allow more than one top and to support the
+ * thread iterator.
+ */
+typedef struct
+{
+  volatile bool          thread_run;
+  volatile bool          thread_active;
+  volatile bool          single_page;
+  volatile uint32_t      sort_order;
+  volatile uint32_t      poll_rate_usecs;
+  volatile uint32_t      show;
+  rtems_cpu_usage_plugin plugin;
+  Thread_CPU_usage_t     zero;
+  Timestamp_Control      uptime;
+  Timestamp_Control      last_uptime;
+  Timestamp_Control      period;
+  int                    task_count;        /* Number of tasks. */
+  int                    last_task_count;   /* Number of tasks in the previous sample. */
+  int                    task_size;         /* The size of the arrays */
+  Thread_Control**       tasks;             /* List of tasks in this sample. */
+  Thread_Control**       last_tasks;        /* List of tasks in the last sample. */
+  Thread_CPU_usage_t*    usage;             /* Usage of task's in this sample. */
+  Thread_CPU_usage_t*    last_usage;        /* Usage of task's in the last sample. */
+  Thread_CPU_usage_t*    current_usage;     /* Current usage for this sample. */
+  Timestamp_Control      total;             /* Total run run, should equal the uptime. */
+  Timestamp_Control      idle;              /* Time spent in idle. */
+  Timestamp_Control      current;           /* Current time run in this period. */
+  Timestamp_Control      current_idle;      /* Current time in idle this period. */
+  uint32_t               stack_size;        /* Size of stack allocated. */
+} rtems_cpu_usage_data;
+
+/*
+ * Sort orders.
+ */
+#define RTEMS_TOP_SORT_ID            (0)
+#define RTEMS_TOP_SORT_REAL_PRI      (1)
+#define RTEMS_TOP_SORT_CURRENT_PRI   (2)
+#define RTEMS_TOP_SORT_TOTAL         (3)
+#define RTEMS_TOP_SORT_CURRENT       (4)
+#define RTEMS_TOP_SORT_MAX           (4)
+
+/*
+ * Private version of the iterator with an arg. This will be moved
+ * to the public version in 5.0.
+ */
+
+typedef void (*rtems_per_thread_routine_2)( Thread_Control *, void* );
+
+void rtems_iterate_over_all_threads_2(rtems_per_thread_routine_2 routine,
+                                      void*                      arg);
+
+void rtems_iterate_over_all_threads_2(rtems_per_thread_routine_2 routine,
+                                      void*                      arg)
+{
+  uint32_t             i;
+  uint32_t             api_index;
+  Thread_Control      *the_thread;
+  Objects_Information *information;
+
+  if ( !routine )
+    return;
+
+  for ( api_index = 1 ; api_index <= OBJECTS_APIS_LAST ; api_index++ ) {
+    #if !defined(RTEMS_POSIX_API) || defined(RTEMS_DEBUG)
+      if ( !_Objects_Information_table[ api_index ] )
+        continue;
+    #endif
+    information = _Objects_Information_table[ api_index ][ 1 ];
+    if ( information ) {
+      for ( i=1 ; i <= information->maximum ; i++ ) {
+        the_thread = (Thread_Control *)information->local_table[ i ];
+        if ( the_thread )
+          (*routine)(the_thread, arg);
+      }
+    }
+  }
+}

 static inline bool equal_to_uint32_t( uint32_t * lhs, uint32_t * rhs )
 {
    if ( *lhs == *rhs )
      return true;
-   else 
+   else
      return false;
 }
…

 #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
-  #define _Thread_CPU_usage_Equal_to( _lhs, _rhs ) \
+  #define CPU_usage_Equal_to( _lhs, _rhs ) \
          _Timestamp_Equal_to( _lhs, _rhs )
 #else
-  #define _Thread_CPU_usage_Equal_to( _lhs, _rhs ) \
+  #define CPU_usage_Equal_to( _lhs, _rhs ) \
          equal_to_uint32_t( _lhs, _rhs )
 #endif

 #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
-#define  _Thread_CPU_usage_Set_to_zero( _time ) \
+  #define CPU_usage_Set_to_zero( _time ) \
         _Timestamp_Set_to_zero( _time )
 #else
-#define  _Thread_CPU_usage_Set_to_zero( _time ) \
+  #define CPU_usage_Set_to_zero( _time ) \
       do { \
         *_time = 0; \
…

 #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
-#define _Thread_CPU_usage_Less_than( _lhs, _rhs ) \
+  #define CPU_usage_Less_than( _lhs, _rhs ) \
        _Timestamp_Less_than( _lhs, _rhs )
 #else
-#define _Thread_CPU_usage_Less_than( _lhs, _rhs ) \
+  #define CPU_usage_Less_than( _lhs, _rhs ) \
         less_than_uint32_t( _lhs, _rhs )
 #endif
+
+static void
+print_memsize(rtems_cpu_usage_data* data, const uint32_t size, const char* label)
+{
+  if (size > (1024 * 1024))
+    (*data->plugin.print)(data->plugin.context, "%4" PRIu32 "M %s",
+                          size / (1024 * 1024), label);
+  else if (size > 1024)
+    (*data->plugin.print)(data->plugin.context, "%4" PRIu32 "K %s",
+                          size / 1024, label);
+  else
+    (*data->plugin.print)(data->plugin.context, "%4" PRIu32 " %s",
+                          size, label);
+}
+
+static int
+print_time(rtems_cpu_usage_data*    data,
+           const Timestamp_Control* time,
+           const int                length)
+{
+  uint32_t secs = _Timestamp_Get_seconds( time );
+  uint32_t usecs = _Timestamp_Get_nanoseconds( time ) / TOD_NANOSECONDS_PER_MICROSECOND;
+  int      len = 0;
+
+  if (secs > 60)
+  {
+    uint32_t mins = secs / 60;
+    if (mins > 60)
+    {
+      uint32_t hours = mins / 60;
+      if (hours > 24)
+      {
+        len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 "d", hours / 24);
+        hours %= 24;
+      }
+      len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 "hr", hours);
+      mins %= 60;
+    }
+    len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 "m", mins);
+    secs %= 60;
+  }
+  len += (*data->plugin.print)(data->plugin.context, "%" PRIu32 ".%06" PRIu32, secs, usecs);
+
+  if (len < length)
+    (*data->plugin.print)(data->plugin.context, "%*c", length - len, ' ');
+
+  return len;
+}
+
+/*
+ * Count the number of tasks.
+ */
+static void
+task_counter(Thread_Control *thrad, void* arg)
+{
+  rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg;
+  ++data->task_count;
+}
+
+/*
+ * Create the sorted table with the current and total usage.
+ */
+static void
+task_usage(Thread_Control* thread, void* arg)
+{
+  rtems_cpu_usage_data* data = (rtems_cpu_usage_data*) arg;
+  Thread_CPU_usage_t    usage = thread->cpu_time_used;
+  Thread_CPU_usage_t    current = data->zero;
+  int                   j;
+
+  data->stack_size += thread->Start.Initial_stack.size;
+
+  for (j = 0; j < data->last_task_count; j++)
+  {
+    if (thread == data->last_tasks[j])
+    {
+      _Timestamp_Subtract(&data->last_usage[j], &usage, &current);
+      break;
+    }
+  }
+
+  /*
+   * When not using nanosecond CPU usage resolution, we have to count the
+   * number of "ticks" we gave credit for to give the user a rough guideline as
+   * to what each number means proportionally.
+   */
+  _Timestamp_Add_to(&data->total, &usage);
+  _Timestamp_Add_to(&data->current, &current);
+
+  if (thread->Object.id == 0x09010001)
+  {
+    data->idle = usage;
+    data->current_idle = current;
+  }
+
+  /*
+   * Create the tasks to display soring as we create.
+   */
+  for (j = 0; j < data->task_count; j++)
+  {
+    if (data->tasks[j])
+    {
+      int k;
+
+      /*
+       * Sort on the current load.
+       */
+      switch (data->sort_order)
+      {
+        default:
+          data->sort_order = RTEMS_TOP_SORT_CURRENT;
+          /* drop through */
+        case RTEMS_TOP_SORT_CURRENT:
+          if (CPU_usage_Equal_to(&current, &data->zero) ||
+              CPU_usage_Less_than(&current, &data->current_usage[j]))
+            continue;
+        case RTEMS_TOP_SORT_TOTAL:
+          if (CPU_usage_Equal_to(&usage, &data->zero) ||
+              CPU_usage_Less_than(&usage, &data->usage[j]))
+            continue;
+        case RTEMS_TOP_SORT_REAL_PRI:
+          if (thread->real_priority > data->tasks[j]->real_priority)
+            continue;
+        case RTEMS_TOP_SORT_CURRENT_PRI:
+          if (thread->current_priority > data->tasks[j]->current_priority)
+            continue;
+        case RTEMS_TOP_SORT_ID:
+          if (thread->Object.id < data->tasks[j]->Object.id)
+            continue;
+      }
+
+      for (k = (data->task_count - 1); k >= j; k--)
+      {
+        data->tasks[k + 1] = data->tasks[k];
+        data->usage[k + 1]  = data->usage[k];
+        data->current_usage[k + 1]  = data->current_usage[k];
+      }
+    }
+    data->tasks[j] = thread;
+    data->usage[j] = usage;
+    data->current_usage[j] = current;
+    break;
+  }
+}

 /*
…
 rtems_cpuusage_top_thread (rtems_task_argument arg)
 {
-  uint32_t                  api_index;
-  Thread_Control*           the_thread;
-  int                       i;
-  int                       j;
-  int                       k;
-  Objects_Information*      information;
-  char                      name[13];
-  int                       task_count = 0;
-  uint32_t                  seconds, nanoseconds;
-  rtems_cpu_usage_plugin_t* plugin = (rtems_cpu_usage_plugin_t*)arg;
-  Thread_Control*           load_tasks[RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS + 1];
-  Thread_CPU_usage_t        load[RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS + 1];
-  Thread_CPU_usage_t        zero;
-  Timestamp_Control         uptime;
-  uint32_t                  ival, fval;
-
-  while (true) {
-    #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
-      Timestamp_Control  total, ran, uptime_at_last_reset;
-    #else
-      uint32_t           total_units = 0;
-    #endif
-
-    rtems_cpuusage_top_thread_active = 1;
-
-    _Thread_CPU_usage_Set_to_zero( &zero);
-    memset (load_tasks, 0, sizeof (load_tasks));
-    for (i=0; i< (RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS + 1); i++)
-      _Thread_CPU_usage_Set_to_zero( &load[i] );
-
-    /*
-     * Iterate over the tasks and sort the highest load tasks
-     * into our local arrays. We only handle a limited number of
-     * tasks.
+  rtems_cpu_usage_data*  data = (rtems_cpu_usage_data*) arg;
+  char                   name[13];
+  int                    i;
+  Heap_Information_block wksp;
+  uint32_t               ival, fval;
+  int                    task_count;
+  rtems_event_set        out;
+  rtems_status_code      sc;
+  bool                   first_time = true;
+
+  data->thread_active = true;
+
+  _TOD_Get_uptime(&data->last_uptime);
+
+  CPU_usage_Set_to_zero(&data->zero);
+
+  while (data->thread_run)
+  {
+    Timestamp_Control uptime_at_last_reset = CPU_usage_Uptime_at_last_reset;
+    size_t            tasks_size;
+    size_t            usage_size;
+    Timestamp_Control load;
+
+    data->task_count = 0;
+    rtems_iterate_over_all_threads_2(task_counter, data);
+
+    tasks_size = sizeof(Thread_Control*) * (data->task_count + 1);
+    usage_size = sizeof(Thread_CPU_usage_t) * (data->task_count + 1);
+
+    if (data->task_count > data->task_size)
+    {
+      data->tasks = realloc(data->tasks, tasks_size);
+      data->usage = realloc(data->usage, usage_size);
+      data->current_usage = realloc(data->current_usage, usage_size);
+      if ((data->tasks == NULL) || (data->usage == NULL) || (data->current_usage == NULL))
+      {
+        (*data->plugin.print)(data->plugin.context, "top worker: error: no memory\n");
+        data->thread_run = false;
+        break;
+      }
+    }
+
+    memset(data->tasks, 0, tasks_size);
+    memset(data->usage, 0, usage_size);
+    memset(data->current_usage, 0, usage_size);
+
+    _Timestamp_Set_to_zero(&data->total);
+    _Timestamp_Set_to_zero(&data->current);
+    data->stack_size = 0;
+
+    _TOD_Get_uptime(&data->uptime);
+    _Timestamp_Subtract(&uptime_at_last_reset, &data->uptime, &data->uptime);
+    _Timestamp_Subtract(&data->last_uptime, &data->uptime, &data->period);
+    data->last_uptime = data->uptime;
+
+    rtems_iterate_over_all_threads_2(task_usage, data);
+
+    if (data->task_count > data->task_size)
+    {
+      data->last_tasks = realloc(data->last_tasks, tasks_size);
+      data->last_usage = realloc(data->last_usage, usage_size);
+      if ((data->last_tasks == NULL) || (data->last_usage == NULL))
+      {
+        (*data->plugin.print)(data->plugin.context, "top worker: error: no memory\n");
+        data->thread_run = false;
+        break;
+      }
+      data->task_size = data->task_count;
+    }
+
+    memcpy(data->last_tasks, data->tasks, tasks_size);
+    memcpy(data->last_usage, data->usage, usage_size);
+    data->last_task_count = data->task_count;
+
+    /*
+     * We need to loop again to get suitable current usage values as we need a
+     * last sample to work.
      */
-    for ( api_index = 1 ; api_index <= OBJECTS_APIS_LAST ; api_index++ ) {
-      #if !defined(RTEMS_POSIX_API) || defined(RTEMS_DEBUG)
-        if ( !_Objects_Information_table[ api_index ] )
-          continue;
-      #endif
-
-      information = _Objects_Information_table[ api_index ][ 1 ];
-      if ( information ) {
-        for ( i=1 ; i <= information->maximum ; i++ ) {
-          the_thread = (Thread_Control *)information->local_table[ i ];
-          if ( the_thread ) {
-            Thread_CPU_usage_t usage = the_thread->cpu_time_used;
-
-            /*
-             *  When not using nanosecond CPU usage resolution, we have to count
-             *  the number of "ticks" we gave credit for to give the user a rough
-             *  guideline as to what each number means proportionally.
-             */
-            #ifdef __RTEMS_USE_TICKS_FOR_STATISTICS__
-              total_units += usage;
-            #endif
-
-            /* Count the number of tasks and sort this load value */
-            task_count++;
-            for (j = 0; j < RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS; j++) {
-              if (load_tasks[j]) {
-                if ( _Thread_CPU_usage_Equal_to( &usage, &zero) ||
-                     _Thread_CPU_usage_Less_than( &usage, &load[j]))
-                  continue;
-                for (k = (RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS - 1); k >= j; k--){
-                  load_tasks[k + 1] = load_tasks[k];
-                  load[k + 1]  = load[k];
-                }
-              }
-              load_tasks[j] = the_thread;
-              load[j]  = usage;
-              break;
-            }
-          }
-        }
+    if (first_time)
+    {
+      rtems_task_wake_after(RTEMS_MILLISECONDS_TO_TICKS(500));
+      first_time = false;
+      continue;
+    }
+
+    _Protected_heap_Get_information(&_Workspace_Area, &wksp);
+
+    if (data->single_page)
+      (*data->plugin.print)(data->plugin.context,
+                            "\x1b[H\x1b[J"
+                            " ENTER:Exit  SPACE:Refresh"
+                            "  S:Scroll  A:All  <>:Order  +/-:Lines\n");
+    (*data->plugin.print)(data->plugin.context,"\n");
+
+    /*
+     * Uptime and period of this sample.
+     */
+    (*data->plugin.print)(data->plugin.context, "Uptime: ");
+    print_time(data, &data->uptime, 20);
+    (*data->plugin.print)(data->plugin.context, " Period: ");
+    print_time(data, &data->period, 20);
+
+    /*
+     * Task count, load and idle levels.
+     */
+    (*data->plugin.print)(data->plugin.context, "\nTasks: %4i  ", data->task_count);
+
+    _Timestamp_Subtract(&data->idle, &data->total, &load);
+    _Timestamp_Divide(&load, &data->uptime, &ival, &fval);
+    (*data->plugin.print)(data->plugin.context,
+                          "Load Average: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
+    _Timestamp_Subtract(&data->current_idle, &data->current, &load);
+    _Timestamp_Divide(&load, &data->period, &ival, &fval);
+    (*data->plugin.print)(data->plugin.context,
+                          "  Load: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
+    _Timestamp_Divide(&data->current_idle, &data->period, &ival, &fval);
+    (*data->plugin.print)(data->plugin.context,
+                          "  Idle: %4" PRIu32 ".%03" PRIu32 "%%", ival, fval);
+
+    /*
+     * Memory usage.
+     */
+    if (rtems_configuration_get_unified_work_area())
+    {
+      (*data->plugin.print)(data->plugin.context, "\nMem: ");
+      print_memsize(data, wksp.Free.total, "free");
+      print_memsize(data, wksp.Used.total, "used");
+    }
+    else
+    {
+      region_information_block libc_heap;
+      malloc_info(&libc_heap);
+      (*data->plugin.print)(data->plugin.context, "\nMem: Wksp: ");
+      print_memsize(data, wksp.Free.total, "free");
+      print_memsize(data, wksp.Used.total, "used  Heap: ");
+      print_memsize(data, libc_heap.Free.total, "free");
+      print_memsize(data, libc_heap.Used.total, "used");
+    }
+
+    print_memsize(data, data->stack_size, "stack\n");
+
+    (*data->plugin.print)(data->plugin.context,
+       "\n"
+        " ID         | NAME                | RPRI | CPRI   | TIME                | TOTAL   | CURRENT\n"
+        "-%s---------+---------------------+-%s-----%s-----+---------------------+-%s------+--%s----\n",
+       data->sort_order == RTEMS_TOP_SORT_ID ? "^^" : "--",
+       data->sort_order == RTEMS_TOP_SORT_REAL_PRI ? "^^" : "--",
+       data->sort_order == RTEMS_TOP_SORT_CURRENT_PRI ? "^^" : "--",
+       data->sort_order == RTEMS_TOP_SORT_TOTAL ? "^^" : "--",
+       data->sort_order == RTEMS_TOP_SORT_CURRENT ? "^^" : "--"
+    );
+
+    task_count = 0;
+
+    for (i = 0; i < data->task_count; i++)
+    {
+      Thread_Control*   thread = data->tasks[i];
+      Timestamp_Control last;
+      Timestamp_Control usage;
+      Timestamp_Control current_usage;
+
+      if (thread == NULL)
+        break;
+
+      if (data->single_page && (data->show != 0) && (i >= data->show))
+        break;
+
+      /*
+       * We need to count the number displayed to clear the remainder of the
+       * the display.
+       */
+      ++task_count;
+
+      /*
+       * If the API os POSIX print the entry point.
+       */
+      rtems_object_get_name(thread->Object.id, sizeof(name), name);
+      if (name[0] == '\0')
+        snprintf(name, sizeof(name) - 1, "(%p)", thread->Start.entry_point);
+
+      (*data->plugin.print)(data->plugin.context,
+                            " 0x%08" PRIx32 " | %-19s |  %3" PRId32 " |  %3" PRId32 "   | ",
+                            thread->Object.id,
+                            name,
+                            thread->real_priority,
+                            thread->current_priority);
+
+      usage = data->usage[i];
+      current_usage = data->current_usage[i];
+
+      /*
+       * If this is the currently executing thread, account for time since
+       * the last context switch.
+       */
+      if (_Thread_Get_time_of_last_context_switch(thread, &last))
+      {
+        Timestamp_Control used;
+        Timestamp_Control now;
+
+        /*
+         * Get the current uptime and assume we are not pre-empted to
+         * measure the time from the last switch this thread and now.
+         */
+        _TOD_Get_uptime(&now);
+        _Timestamp_Subtract(&last, &now, &used);
+        _Timestamp_Add_to(&usage, &used);
+        _Timestamp_Add_to(&current_usage, &used);
       }
-    }
-
-    #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
-      _Timestamp_Set_to_zero( &total );
-      uptime_at_last_reset = CPU_usage_Uptime_at_last_reset;
-    #endif
-
-    _TOD_Get_uptime( &uptime );
-    seconds = _Timestamp_Get_seconds( &uptime );
-    nanoseconds = _Timestamp_Get_nanoseconds( &uptime ) /
-                  TOD_NANOSECONDS_PER_MICROSECOND;
-    (*plugin->print)(plugin->context, "\x1b[H\x1b[J Press ENTER to exit.\n\n");
-    (*plugin->print)(plugin->context, "uptime: ");
-    (*plugin->print)(plugin->context,
-      "%7" PRIu32 ".%06" PRIu32 "\n",  seconds, nanoseconds
-    );
-
-    (*plugin->print)(
-       plugin->context,
-       "-------------------------------------------------------------------------------\n"
-       "                              CPU USAGE BY THREAD\n"
-       "------------+---------------------+---------------+---------------+------------\n"
-       #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
-        " ID         | NAME                | RPRI | CPRI   | SECONDS       | PERCENT\n"
-       #else
-        " ID         | NAME                | RPRI | CPRI   | TICKS         | PERCENT\n"
-       #endif
-       "------------+---------------------+---------------+---------------+------------\n"
-    );
-
-    for (i = 0; i < RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS; i++) {
-
-      if (!load_tasks[i])
-        break;

       /*
-       * If this is the currently executing thread, account for time
-       * since the last context switch.
+       * Print the information
        */
-      the_thread = load_tasks[i];
-
-      rtems_object_get_name( the_thread->Object.id, sizeof(name), name );
-      (*plugin->print)(
-        plugin->context,
-        " 0x%08" PRIx32 " | %-19s |  %3" PRId32 " |  %3" PRId32 "   |",
-        the_thread->Object.id,
-        name,
-        the_thread->real_priority,
-        the_thread->current_priority
-      );
-
-      #ifndef __RTEMS_USE_TICKS_FOR_STATISTICS__
+      print_time(data, &usage, 19);
+      _Timestamp_Divide(&usage, &data->total, &ival, &fval);
+      (*data->plugin.print)(data->plugin.context,
+                            " |%4" PRIu32 ".%03" PRIu32, ival, fval);
+      _Timestamp_Divide(&current_usage, &data->period, &ival, &fval);
+      (*data->plugin.print)(data->plugin.context,
+                            " |%4" PRIu32 ".%03" PRIu32 "\n", ival, fval);
+    }
+
+    if (data->single_page && (data->show != 0) && (task_count < data->show))
+    {
+      i = data->show - task_count;
+      while (i > 0)
       {
-        Timestamp_Control last;
-
-        /*
-         * If this is the currently executing thread, account for time
-         * since the last context switch.
-         */
-        ran = load[i];
-        if ( _Thread_Get_time_of_last_context_switch( the_thread, &last ) ) {
-          Timestamp_Control used;
-          _TOD_Get_uptime( &uptime );
-          _Timestamp_Subtract( &last, &uptime, &used );
-          _Timestamp_Add_to( &ran, &used );
-        } else {
-          _TOD_Get_uptime( &uptime );
-        }
-        _Timestamp_Subtract( &uptime_at_last_reset, &uptime, &total );
-        _Timestamp_Divide( &ran, &total, &ival, &fval );
-
-        /*
-         * Print the information
-         */
-
-        seconds = _Timestamp_Get_seconds( &ran );
-        nanoseconds = _Timestamp_Get_nanoseconds( &ran ) /
-          TOD_NANOSECONDS_PER_MICROSECOND;
-       (*plugin->print)( plugin->context,
-          "%7" PRIu32 ".%06" PRIu32 " |%4" PRIu32 ".%03" PRIu32 "\n",
-          seconds, nanoseconds,
-            ival, fval
-        );
+        (*data->plugin.print)(data->plugin.context, "\x1b[K\n");
+        i--;
       }
-      #else
-        if (total_units) {
-          uint64_t ival_64;
-
-          ival_64 = load[i];
-          ival_64 *= 100000;
-          ival = ival_64 / total_units;
-        } else {
-          ival = 0;
-        }
-
-        fval = ival % 1000;
-        ival /= 1000;
-       (*plugin->print)( plugin->context,
-          "%14" PRIu32 " |%4" PRIu32 ".%03" PRIu32 "\n",
-          load[i],
-          ival,
-          fval
-        );
-      #endif
-    }
-
-    if (task_count < RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS)
-    {
-      j = RTEMS_CPUUSAGE_TOP_MAX_LOAD_TASKS - task_count;
-      while (j > 0)
-      {
-       (*plugin->print)( plugin->context, "\x1b[K\n");
-        j--;
-      }
-    }
-
-    rtems_cpuusage_top_thread_active = 0;
-
-    rtems_task_wake_after (RTEMS_MICROSECONDS_TO_TICKS (5000000));
-  }
+    }
+
+    sc = rtems_event_receive(RTEMS_EVENT_1,
+                             RTEMS_EVENT_ANY,
+                             RTEMS_MILLISECONDS_TO_TICKS (data->poll_rate_usecs),
+                             &out);
+    if ((sc != RTEMS_SUCCESSFUL) && (sc != RTEMS_TIMEOUT))
+    {
+      (*data->plugin.print)(data->plugin.context,
+                            "error: event receive: %s\n", rtems_status_text(sc));
+      break;
+    }
+  }
+
+  free(data->tasks);
+  free(data->last_tasks);
+  free(data->last_usage);
+  free(data->current_usage);
+
+  data->thread_active = false;
+
+  rtems_task_delete (RTEMS_SELF);
 }

…
 )
 {
-  rtems_status_code   sc;
-  rtems_task_priority priority;
-  rtems_name          name;
-  rtems_id            id;
-  rtems_cpu_usage_plugin_t  plugin;
-
+#ifdef __RTEMS_USE_TICKS_FOR_STATISTICS__
   if ( !print )
     return;
-
-  plugin.context = context;
-  plugin.print   = print;
+  (*print)(context, "error: tick kernels not supported\n");
+#else
+  rtems_status_code      sc;
+  rtems_task_priority    priority;
+  rtems_name             name;
+  rtems_id               id;
+  rtems_cpu_usage_data   data;
+  int                    show_lines = 25;
+
+  if ( !print )
+    return;
+
+  memset(&data, 0, sizeof(data));
+
+  data.thread_run = true;
+  data.single_page = true;
+  data.sort_order = RTEMS_TOP_SORT_CURRENT;
+  data.poll_rate_usecs = 3000;
+  data.show = show_lines;
+  data.plugin.context = context;
+  data.plugin.print = print;

   sc = rtems_task_set_priority (RTEMS_SELF, RTEMS_CURRENT_PRIORITY, &priority);
…

   sc = rtems_task_start (
-    id, rtems_cpuusage_top_thread, (rtems_task_argument)&plugin
+    id, rtems_cpuusage_top_thread, (rtems_task_argument) &data
   );
   if (sc != RTEMS_SUCCESSFUL)
…
   }

-  for (;;)
+  while (true)
   {
     int c = getchar ();

-    if ((c == '\r') || (c == '\n'))
-    {
-      int loops = 20;
-
-      while (loops && rtems_cpuusage_top_thread_active)
+    if ((c == '\r') || (c == '\n') || (c == 'q') || (c == 'Q'))
+    {
+      int loops = 50;
+
+      data.thread_run = false;
+
+      rtems_event_send(id, RTEMS_EVENT_1);
+
+      while (loops && data.thread_active)
         rtems_task_wake_after (RTEMS_MICROSECONDS_TO_TICKS (100000));
-
-      rtems_task_delete (id);

       (*print)(context, "load monitoring stopped.\n");
       return;
     }
-  }
+    else if (c == '<')
+    {
+      if (data.sort_order == 0)
+        data.sort_order = RTEMS_TOP_SORT_MAX;
+      else
+        --data.sort_order;
+      rtems_event_send(id, RTEMS_EVENT_1);
+    }
+    else if (c == '>')
+    {
+      if (data.sort_order >= RTEMS_TOP_SORT_MAX)
+        data.sort_order = 0;
+      else
+        ++data.sort_order;
+      rtems_event_send(id, RTEMS_EVENT_1);
+    }
+    else if ((c == 's') || (c == 'S'))
+    {
+      data.single_page = !data.single_page;
+      rtems_event_send(id, RTEMS_EVENT_1);
+    }
+    else if ((c == 'a') || (c == 'A'))
+    {
+      if (data.show == 0)
+        data.show = show_lines;
+      else
+        data.show = 0;
+      rtems_event_send(id, RTEMS_EVENT_1);
+    }
+    else if (c == '+')
+    {
+      ++show_lines;
+      if (data.show != 0)
+        data.show = show_lines;
+    }
+    else if (c == '-')
+    {
+      if (show_lines > 5)
+        --show_lines;
+      if (data.show != 0)
+        data.show = show_lines;
+    }
+    else if (c == ' ')
+    {
+      rtems_event_send(id, RTEMS_EVENT_1);
+    }
+  }
+#endif
 }

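Taken together, the heart of this change is a two-sample delta: each poll
records every thread's accumulated cpu_time_used, and the next poll subtracts
the previous sample's value to get the time consumed in just that period.
Below is a simplified, self-contained sketch of the technique; the sample_t
type and function names are illustrative only, whereas the real code works
with Timestamp_Control values, the thread iterator and the last_tasks and
last_usage arrays:

    #include <stdint.h>

    /* Hypothetical per-thread sample used to illustrate the technique. */
    typedef struct {
      uint32_t id;            /* thread object id */
      uint64_t cpu_time_used; /* monotonically increasing total */
    } sample_t;

    /*
     * Current load of one thread over the last period: the delta between
     * this sample's total and the previous sample's total for the same id.
     */
    uint64_t current_usage(const sample_t* now,
                           const sample_t* last, int last_count)
    {
      for (int i = 0; i < last_count; i++)
        if (last[i].id == now->id)
          return now->cpu_time_used - last[i].cpu_time_used;
      return 0; /* thread not in the previous sample: no history yet */
    }

This is also why the worker sleeps and loops once before printing anything:
the first pass only establishes the baseline sample.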