source: rtems/cpukit/include/rtems/profiling.h @ 21275b58

Last change on this file since 21275b58 was 2afb22b, checked in by Chris Johns <chrisj@…>, on 12/23/17 at 07:18:56

Remove make preinstall

A speciality of the RTEMS build system was the make preinstall step. It
copied header files from arbitrary locations into the build tree. The
header files were included via the -Bsome/build/tree/path GCC command
line option.

This has at least seven problems:

  • The make preinstall step itself needs time and disk space.
  • Errors in header files show up in the build tree copy. This makes it hard for editors to open the right file to fix the error.
  • There is no clear relationship between source and build tree header files. This makes an audit of the build process difficult.
  • The visibility of all header files in the build tree makes it difficult to enforce API barriers. For example, using BSP-specifics in the cpukit is discouraged.
  • An introduction of a new build system is difficult.
  • Header files in include paths specified by the -B option are treated as system headers. This may suppress warnings.
  • The parallel build had sporadic failures on some hosts.

This patch removes the make preinstall step. All installed header
files are moved to dedicated include directories in the source tree.
Let @RTEMS_CPU@ be the target architecture, e.g. arm, powerpc, sparc,
etc. Let @RTEMS_BSP_FAMILY@ be a BSP family base directory, e.g.
erc32, imx, qoriq, etc.

The new cpukit include directories are:

  • cpukit/include
  • cpukit/score/cpu/@RTEMS_CPU@/include
  • cpukit/libnetworking

The new BSP include directories are:

  • bsps/include
  • bsps/@RTEMS_CPU@/include
  • bsps/@RTEMS_CPU@/@RTEMS_BSP_FAMILY@/include

There are build tree include directories for generated files.

The include directory order favours the most general header file, i.e.
it is not possible to override general header files via the include path
order.

The "bootstrap -p" option was removed. The new "bootstrap -H" option
should be used to regenerate the "headers.am" files.

Update #3254.

/**
 * @file
 *
 * @ingroup Profiling
 *
 * @brief Profiling API
 */

/*
 * Copyright (c) 2014 embedded brains GmbH.  All rights reserved.
 *
 *  embedded brains GmbH
 *  Dornierstr. 4
 *  82178 Puchheim
 *  Germany
 *  <rtems@embedded-brains.de>
 *
 * The license and distribution terms for this file may be
 * found in the file LICENSE in this distribution or at
 * http://www.rtems.org/license/LICENSE.
 */

#ifndef _RTEMS_PROFILING_H
#define _RTEMS_PROFILING_H

#include <stdint.h>

#include <rtems/print.h>

#ifdef __cplusplus
extern "C" {
#endif /* __cplusplus */

/**
 * @defgroup Profiling Profiling Support
 *
 * @brief The profiling support offers functions to report profiling
 * information available in the system.
 *
 * Profiling support is disabled by default.  It must be enabled via the
 * configure command line with the <tt>--enable-profiling</tt> option.  In this
 * case the RTEMS_PROFILING pre-processor symbol is defined and profiling
 * statistics will be gathered during system run-time.  The profiling support
 * increases the time of critical sections and has some memory overhead.  The
 * overhead should be acceptable for most applications.  The aim of the
 * profiling implementation is to be available even for production systems so
 * that verification is simplified.
 *
 * Profiling information includes critical timing values such as the maximum
 * time of disabled thread dispatching, which is a measure of the thread
 * dispatch latency.  On SMP configurations, statistics of all SMP locks in the
 * system are available.
 *
 * Profiling information can be retrieved via rtems_profiling_iterate() and
 * reported as an XML dump via rtems_profiling_report_xml().  These functions
 * are always available, but actual profiling data is only available if enabled
 * at build configuration time.
 *
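 * As a sketch, application code can guard optional instrumentation with the
 * RTEMS_PROFILING symbol (the function name below is hypothetical, not part
 * of this API):
 * @code
 * #ifdef RTEMS_PROFILING
 *   record_additional_statistics(); // hypothetical application hook
 * #endif
 * @endcode
 *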
 * @{
 */

/**
 * @brief Type of profiling data.
 */
typedef enum {
  /**
   * @brief Type of per-CPU profiling data.
   *
   * @see rtems_profiling_per_cpu.
   */
  RTEMS_PROFILING_PER_CPU,

  /**
   * @brief Type of SMP lock profiling data.
   *
   * @see rtems_profiling_smp_lock.
   */
  RTEMS_PROFILING_SMP_LOCK
} rtems_profiling_type;

/**
 * @brief The profiling data header.
 */
typedef struct {
  /**
   * @brief The profiling data type.
   */
  rtems_profiling_type type;
} rtems_profiling_header;

/**
 * @brief Per-CPU profiling data.
 *
 * Theoretically all values in this structure can overflow, but the integer
 * types are chosen so that they cannot overflow in practice.  On systems with
 * a 1GHz CPU counter, the 64-bit integers can overflow in about 58 years.
 * Since the system should not spend most of its time in critical sections,
 * the actual system run-time before an overflow is much longer.  Several
 * other counters in the system will overflow before we get a problem in the
 * profiling area.
 */
typedef struct {
  /**
   * @brief The profiling data header.
   */
  rtems_profiling_header header;

  /**
   * @brief The processor index of this profiling data.
   */
  uint32_t processor_index;

  /**
   * @brief The maximum time of disabled thread dispatching in nanoseconds.
   */
  uint32_t max_thread_dispatch_disabled_time;

  /**
   * @brief Count of times when the thread dispatch disable level changes from
   * zero to one in thread context.
   *
   * This value may overflow.
   */
  uint64_t thread_dispatch_disabled_count;

  /**
   * @brief Total time of disabled thread dispatching in nanoseconds.
   *
   * The average time of disabled thread dispatching is the total time of
   * disabled thread dispatching divided by the thread dispatch disabled
   * count.
   *
   * This value may overflow.
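   *
   * A sketch of the average computation described above (the helper name is
   * illustrative; the guard covers a count of zero):
   * @code
   * // illustrative helper: average disabled-dispatch time in nanoseconds
   * static uint64_t average_disabled_time_ns(
   *   const rtems_profiling_per_cpu *per_cpu
   * )
   * {
   *   uint64_t count = per_cpu->thread_dispatch_disabled_count;
   *
   *   return count != 0 ?
   *     per_cpu->total_thread_dispatch_disabled_time / count : 0;
   * }
   * @endcode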
   */
  uint64_t total_thread_dispatch_disabled_time;

  /**
   * @brief The maximum interrupt delay in nanoseconds if supported by the
   * hardware.
   *
   * The interrupt delay is the time interval from the recognition of an
   * interrupt signal by the hardware up to the execution start of the
   * corresponding high-level handler.  The interrupt delay is the main
   * contributor to the interrupt latency.  To measure this time, hardware
   * support is required.  A time stamp unit must capture the interrupt signal
   * recognition time.  If no hardware support is available, then this field
   * will have a constant value of zero.
   */
  uint32_t max_interrupt_delay;

  /**
   * @brief The maximum time spent to process a single sequence of nested
   * interrupts in nanoseconds.
   *
   * This is the time interval between the change of the interrupt nest level
   * from zero to one and the change back from one to zero.  It is the measured
   * worst-case execution time of interrupt service routines.  Please note that
   * in the case of nested interrupts this time includes the combined execution
   * time and not the maximum time of an individual interrupt service routine.
   */
  uint32_t max_interrupt_time;

  /**
   * @brief Count of times when the interrupt nest level changes from zero to
   * one.
   *
   * This value may overflow.
   */
  uint64_t interrupt_count;

  /**
   * @brief Total time of interrupt processing in nanoseconds.
   *
   * The average time of interrupt processing is the total time of interrupt
   * processing divided by the interrupt count.
   *
   * This value may overflow.
   */
  uint64_t total_interrupt_time;
} rtems_profiling_per_cpu;

/**
 * @brief Count of lock contention counters for SMP lock profiling.
 */
#define RTEMS_PROFILING_SMP_LOCK_CONTENTION_COUNTS 4

/**
 * @brief SMP lock profiling data.
 *
 * The lock acquire attempt instant is the point in time right after the
 * interrupt disable action in the lock acquire sequence.
 *
 * The lock acquire instant is the point in time right after the lock
 * acquisition.  This is the beginning of the critical section code execution.
 *
 * The lock acquire time is the time elapsed between the lock acquire attempt
 * instant and the lock acquire instant.
 *
 * The lock release instant is the point in time right before the interrupt
 * enable action in the lock release sequence.
 *
 * The lock section time is the time elapsed between the lock acquire instant
 * and the lock release instant.
 */
typedef struct {
  /**
   * @brief The profiling data header.
   */
  rtems_profiling_header header;

  /**
   * @brief The lock name.
   */
  const char *name;

  /**
   * @brief The maximum lock acquire time in nanoseconds.
   */
  uint32_t max_acquire_time;

  /**
   * @brief The maximum lock section time in nanoseconds.
   */
  uint32_t max_section_time;

  /**
   * @brief The count of lock uses.
   *
   * This value may overflow.
   */
  uint64_t usage_count;

  /**
   * @brief Total lock acquire time in nanoseconds.
   *
   * The average lock acquire time is the total acquire time divided by the
   * lock usage count.  The ratio of the total section time to the total
   * acquire time gives a measure of the lock contention.
   *
   * This value may overflow.
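   *
   * A sketch of this contention measure (the helper name is illustrative;
   * the guard avoids division by zero for unused locks):
   * @code
   * // illustrative helper: ratio of total section to total acquire time
   * static double contention_measure( const rtems_profiling_smp_lock *lock )
   * {
   *   return lock->total_acquire_time != 0 ?
   *     (double) lock->total_section_time / lock->total_acquire_time : 0.0;
   * }
   * @endcode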
   */
  uint64_t total_acquire_time;

  /**
   * @brief Total lock section time in nanoseconds.
   *
   * The average lock section time is the total section time divided by the
   * lock usage count.
   *
   * This value may overflow.
   */
  uint64_t total_section_time;

  /**
   * @brief The counts of lock acquire operations by contention.
   *
   * The contention count for index N corresponds to a lock acquire attempt
   * with an initial queue length of N.  The last index corresponds to all
   * lock acquire attempts with an initial queue length greater than or equal
   * to RTEMS_PROFILING_SMP_LOCK_CONTENTION_COUNTS minus one.
   *
   * The values may overflow.
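   *
   * A sketch that dumps these counts (the helper name and printf() output
   * are illustrative and assume <stdio.h> and <inttypes.h>):
   * @code
   * // illustrative helper: print one line per contention bucket
   * static void print_contention_counts(
   *   const rtems_profiling_smp_lock *lock
   * )
   * {
   *   size_t i;
   *
   *   for ( i = 0; i < RTEMS_PROFILING_SMP_LOCK_CONTENTION_COUNTS; ++i ) {
   *     printf(
   *       "initial queue length %zu: %" PRIu64 "\n",
   *       i,
   *       lock->contention_counts[ i ]
   *     );
   *   }
   * }
   * @endcode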
   */
  uint64_t contention_counts[RTEMS_PROFILING_SMP_LOCK_CONTENTION_COUNTS];
} rtems_profiling_smp_lock;

/**
 * @brief Collection of profiling data.
 */
typedef union {
  /**
   * @brief Header to specify the actual profiling data.
   */
  rtems_profiling_header header;

  /**
   * @brief Per-CPU profiling data if indicated by the header.
   */
  rtems_profiling_per_cpu per_cpu;

  /**
   * @brief SMP lock profiling data if indicated by the header.
   */
  rtems_profiling_smp_lock smp_lock;
} rtems_profiling_data;

/**
 * @brief Visitor function for the profiling iteration.
 *
 * @param[in, out] arg The visitor argument.
 * @param[in] data The current profiling data.
 *
 * @see rtems_profiling_iterate().
 */
typedef void (*rtems_profiling_visitor)(
  void *arg,
  const rtems_profiling_data *data
);

/**
 * @brief Iterates through all profiling data of the system.
 *
 * @param[in] visitor The visitor.
 * @param[in, out] visitor_arg The visitor argument.
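 *
 * A minimal usage sketch (the visitor and its printing logic are
 * illustrative and assume <stdio.h> and <inttypes.h>):
 * @code
 * // illustrative visitor: print the per-CPU maximum disabled-dispatch time
 * static void print_max_disabled_time(
 *   void *arg,
 *   const rtems_profiling_data *data
 * )
 * {
 *   (void) arg;
 *
 *   if ( data->header.type == RTEMS_PROFILING_PER_CPU ) {
 *     printf(
 *       "CPU %" PRIu32 ": %" PRIu32 " ns\n",
 *       data->per_cpu.processor_index,
 *       data->per_cpu.max_thread_dispatch_disabled_time
 *     );
 *   }
 * }
 *
 * rtems_profiling_iterate( print_max_disabled_time, NULL );
 * @endcode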
 */
void rtems_profiling_iterate(
  rtems_profiling_visitor visitor,
  void *visitor_arg
);

/**
 * @brief Reports profiling data as XML.
 *
 * @param[in] name The name of the profiling report.
 * @param[in] printer The RTEMS printer to send the output to.
 * @param[in] indentation_level The current indentation level.
 * @param[in] indentation The string used for indentation.
 *
 * @returns As specified by printf().
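 *
 * A usage sketch (rtems_print_printer_printf() from <rtems/print.h> is
 * assumed to direct the report to stdout; the report name and indentation
 * string are arbitrary choices):
 * @code
 * rtems_printer printer;
 *
 * // initialize a printf()-based printer, then emit the XML report
 * rtems_print_printer_printf( &printer );
 * rtems_profiling_report_xml( "Profiling Report", &printer, 0, "  " );
 * @endcode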
 */
int rtems_profiling_report_xml(
  const char *name,
  const rtems_printer *printer,
  uint32_t indentation_level,
  const char *indentation
);

/** @} */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#endif /* _RTEMS_PROFILING_H */