1 | /* |
---|
2 | * SPDX-License-Identifier: BSD-2-Clause |
---|
3 | * |
---|
4 | * Copyright (C) 2018 embedded brains GmbH |
---|
5 | * |
---|
6 | * Redistribution and use in source and binary forms, with or without |
---|
7 | * modification, are permitted provided that the following conditions |
---|
8 | * are met: |
---|
9 | * 1. Redistributions of source code must retain the above copyright |
---|
10 | * notice, this list of conditions and the following disclaimer. |
---|
11 | * 2. Redistributions in binary form must reproduce the above copyright |
---|
12 | * notice, this list of conditions and the following disclaimer in the |
---|
13 | * documentation and/or other materials provided with the distribution. |
---|
14 | * |
---|
15 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
---|
16 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
---|
17 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE |
---|
18 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE |
---|
19 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR |
---|
20 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF |
---|
21 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS |
---|
22 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN |
---|
23 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
---|
24 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE |
---|
25 | * POSSIBILITY OF SUCH DAMAGE. |
---|
26 | */ |
---|
27 | |
---|
28 | #include <t.h> |
---|
29 | |
---|
30 | #include <alloca.h> |
---|
31 | #include <inttypes.h> |
---|
32 | #include <stdlib.h> |
---|
33 | #include <string.h> |
---|
34 | |
---|
35 | #include <rtems.h> |
---|
36 | |
---|
37 | #define WAKEUP_EVENT RTEMS_EVENT_0 |
---|
38 | |
---|
/*
 * Describes one cache-load worker task.  One instance exists per
 * processor; the worker continuously dirties its data-cache chunk to
 * generate load (see load_worker()).
 */
typedef struct {
	/* Context of the measurement master (the task running the request) */
	struct T_measure_runtime_context *master;
	/* Worker task identifier; zero if the task was not created */
	rtems_id id;
	/* Chunk dirtied by this worker; may alias the master's chunk */
	volatile unsigned int *chunk;
} load_context;
---|
44 | |
---|
/*
 * Runtime-measurement context.  Allocated as one malloc() block together
 * with the sample buffer, the cache fill/dirty chunk, and the per-processor
 * load contexts (see T_measure_runtime_create()).
 */
struct T_measure_runtime_context {
	/* Framework destructor; must stay first, destroy() casts through it */
	T_destructor destructor;
	/* Number of samples recorded per measurement variant */
	size_t sample_count;
	/* Sample buffer, aligned up to the data cache line size */
	T_ticks *samples;
	/* Data cache line size in bytes (8 if unknown) */
	size_t cache_line_size;
	/* Size of the fill/dirty chunk (twice the data cache size) */
	size_t chunk_size;
	/* Chunk used by the master to fill or dirty the data cache */
	volatile unsigned int *chunk;
	/* Task identifier of the measurement runner task */
	rtems_id runner;
	/* Number of load contexts (one per processor) */
	uint32_t load_count;
	/* Load worker contexts; entries with id == 0 are unused */
	load_context *load_contexts;
};
---|
56 | |
---|
/*
 * Store one word per cache line over the whole chunk so that the data
 * cache ends up full of dirty lines.  The token varies the stored values
 * from call to call and is threaded through the return value so the
 * compiler cannot drop the stores.
 */
static unsigned int
dirty_data_cache(volatile unsigned int *chunk, size_t chunk_size,
    size_t cache_line_size, unsigned int token)
{
	size_t word_count;
	size_t words_per_line;
	size_t j;

	word_count = chunk_size / sizeof(chunk[0]);
	words_per_line = cache_line_size / sizeof(chunk[0]);
	j = 0;

	while (j < word_count) {
		chunk[j] = j + token;
		j += words_per_line;
	}

	return j + token;
}
---|
74 | |
---|
/*
 * Block the calling task until a worker signals the wakeup event.  The
 * received event set is discarded; only the wakeup itself matters.
 */
static void
wait_for_worker(void)
{
	rtems_event_set events;

	(void)rtems_event_receive(WAKEUP_EVENT, RTEMS_EVENT_ALL | RTEMS_WAIT,
	    RTEMS_NO_TIMEOUT, &events);
}
---|
83 | |
---|
/* Send the wakeup event to the measurement runner task. */
static void
wakeup_master(const T_measure_runtime_context *ctx)
{
	(void)rtems_event_send(ctx->runner, WAKEUP_EVENT);
}
---|
89 | |
---|
/* Suspend the load worker task so it stops generating load. */
static void
suspend_worker(const load_context *lctx)
{
	(void)rtems_task_suspend(lctx->id);
}
---|
95 | |
---|
/*
 * Restart the load worker task and wait until it signals that its initial
 * cache-dirtying pass is complete (see load_worker()).
 */
static void
restart_worker(const load_context *lctx)
{
	(void)rtems_task_restart(lctx->id, (rtems_task_argument)lctx);
	wait_for_worker();
}
---|
102 | |
---|
103 | static void |
---|
104 | load_worker(rtems_task_argument arg) |
---|
105 | { |
---|
106 | const load_context *lctx; |
---|
107 | T_measure_runtime_context *ctx; |
---|
108 | unsigned int token; |
---|
109 | volatile unsigned int *chunk; |
---|
110 | size_t chunk_size; |
---|
111 | size_t cache_line_size; |
---|
112 | |
---|
113 | lctx = (const load_context *)arg; |
---|
114 | ctx = lctx->master; |
---|
115 | chunk = lctx->chunk; |
---|
116 | chunk_size = ctx->chunk_size; |
---|
117 | cache_line_size = ctx->cache_line_size; |
---|
118 | token = (unsigned int)rtems_scheduler_get_processor(); |
---|
119 | |
---|
120 | token = dirty_data_cache(chunk, chunk_size, cache_line_size, token); |
---|
121 | wakeup_master(ctx); |
---|
122 | |
---|
123 | while (true) { |
---|
124 | token = dirty_data_cache(chunk, chunk_size, cache_line_size, |
---|
125 | token); |
---|
126 | } |
---|
127 | } |
---|
128 | |
---|
129 | static void |
---|
130 | destroy_worker(const T_measure_runtime_context *ctx) |
---|
131 | { |
---|
132 | uint32_t load; |
---|
133 | |
---|
134 | for (load = 0; load < ctx->load_count; ++load) { |
---|
135 | const load_context *lctx; |
---|
136 | |
---|
137 | lctx = &ctx->load_contexts[load]; |
---|
138 | |
---|
139 | if (lctx->chunk != ctx->chunk) { |
---|
140 | free(RTEMS_DEVOLATILE(unsigned int *, lctx->chunk)); |
---|
141 | } |
---|
142 | |
---|
143 | |
---|
144 | if (lctx->id != 0) { |
---|
145 | rtems_task_delete(lctx->id); |
---|
146 | } |
---|
147 | } |
---|
148 | } |
---|
149 | |
---|
/*
 * Framework destructor callback: tear down the workers and release the
 * context memory.  The destructor is the first member of the context, so
 * the cast below is valid.
 */
static void
destroy(T_destructor *dtor)
{
	T_measure_runtime_context *ctx;

	ctx = (T_measure_runtime_context *)dtor;
	destroy_worker(ctx);
	free(ctx);
}
---|
159 | |
---|
/* Return p advanced by o bytes, discarding qualifiers. */
static void *
add_offset(const volatile void *p, uintptr_t o)
{
	uintptr_t addr = (uintptr_t)p;

	return (void *)(addr + o);
}
---|
165 | |
---|
/*
 * Round the address of p up to the next multiple of a.  The alignment a
 * must be a power of two.
 */
static void *
align_up(const volatile void *p, uintptr_t a)
{
	uintptr_t mask = a - 1;

	return (void *)(((uintptr_t)p + mask) & ~mask);
}
---|
171 | |
---|
/*
 * Create a runtime-measurement context.
 *
 * The context, sample buffer, cache chunk, and per-processor load
 * contexts are carved out of one malloc() block; two extra cache lines of
 * slack cover the align_up() adjustments below.  One load worker task per
 * processor is created, started once so it dirties its chunk, and then
 * suspended until a load measurement activates it.
 *
 * Returns the new context, or NULL if the allocation or a task creation
 * fails (partially created resources are released via destroy()).
 */
T_measure_runtime_context *
T_measure_runtime_create(const T_measure_runtime_config *config)
{
	T_measure_runtime_context *ctx;
	size_t sample_size;
	size_t cache_line_size;
	size_t chunk_size;
	size_t load_size;
	uint32_t load_count;
	bool success;
	uint32_t i;
#ifdef RTEMS_SMP
	cpu_set_t cpu;
#endif

	sample_size = config->sample_count * sizeof(ctx->samples[0]);

	cache_line_size = rtems_cache_get_data_line_size();

	/* Fall back to a small default alignment if the size is unknown */
	if (cache_line_size == 0) {
		cache_line_size = 8;
	}

	chunk_size = rtems_cache_get_data_cache_size(0);

	if (chunk_size == 0) {
		chunk_size = cache_line_size;
	}

	/* Twice the cache size — presumably so a full pass replaces every line */
	chunk_size *= 2;

	load_count = rtems_get_processor_count();
	load_size = load_count * sizeof(ctx->load_contexts[0]);

	ctx = malloc(sizeof(*ctx) + sample_size + load_size + chunk_size +
	    2 * cache_line_size);

	if (ctx == NULL) {
		return NULL;
	}

	/* Carve the regions out of the single allocation, in order */
	ctx->sample_count = config->sample_count;
	ctx->samples = add_offset(ctx, sizeof(*ctx));
	ctx->cache_line_size = cache_line_size;
	ctx->chunk_size = chunk_size;
	ctx->chunk = add_offset(ctx->samples, sample_size);
	ctx->runner = rtems_task_self();
	ctx->load_count = load_count;
	ctx->load_contexts = add_offset(ctx->chunk, chunk_size);
	/*
	 * NOTE(review): samples and chunk are aligned up after the later
	 * region pointers were derived from the unaligned values; the two
	 * extra cache lines of slack are meant to absorb this, but verify
	 * that an aligned-up samples buffer cannot overlap the chunk.
	 */
	ctx->samples = align_up(ctx->samples, cache_line_size);
	ctx->chunk = align_up(ctx->chunk, cache_line_size);

	memset(ctx->load_contexts, 0, load_size);
	success = true;

	for (i = 0; i < load_count; ++i) {
		rtems_status_code sc;
		rtems_id id;
		load_context *lctx;
#ifdef RTEMS_SMP
		rtems_task_priority priority;
		rtems_id scheduler;

		/* Skip processors for which no scheduler can be identified */
		sc = rtems_scheduler_ident_by_processor(i, &scheduler);
		if (sc != RTEMS_SUCCESSFUL) {
			continue;
		}
#endif

		sc = rtems_task_create(rtems_build_name('L', 'O', 'A', 'D'),
		    RTEMS_MAXIMUM_PRIORITY - 1, RTEMS_MINIMUM_STACK_SIZE,
		    RTEMS_DEFAULT_MODES, RTEMS_DEFAULT_ATTRIBUTES, &id);
		if (sc != RTEMS_SUCCESSFUL) {
			success = false;
			break;
		}

		lctx = &ctx->load_contexts[i];

		lctx->master = ctx;
		lctx->id = id;

		/* Give each worker its own chunk; share on allocation failure */
		lctx->chunk = malloc(chunk_size);
		if (lctx->chunk == NULL) {
			lctx->chunk = ctx->chunk;
		}

#ifdef RTEMS_SMP
		/* Move the worker to the scheduler of processor i and pin it */
		(void)rtems_scheduler_get_maximum_priority(scheduler, &priority);
		(void)rtems_task_set_scheduler(id, scheduler, priority - 1);

		CPU_ZERO(&cpu);
		CPU_SET((int)i, &cpu);
		(void)rtems_task_set_affinity(id, sizeof(cpu), &cpu);
#endif

		(void)rtems_task_start(id, load_worker,
		    (rtems_task_argument)lctx);

		/* Let the worker dirty its chunk once, then park it */
		wait_for_worker();
		suspend_worker(lctx);
	}

	if (success) {
#ifdef RTEMS_SMP
		/* Run the measurements themselves on processor 0 */
		CPU_ZERO(&cpu);
		CPU_SET(0, &cpu);
		(void)rtems_task_set_affinity(RTEMS_SELF, sizeof(cpu), &cpu);
#endif
	} else {
		destroy(&ctx->destructor);
		return NULL;
	}

	T_add_destructor(&ctx->destructor, destroy);
	return ctx;
}
---|
289 | |
---|
290 | static int |
---|
291 | cmp(const void *ap, const void *bp) |
---|
292 | { |
---|
293 | T_ticks a; |
---|
294 | T_ticks b; |
---|
295 | |
---|
296 | a = *(const T_ticks *)ap; |
---|
297 | b = *(const T_ticks *)bp; |
---|
298 | |
---|
299 | if (a < b) { |
---|
300 | return -1; |
---|
301 | } else if (a > b) { |
---|
302 | return 1; |
---|
303 | } else { |
---|
304 | return 0; |
---|
305 | } |
---|
306 | } |
---|
307 | |
---|
/* Print the begin-of-variant marker lines ("M:B" name, "M:V" variant). */
static void
measure_variant_begin(const char *name, const char *variant)
{
	T_printf("M:B:%s\n", name);
	T_printf("M:V:%s\n", variant);
}
---|
314 | |
---|
315 | static T_time |
---|
316 | accumulate(const T_ticks *samples, size_t sample_count) |
---|
317 | { |
---|
318 | T_time a; |
---|
319 | size_t i; |
---|
320 | |
---|
321 | a = 0; |
---|
322 | |
---|
323 | for (i = 0; i < sample_count; ++i) { |
---|
324 | a += T_ticks_to_time(samples[i]); |
---|
325 | } |
---|
326 | |
---|
327 | return a; |
---|
328 | } |
---|
329 | |
---|
/*
 * Compute the median absolute deviation (MAD) of the samples.  The buffer
 * must already be sorted ascending; it is transformed in place into the
 * absolute deviations from the median (sortedness lets |x - median| be
 * computed without a per-element branch: elements below the midpoint use
 * median - x, the rest use x - median) and sorted again to pick the
 * median of the deviations.  The sample buffer is destroyed.
 */
static T_ticks
median_absolute_deviation(T_ticks *samples, size_t sample_count)
{
	T_ticks median;
	size_t i;

	median = samples[sample_count / 2];

	for (i = 0; i < sample_count / 2; ++i) {
		samples[i] = median - samples[i];
	}

	for (; i < sample_count; ++i) {
		samples[i] = samples[i] - median;
	}

	qsort(samples, sample_count, sizeof(samples[0]), cmp);
	return samples[sample_count / 2];
}
---|
349 | |
---|
/*
 * Print the sorted samples run-length encoded: one "M:S:<count>:<time>"
 * line per distinct nanosecond value.  A line reports the number of
 * samples seen since the previous line; the counts of all lines sum to
 * the sample count.
 */
static void
report_sorted_samples(const T_measure_runtime_context *ctx)
{
	size_t sample_count;
	const T_ticks *samples;
	T_time_string ts;
	T_ticks last;
	T_ticks v;
	size_t count;
	size_t i;

	sample_count = ctx->sample_count;
	samples = ctx->samples;
	/*
	 * Start with the maximum representable value so the first sample
	 * always differs from "last" (relies on T_ticks being unsigned).
	 */
	last = 0;
	--last;
	count = 0;

	for (i = 0; i < sample_count; ++i) {
		v = samples[i];
		++count;

		if (v != last) {
			uint32_t sa;
			uint32_t sb;
			uint32_t nsa;
			uint32_t nsb;
			T_time t;

			/*
			 * Only emit a line when the value differs in its
			 * seconds/nanoseconds representation; ticks mapping
			 * to the same nanosecond value merge into one run.
			 */
			T_time_to_seconds_and_nanoseconds(T_ticks_to_time(last),
			    &sa, &nsa);
			t = T_ticks_to_time(v);
			T_time_to_seconds_and_nanoseconds(t, &sb, &nsb);

			if (sa != sb || nsa != nsb) {
				T_printf("M:S:%zu:%s\n", count,
				    T_time_to_string_ns(t, ts));
				count = 0;
			}

			last = v;
		}
	}

	/* Flush the final run */
	if (count > 0) {
		T_printf("M:S:%zu:%s\n", count,
		    T_ticks_to_string_ns(last, ts));
	}
}
---|
398 | |
---|
/*
 * Print the statistics report of one measurement variant: sample count,
 * optionally the sorted samples, minimum, the 1st/25th/50th/75th/99th
 * percentiles, maximum, median absolute deviation, the accumulated sample
 * time, and the wall-clock duration of the variant.  Sorts the sample
 * buffer in place; the MAD computation then overwrites it, so the buffer
 * contents are consumed here.
 */
static void
measure_variant_end(const T_measure_runtime_context *ctx,
    const T_measure_runtime_request *req, T_time begin)
{
	size_t sample_count;
	T_ticks *samples;
	T_time_string ts;
	T_time d;
	T_ticks v;
	T_time a;

	sample_count = ctx->sample_count;
	samples = ctx->samples;
	d = T_now() - begin;
	/* Accumulate before sorting; the sum is order-independent anyway */
	a = accumulate(samples, sample_count);
	qsort(samples, sample_count, sizeof(samples[0]), cmp);
	T_printf("M:N:%zu\n", sample_count);

	if ((req->flags & T_MEASURE_RUNTIME_REPORT_SAMPLES) != 0) {
		report_sorted_samples(ctx);
	}

	v = samples[0];
	T_printf("M:MI:%s\n", T_ticks_to_string_ns(v, ts));
	v = samples[(1 * sample_count) / 100];
	T_printf("M:P1:%s\n", T_ticks_to_string_ns(v, ts));
	v = samples[(1 * sample_count) / 4];
	T_printf("M:Q1:%s\n", T_ticks_to_string_ns(v, ts));
	v = samples[sample_count / 2];
	T_printf("M:Q2:%s\n", T_ticks_to_string_ns(v, ts));
	v = samples[(3 * sample_count) / 4];
	T_printf("M:Q3:%s\n", T_ticks_to_string_ns(v, ts));
	v = samples[(99 * sample_count) / 100];
	T_printf("M:P99:%s\n", T_ticks_to_string_ns(v, ts));
	v = samples[sample_count - 1];
	T_printf("M:MX:%s\n", T_ticks_to_string_ns(v, ts));
	v = median_absolute_deviation(samples, sample_count);
	T_printf("M:MAD:%s\n", T_ticks_to_string_ns(v, ts));
	T_printf("M:D:%s\n", T_time_to_string_ns(a, ts));
	T_printf("M:E:%s:D:%s\n", req->name, T_time_to_string_ns(d, ts));
}
---|
440 | |
---|
/*
 * Read one word per cache line over the whole chunk so that the data
 * cache is filled with chunk lines.  The chunk is volatile, so the
 * otherwise useless reads are not optimized away.
 */
static void
fill_data_cache(volatile unsigned int *chunk, size_t chunk_size,
    size_t cache_line_size)
{
	size_t word_count = chunk_size / sizeof(chunk[0]);
	size_t step = cache_line_size / sizeof(chunk[0]);
	size_t j;

	for (j = 0; j < word_count; j += step) {
		(void)chunk[j];
	}
}
---|
456 | |
---|
/*
 * Call the measured body through a freshly reserved stack area so that
 * the body does not run on an already touched part of the stack.  The
 * obfuscation keeps the compiler from eliding the alloca().
 */
static void
dirty_call(void (*body)(void *), void *arg)
{
	void *space;

	/* Ensure that we use an untouched stack area */
	space = alloca(1024);
	RTEMS_OBFUSCATE_VARIABLE(space);

	(*body)(arg);
}
---|
468 | |
---|
469 | static void |
---|
470 | setup(const T_measure_runtime_request *req, void *arg) |
---|
471 | { |
---|
472 | if (req->setup != NULL) { |
---|
473 | (*req->setup)(arg); |
---|
474 | } |
---|
475 | } |
---|
476 | |
---|
477 | static bool |
---|
478 | teardown(const T_measure_runtime_request *req, void *arg, T_ticks *delta, |
---|
479 | uint32_t tic, uint32_t toc, unsigned int retry, |
---|
480 | unsigned int maximum_retries) |
---|
481 | { |
---|
482 | if (req->teardown == NULL) { |
---|
483 | return tic == toc || retry >= maximum_retries; |
---|
484 | } |
---|
485 | |
---|
486 | return (*req->teardown)(arg, delta, tic, toc, retry); |
---|
487 | } |
---|
488 | |
---|
489 | static unsigned int |
---|
490 | get_maximum_retries(const T_measure_runtime_request *req) |
---|
491 | { |
---|
492 | return (req->flags & T_MEASURE_RUNTIME_ALLOW_CLOCK_ISR) != 0 ? 1 : 0; |
---|
493 | } |
---|
494 | |
---|
/*
 * Measure the body runtime with a valid data cache: before each sample
 * the whole chunk is read so that the cache is populated with chunk
 * lines.  Samples disturbed by a clock tick (tic != toc) are retried up
 * to the request's retry budget.
 */
static void
measure_valid_cache(T_measure_runtime_context *ctx,
    const T_measure_runtime_request *req)
{
	size_t sample_count;
	T_ticks *samples;
	void (*body)(void *);
	void *arg;
	size_t i;
	T_time begin;

	measure_variant_begin(req->name, "ValidCache");
	begin = T_now();
	sample_count = ctx->sample_count;
	samples = ctx->samples;
	body = req->body;
	arg = req->arg;

	for (i = 0; i < sample_count; ++i) {
		unsigned int maximum_retries;
		unsigned int retry;

		maximum_retries = get_maximum_retries(req);
		retry = 0;

		while (true) {
			rtems_interval tic;
			rtems_interval toc;
			T_ticks t0;
			T_ticks t1;

			setup(req, arg);
			fill_data_cache(ctx->chunk, ctx->chunk_size,
			    ctx->cache_line_size);

			/*
			 * The tick counters bracket the measurement so that
			 * samples disturbed by a clock interrupt can be
			 * detected by teardown() and retried.
			 */
			tic = rtems_clock_get_ticks_since_boot();
			t0 = T_tick();
			(*body)(arg);
			t1 = T_tick();
			toc = rtems_clock_get_ticks_since_boot();
			samples[i] = t1 - t0;

			if (teardown(req, arg, &samples[i], tic, toc, retry,
			    maximum_retries)) {
				break;
			}

			++retry;
		}
	}

	measure_variant_end(ctx, req, begin);
}
---|
548 | |
---|
/*
 * Measure the body runtime with a hot cache: for each sample the body is
 * executed twice and only the second run is recorded, so the first run
 * warms the caches.  The first run's teardown is invoked with a zero
 * retry budget and its verdict is ignored.
 */
static void
measure_hot_cache(T_measure_runtime_context *ctx,
    const T_measure_runtime_request *req)
{
	size_t sample_count;
	T_ticks *samples;
	void (*body)(void *);
	void *arg;
	size_t i;
	T_time begin;

	measure_variant_begin(req->name, "HotCache");
	begin = T_now();
	sample_count = ctx->sample_count;
	samples = ctx->samples;
	body = req->body;
	arg = req->arg;

	for (i = 0; i < sample_count; ++i) {
		unsigned int maximum_retries;
		unsigned int retry;

		maximum_retries = get_maximum_retries(req);
		retry = 0;

		while (true) {
			rtems_interval tic;
			rtems_interval toc;
			T_ticks t0;
			T_ticks t1;

			/* Warm-up run: result is overwritten below */
			setup(req, arg);

			tic = rtems_clock_get_ticks_since_boot();
			t0 = T_tick();
			(*body)(arg);
			t1 = T_tick();
			toc = rtems_clock_get_ticks_since_boot();
			samples[i] = t1 - t0;

			(void)teardown(req, arg, &samples[i], tic, toc, retry,
			    0);
			/* Measured run with warmed caches */
			setup(req, arg);

			tic = rtems_clock_get_ticks_since_boot();
			t0 = T_tick();
			(*body)(arg);
			t1 = T_tick();
			toc = rtems_clock_get_ticks_since_boot();
			samples[i] = t1 - t0;

			if (teardown(req, arg, &samples[i], tic, toc, retry,
			    maximum_retries)) {
				break;
			}

			++retry;
		}
	}

	measure_variant_end(ctx, req, begin);
}
---|
611 | |
---|
/*
 * Measure the body runtime with a dirty data cache and an invalidated
 * instruction cache.  Before each sample the chunk is written to fill
 * the data cache with dirty lines, and dirty_call() runs the body via a
 * freshly reserved stack area.
 */
static void
measure_dirty_cache(T_measure_runtime_context *ctx,
    const T_measure_runtime_request *req)
{
	size_t sample_count;
	T_ticks *samples;
	void (*body)(void *);
	void *arg;
	size_t i;
	T_time begin;
	size_t token;

	measure_variant_begin(req->name, "DirtyCache");
	begin = T_now();
	sample_count = ctx->sample_count;
	samples = ctx->samples;
	body = req->body;
	arg = req->arg;
	/* Carried across samples so the dirtied values keep changing */
	token = 0;

	for (i = 0; i < sample_count; ++i) {
		unsigned int maximum_retries;
		unsigned int retry;

		maximum_retries = get_maximum_retries(req);
		retry = 0;

		while (true) {
			rtems_interval tic;
			rtems_interval toc;
			T_ticks t0;
			T_ticks t1;

			setup(req, arg);
			token = dirty_data_cache(ctx->chunk, ctx->chunk_size,
			    ctx->cache_line_size, token);
			rtems_cache_invalidate_entire_instruction();

			tic = rtems_clock_get_ticks_since_boot();
			t0 = T_tick();
			dirty_call(body, arg);
			t1 = T_tick();
			toc = rtems_clock_get_ticks_since_boot();
			samples[i] = t1 - t0;

			if (teardown(req, arg, &samples[i], tic, toc, retry,
			    maximum_retries)) {
				break;
			}

			++retry;
		}
	}

	measure_variant_end(ctx, req, begin);
}
---|
668 | |
---|
#ifdef __sparc__
/*
 * Use recursive function calls to make sure that we cause window overflow
 * traps in the body.  Try to make it hard for the compiler to optimize the
 * recursive function away.
 */
static T_ticks
recursive_load_call(void (*body)(void *), void *arg, int n)
{
	T_ticks delta;

	/* Keep the recursion depth opaque to the optimizer */
	RTEMS_OBFUSCATE_VARIABLE(n);

	if (n > 0) {
		delta = recursive_load_call(body, arg, n - 1);
	} else {
		T_ticks t0;
		T_ticks t1;

		t0 = T_tick();
		dirty_call(body, arg);
		t1 = T_tick();

		delta = t1 - t0;
	}

	RTEMS_OBFUSCATE_VARIABLE(delta);
	return delta;
}
#else
/* Measure one dirty_call() invocation of the body, in ticks. */
static T_ticks
load_call(void (*body)(void *), void *arg)
{
	T_ticks t0;
	T_ticks t1;

	t0 = T_tick();
	dirty_call(body, arg);
	t1 = T_tick();

	return t1 - t0;
}
#endif
---|
712 | |
---|
/*
 * Measure the body runtime under load.  The worker lctx is (re)started
 * before sampling; the reported load level is load + 1.  The cache state
 * is dirtied as in the DirtyCache variant.  On SPARC the body is invoked
 * at deep register-window depth to provoke window overflow traps.
 */
static void
measure_load_variant(T_measure_runtime_context *ctx,
    const T_measure_runtime_request *req,
    const load_context *lctx, uint32_t load)
{
	size_t sample_count;
	T_ticks *samples;
	void (*body)(void *);
	void *arg;
	size_t i;
	T_time begin;
	size_t token;

	measure_variant_begin(req->name, "Load");
	T_printf("M:L:%" PRIu32 "\n", load + 1);
	begin = T_now();
	sample_count = ctx->sample_count;
	samples = ctx->samples;
	body = req->body;
	arg = req->arg;
	token = 0;

	/* Activate the worker; it keeps running while we sample */
	restart_worker(lctx);

	for (i = 0; i < sample_count; ++i) {
		unsigned int maximum_retries;
		unsigned int retry;

		maximum_retries = get_maximum_retries(req);
		retry = 0;

		while (true) {
			rtems_interval tic;
			rtems_interval toc;
			T_ticks delta;

			setup(req, arg);
			token = dirty_data_cache(ctx->chunk, ctx->chunk_size,
			    ctx->cache_line_size, token);
			rtems_cache_invalidate_entire_instruction();

			tic = rtems_clock_get_ticks_since_boot();
#ifdef __sparc__
			delta = recursive_load_call(body, arg,
			    SPARC_NUMBER_OF_REGISTER_WINDOWS - 3);
#else
			delta = load_call(body, arg);
#endif
			toc = rtems_clock_get_ticks_since_boot();
			samples[i] = delta;

			if (teardown(req, arg, &samples[i], tic, toc, retry,
			    maximum_retries)) {
				break;
			}

			++retry;
		}
	}

	measure_variant_end(ctx, req, begin);
}
---|
775 | |
---|
/*
 * Run the load measurement variants.  Workers are activated one after
 * another and stay running, so the load level rises with each step.  The
 * intermediate (minor) levels can be skipped via
 * T_MEASURE_RUNTIME_DISABLE_MINOR_LOAD and the final all-workers level
 * via T_MEASURE_RUNTIME_DISABLE_MAX_LOAD.  All workers are suspended
 * before returning.
 */
static void
measure_load(T_measure_runtime_context *ctx,
    const T_measure_runtime_request *req)
{
	const load_context *lctx;
	uint32_t load;

#ifdef RTEMS_SMP
	/* Increasing load levels: one additional worker per step */
	for (load = 0; load < ctx->load_count - 1; ++load) {
		lctx = &ctx->load_contexts[load];

		if (lctx->id != 0) {
			if ((req->flags &
			    T_MEASURE_RUNTIME_DISABLE_MINOR_LOAD) == 0) {
				measure_load_variant(ctx, req, lctx, load);
			} else {
				/* Still activate the worker for later levels */
				restart_worker(lctx);
			}
		}
	}
#endif

	/* Maximum load: all created workers active */
	if ((req->flags & T_MEASURE_RUNTIME_DISABLE_MAX_LOAD) == 0) {
		load = ctx->load_count - 1;
		lctx = &ctx->load_contexts[load];

		if (lctx->id != 0) {
			measure_load_variant(ctx, req, lctx, load);
		}
	}

	/* Park all workers again */
	for (load = 0; load < ctx->load_count; ++load) {
		lctx = &ctx->load_contexts[load];

		if (lctx->id != 0) {
			suspend_worker(lctx);
		}
	}
}
---|
815 | |
---|
/*
 * Execute a measurement request: run the ValidCache, HotCache and
 * DirtyCache variants (unless disabled by request flags), then the load
 * variants.
 */
void
T_measure_runtime(T_measure_runtime_context *ctx,
    const T_measure_runtime_request *req)
{
	/*
	 * Do ValidCache variant before HotCache to get a good overall cache
	 * state for the HotCache variant.
	 */
	if ((req->flags & T_MEASURE_RUNTIME_DISABLE_VALID_CACHE) == 0) {
		measure_valid_cache(ctx, req);
	}

	if ((req->flags & T_MEASURE_RUNTIME_DISABLE_HOT_CACHE) == 0) {
		measure_hot_cache(ctx, req);
	}

	if ((req->flags & T_MEASURE_RUNTIME_DISABLE_DIRTY_CACHE) == 0) {
		measure_dirty_cache(ctx, req);
	}

	measure_load(ctx, req);
}
---|