1 /*
2  * Copyright (C) 2022 The Android Open Source Project
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining
5  * a copy of this software and associated documentation files
6  * (the "Software"), to deal in the Software without restriction,
7  * including without limitation the rights to use, copy, modify, merge,
8  * publish, distribute, sublicense, and/or sell copies of the Software,
9  * and to permit persons to whom the Software is furnished to do so,
10  * subject to the following conditions:
11  *
12  * The above copyright notice and this permission notice shall be
13  * included in all copies or substantial portions of the Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
18  * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
19  * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
20  * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
21  * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
22  */
23 
24 /*
25  * bench functions can be defined with the macro
26  * BENCH(suite_name,bench_name,n [, params])
27  * {
28  *     ... bench function body ...
29  * }
30  *
31  *  - This body will be executed n times for each params, if 4 arguments are
32  *    given.
33  *  - This body will be executed n times, if 3 arguments are given.
34  *
35  * For a suite, one is expected to also define BENCH_SETUP, BENCH_TEARDOWN.
36  * For a 2-tuple (suite_name, bench_name) one is expected to also define at
37  * least one BENCH_RESULT.
38  *
39  * BENCH_SETUP(suite_name)
40  * {
41  *     ... bench setup body ...
42  *     return int_error_code;
43  * }
44  *
45  * BENCH_SETUP(suite_name):
 *  - Will return 0 or NO_ERROR when it succeeds.
47  *  - Will be run before every execution of the BENCH body
48  *  - Will cancel execution of the next BENCH body if returns non-zero.
49  *    Test will be considered failed.
50  *  - Will cancel execution of the next BENCH body if any ASSERT_<op> fails.
51  *    Test will be considered failed.
52  *  - All ASSERT_<op> macros from trusty_unittest can be used
 *  - GTEST_SKIP() may be called to skip the benchmark run.
54  *
55  * BENCH_TEARDOWN(suite_name)
56  * {
57  *     ... bench teardown body ...
58  * }
59  *
60  * BENCH_TEARDOWN(suite_name):
61  *  - Is executed even if BENCH_SETUP failed
62  *  - Does not return any value
63  *  - All ASSERT_<op> macros from trusty_unittest can be used
64  *
65  * BENCH_RESULT(suite_name,bench_name,res_name)
66  * {
67  *     ... bench result body ...
68  *     return int64_t_value_of_res_name_for_last_bench_body_run;
69  * }
70  *
71  *
72  * BENCH_RESULT(suite_name,bench_name,res_name):
 *  - At least one must be defined. Can be defined multiple times.
74  *  - Must return an int64_t
75  *  - Results will be aggregated for n runs of the BENCH( ) body.
76  *    Aggregation is grouped by params to min/max/avg of the n runs
77  *  - res_name will be used as column title for the metric summary
78  *
79  * Example:
80  *      BENCH_RESULT(hwcrypto, hwrng, time_ns) {
81  *          return bench_get_duration_ns();
82  *      }
83  *
84  * - The execution sequence is roughly:
85  *
86  *       for each param if any:
87  *          BENCH_SETUP(suite_name,bench_name)
88  *           repeat n times:
89  *               BENCH_CONTENT
90  *               for each BENCH_RESULT(suite_name,bench_name,res_name)
91  *                   update the accumulators for res_name [min,max,avg]
92  *           BENCH_TEARDOWN(suite_name,bench_name)
93  *       Print Result Table
94  *
95  * NOTE:
96  * When using a parameter array:
97  *  - params must be an array of any type T any_name_is_fine[NB_PARAMS] = {...};
98  *    The number of params is deduced from the sizeof(params)/sizeof(params[0]).
99  *    So please do not dynamically allocate T* params.
100  *  - params array name is up to the test writer
101  *
102  * The default column name for a parameter in the summary table is its index in
103  * the param array. To customize it, one can define a function with the
104  * following signature:
105  * static void trusty_bench_get_param_name_cb(char* buf, size_t buf_size,
106  * size_t param_idx);
107  *
108  * then assign it during BENCH_SETUP to the trusty_bench_get_param_name_cb
109  * global:
110  *
111  * BENCH_SETUP(suite_name) {
112  *   trusty_bench_get_param_name_cb = &get_param_name_cb;
113  *   …
114  * }
115  *
116  * trusty_bench_get_param_name_cb will be reset to NULL after teardown.
117  *
118  * See "trusty/user/app/sample/hwrng-bench/main.c" for a working and thoroughly
119  * commented example
120  */
121 
122 #pragma once
123 #include <errno.h>
124 #include <inttypes.h>
125 #include <stdarg.h>
126 #include <stdlib.h>
127 
128 #include <lib/unittest/unittest.h>
129 #include <trusty_log.h>
130 #include "trusty_bench_common.h"
131 #include "trusty_bench_json_print.h"
132 #include "trusty_bench_option_cb.h"
133 #include "trusty_bench_print_tables.h"
134 #include "trusty_unittest.h"
135 #ifdef TRUSTY_USERSPACE
136 #ifdef WITH_PTHREAD
137 #include <lib/thread/pthread.h>
138 #endif
139 #elif WITH_SMP
140 #include <kernel/mp.h>
141 #endif
142 #include <uapi/err.h>
143 
/*
 * A few helper macros for static dispatch on the number of macro arguments
 * (supports 1 to 8 arguments). Used below to select BENCH_RESULT_3/_4/_5.
 *
 * NOTE(review): like most C99 argument-counting tricks, NB_ARGS() with an
 * empty argument list still expands to 1 — callers must pass at least one
 * argument.
 */
#define NB_ARGS_HELPER(_1, _2, _3, _4, _5, _6, _7, _8, N, ...) N
#define NB_ARGS(...) NB_ARGS_HELPER(__VA_ARGS__, 8, 7, 6, 5, 4, 3, 2, 1, 0)

/* Two-level concatenation so that macro arguments are expanded first. */
#define CAT(a, ...) PRIMITIVE_CAT(a, __VA_ARGS__)
#define PRIMITIVE_CAT(a, ...) a##__VA_ARGS__

/* Force one extra macro-expansion pass over its arguments. */
#define EVAL(...) __VA_ARGS__
154 
155 __BEGIN_CDECLS
156 
/**
 * struct benchmark_internal_state - Store internals for current bench.
 * @last_bench_body_duration:   nanoseconds duration of the last execution of
 *                              the bench body (benchmark overhead already
 *                              subtracted by BENCH_CORE).
 * @cur_param_idx:              flat run index combining parameter and CPU:
 *                              param index is cur_param_idx %%
 *                              trusty_cur_bench_nb_params, cpu index is
 *                              cur_param_idx / trusty_cur_bench_nb_params
 *                              (see bench_get_param_idx/bench_get_cpu_idx).
 *
 * NOTE: as a static definition in a header, each translation unit including
 * this file gets its own copy of bench_state.
 */
static struct benchmark_internal_state {
    int64_t last_bench_body_duration;
    size_t cur_param_idx;
} bench_state;
167 
168 /**
169  * bench_get_duration_ns - convenience function to use in BENCH_RESULT to get
170  * the duration of last bench body execution.
171  *
172  * Return: The duration of the last completed BENCH body in nanoseconds.
173  */
bench_get_duration_ns(void)174 static inline int64_t bench_get_duration_ns(void) {
175     return bench_state.last_bench_body_duration;
176 }
177 
178 /**
179  * bench_get_param_idx - convenience function to use to get the
180  * index of the current parameter BENCH_XXX is running for.
181  * Return: The index of the parameter BENCH_XXX is running for.
182  */
bench_get_param_idx(void)183 static inline size_t bench_get_param_idx(void) {
184     return bench_state.cur_param_idx % trusty_cur_bench_nb_params;
185 }
186 
187 /**
188  * bench_get_cpu_idx - convenience function to use to get the
189  * index of the current cpu BENCH_XXX is running for.
190  * Return: The index of the cpu BENCH_XXX is running for.
191  */
bench_get_cpu_idx(void)192 static inline size_t bench_get_cpu_idx(void) {
193     return bench_state.cur_param_idx / trusty_cur_bench_nb_params;
194 }
195 
/*
 * Helpers to pin benchmark execution to one CPU so a bench can be repeated
 * on every CPU. Three variants are compiled depending on the target:
 * Trusty userspace with pthreads, kernel with SMP, or a no-op fallback.
 * Both functions return NO_ERROR (0) on success.
 */
#if defined(TRUSTY_USERSPACE) && defined(WITH_PTHREAD)
/* Pin the calling pthread to the CPU derived from the flat run index
 * (cur_param_idx / trusty_cur_bench_nb_params — see bench_get_cpu_idx). */
static int trusty_bench_multi_cpus_setup(void) {
    if (trusty_bench_nb_cpu > 1) {
        cpu_set_t cpu_set;

        CPU_ZERO(&cpu_set);
        CPU_SET(bench_state.cur_param_idx / trusty_cur_bench_nb_params,
                &cpu_set);

        /* Returns 0 on success, an errno value on failure. */
        return pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                      &cpu_set);
    }
    return NO_ERROR;
}

/* Restore the calling pthread's affinity mask to all CPUs up to
 * SMP_MAX_CPUS. */
static int trusty_bench_multi_cpus_teardown(void) {
    if (trusty_bench_nb_cpu > 1) {
        cpu_set_t cpu_set;

        CPU_ZERO(&cpu_set);
        for (int i = 0; i < SMP_MAX_CPUS; i++) {
            CPU_SET(i, &cpu_set);
        }

        return pthread_setaffinity_np(pthread_self(), sizeof(cpu_set_t),
                                      &cpu_set);
    }
    return NO_ERROR;
}
#elif !defined(TRUSTY_USERSPACE) && WITH_SMP
/* Kernel build: pin the current kernel thread to the target CPU; fails with
 * EINVAL when the CPU index is out of range or the CPU is inactive. */
static int trusty_bench_multi_cpus_setup(void) {
    if (trusty_bench_nb_cpu > 1) {
        const int cpu = bench_state.cur_param_idx / trusty_cur_bench_nb_params;

        if (cpu < SMP_MAX_CPUS && mp_is_cpu_active(cpu)) {
            thread_set_pinned_cpu(get_current_thread(), cpu);
        } else {
            return EINVAL;
        }
    }

    return NO_ERROR;
}

/* Kernel build: unpin the current thread (-1 means "any CPU"). */
static int trusty_bench_multi_cpus_teardown(void) {
    if (trusty_bench_nb_cpu > 1) {
        thread_set_pinned_cpu(get_current_thread(), -1);
    }
    return NO_ERROR;
}
#else
/* Single-CPU fallback: nothing to pin. */
static int trusty_bench_multi_cpus_setup(void) {
    return NO_ERROR;
}

static int trusty_bench_multi_cpus_teardown(void) {
    return NO_ERROR;
}
#endif
258 
259 /**
260  * trusty_bench_update_metric -  Update the appropriate metric with the value
261  * returned by BENCH_RESULT
262  * @m:              The metric whose aggregate needs to be updated.
263  * @val:            The value returned by BENCH_RESULT.
264  */
trusty_bench_update_metric(struct bench_metric_node * m,int64_t val)265 static inline void trusty_bench_update_metric(struct bench_metric_node* m,
266                                               int64_t val) {
267     m->cnt += 1;
268     m->tot += val;
269     m->aggregates[BENCH_AGGREGATE_AVG] = m->tot / m->cnt;
270     m->aggregates[BENCH_AGGREGATE_MIN] =
271             MIN(m->aggregates[BENCH_AGGREGATE_MIN], val);
272     m->aggregates[BENCH_AGGREGATE_MAX] =
273             MAX(m->aggregates[BENCH_AGGREGATE_MAX], val);
274 }
275 
/**
 * trusty_bench_run_metrics - Invoke every BENCH_RESULT callback registered
 * for the current parameter after one iteration of the bench body, and
 * record the returned sample.
 * @metric_list:        List of metrics aggregated during all BENCH runs.
 * @param_idx:          Index of the current parameter in the param_array of
 *                      BENCH.
 * @cold_run:           When true, the sample is stored as the metric's
 *                      one-off "cold" value; otherwise it is folded into the
 *                      min/max/avg aggregates via trusty_bench_update_metric.
 */
static inline void trusty_bench_run_metrics(struct list_node* metric_list,
                                            size_t param_idx,
                                            bool cold_run) {
    struct bench_metric_list_node* entry;

    list_for_every_entry(metric_list, entry, struct bench_metric_list_node,
                         node) {
        /* Only touch nodes registered for the current parameter index. */
        if (param_idx == entry->param_idx) {
            if (cold_run) {
                entry->metric.cold = entry->bench_result();
            } else {
                trusty_bench_update_metric(&entry->metric,
                                           entry->bench_result());
            }
        }
    }
}
301 
302 /**
303  * trusty_bench_reset_metrics -        Run All Metric Updaters after one
304  * iteration of bench function for all param/metric in the last BENCH.
305  * @metric_list:        List of metrics aggregated during all BENCH runs.
306  * @param_idx:          Index of the current parameter in the param_array of
307  *                      BENCH.
308  */
trusty_bench_reset_metrics(struct list_node * metric_list,size_t param_idx)309 static inline void trusty_bench_reset_metrics(struct list_node* metric_list,
310                                               size_t param_idx) {
311     struct bench_metric_list_node* entry;
312 
313     list_for_every_entry(metric_list, entry, struct bench_metric_list_node,
314                          node) {
315         if (param_idx == entry->param_idx) {
316             trusty_bench_update_metric(&entry->metric, entry->bench_result());
317         }
318     }
319 }
320 
/**
 * BENCH_SETUP -        Runs before every execution of the body of the BENCH
 *                      macro. Can be used to allocate memory, setup 'states',
 *                      initialize 'sessions'...
 * @suite_name:         Identifier of the current suite.
 *
 * The body following this macro becomes the suite's setup function. It must
 * return an int: 0/NO_ERROR on success; any other value cancels the
 * upcoming BENCH body and marks the test failed (see BENCH_CORE).
 */
#define BENCH_SETUP(suite_name)          \
    static int suite_name##_setup(void); \
    static int suite_name##_setup(void)
330 
/**
 * BENCH_TEARDOWN -     Runs after every execution of the body of the BENCH
 *                      macro. Can be used to free memory, clear 'states',
 *                      close 'sessions'...
 * @suite_name:         Identifier of the current suite.
 *
 * The body following this macro becomes the suite's teardown function; it
 * returns nothing.
 */
#define BENCH_TEARDOWN(suite_name)           \
    static void suite_name##_teardown(void); \
    static void suite_name##_teardown(void)
340 
/**
 * BENCH_RESULT_INNER -       Declare a metric name for the corresponding BENCH
 * and declare the functions to update it after every iteration.
 * @suite_name:         Identifier of the current suite.
 * @bench_name:         Unique identifier of the Bench in the suite.
 * @metric_name:        Name of the metric to print in the result table.
 * @formatted_value_cb_:       [optional] A callback of
 * trusty_bench_get_formatted_value_callback_t type for formatting the result
 * value to a string.
 * @param_name_cb_:             [optional] A callback of
 * trusty_bench_get_param_name_callback_t type for formatting the param name.
 *
 * The body following this macro is the metric's sampling function and must
 * return an int64_t. The metric node is appended to the (suite, bench)
 * metric list at load time by a constructor-attribute function.
 *
 * NOTE(review): the MIN aggregate is seeded with INT32_MAX even though
 * samples are int64_t; a metric whose smallest sample exceeds INT32_MAX
 * would report min == INT32_MAX. Confirm whether INT64_MAX was intended.
 */
#define BENCH_RESULT_INNER(suite_name, bench_name, metric_name,                 \
                           formatted_value_cb_, param_name_cb_)                 \
    static int64_t update_##suite_name##_##bench_name##_##metric_name(void);    \
    static struct bench_metric_list_node                                        \
            suite_name##_##bench_name##_##metric_name##_node = {                \
                    .node = LIST_INITIAL_CLEARED_VALUE,                         \
                    .metric = {0, 0, 0, {INT32_MAX, 0, 0}},                     \
                    .name = STRINGIFY(metric_name),                             \
                    .param_idx = 0,                                             \
                    .nb_params = 0,                                             \
                    .bench_result =                                             \
                            update_##suite_name##_##bench_name##_##metric_name, \
                    .formatted_value_cb = formatted_value_cb_,                  \
                    .param_name_cb = param_name_cb_};                           \
    __attribute__((constructor)) void                                           \
            suite_name##_##bench_name##_##metric_name##_add(void) {             \
        list_add_tail(&suite_name##_##bench_name##_metric_list,                 \
                      &suite_name##_##bench_name##_##metric_name##_node.node);  \
    }                                                                           \
                                                                                \
    static int64_t update_##suite_name##_##bench_name##_##metric_name(void)
374 
/* Dispatch mechanics for BENCH_RESULT: fill in the optional callbacks with 0
 * (no callback) when the caller supplies fewer than 5 arguments. */
#define BENCH_RESULT_3(suite_name, bench_name, metric_name) \
    BENCH_RESULT_INNER(suite_name, bench_name, metric_name, 0, 0)
#define BENCH_RESULT_4(suite_name, bench_name, metric_name, \
                       formatted_value_cb)                  \
    BENCH_RESULT_INNER(suite_name, bench_name, metric_name, \
                       formatted_value_cb, 0)
#define BENCH_RESULT_5(suite_name, bench_name, metric_name, \
                       formatted_value_cb, param_name_cb)   \
    BENCH_RESULT_INNER(suite_name, bench_name, metric_name, \
                       formatted_value_cb, param_name_cb)

/**
 * BENCH_RESULT -           Dispatch to BENCH_RESULT_3/_4/_5 depending on
 *                          whether it is called with 3, 4 or 5 parameters
 *                          (counted via NB_ARGS).
 * @suite_name:             Identifier of the current suite.
 * @bench_name:             Unique identifier of the Bench in the suite.
 * @metric_name:        Name of the metric to print in the result table.
 * @formatted_value_cb:        [optional] A callback of
 * trusty_bench_get_formatted_value_callback_t type for formatting the result
 * value to a string.
 * @param_name_cb:              [optional] A callback of
 * trusty_bench_get_param_name_callback_t type for formatting the param name.
 */
#define BENCH_RESULT(...) \
    CAT(BENCH_RESULT_, EVAL(NB_ARGS(__VA_ARGS__)))(__VA_ARGS__)
400 
/**
 * PARAM_TEST_NODES_SIMPLE -    Create the unparameterized test node lists for
 *                              BENCH
 * @suite_name:                 Identifier of the current suite.
 * @bench_name:                 Unique identifier of the Bench in the suite.
 *
 * .suite and .name stringify the macro arguments themselves so the node
 * carries the actual suite/bench names, matching the strings BENCH_CORE
 * passes to TEST_BEGIN_FUNC (the previous code stringified the literal
 * tokens "suite_name_"/"bench_name_"). The node is registered on the global
 * _test_list at load time by a constructor-attribute function.
 */
#define PARAM_TEST_NODES_SIMPLE(suite_name, bench_name)                        \
    static struct test_list_node suite_name##_##bench_name##_bench_##_node = { \
            .node = LIST_INITIAL_CLEARED_VALUE,                                \
            .suite = STRINGIFY(suite_name),                                    \
            .name = STRINGIFY(bench_name),                                     \
            .func = suite_name##_##bench_name##_bench_,                        \
            .needs_param = 0,                                                  \
    };                                                                         \
                                                                               \
    __attribute__((constructor)) void                                          \
            suite_name##_##bench_name##_bench_##_add(void) {                   \
        list_add_tail(&_test_list,                                             \
                      &suite_name##_##bench_name##_bench_##_node.node);        \
    }
421 
/**
 * PARAM_TEST_NODES_PARAMETRIC -    Create the parameterized test node lists
 *                                  for BENCH
 * @suite_name:                     Identifier of the current suite.
 * @bench_name:                     Unique identifier of the Bench in the suite.
 * @params:                         identifier of the param Array for parametric
 * benches
 *
 * .suite stringifies the suite argument and .name stringifies
 * "<bench_name>_<params>", matching the strings BENCH_CORE passes to the
 * print callback (the previous code token-pasted the literal prefixes
 * "suite_name_"/"bench_name_" instead of expanding the arguments). The node
 * is registered on the global _test_list at load time by a
 * constructor-attribute function.
 */
#define PARAM_TEST_NODES_PARAMETRIC(suite_name, bench_name, params)       \
    static struct test_list_node                                          \
            suite_name##_##bench_name##_bench_##params##_node = {         \
                    .node = LIST_INITIAL_CLEARED_VALUE,                   \
                    .suite = STRINGIFY(suite_name),                       \
                    .name = STRINGIFY(bench_name##_##params),             \
                    .func = suite_name##_##bench_name##_bench_##params,   \
                    .needs_param = 0,                                     \
    };                                                                    \
                                                                          \
    __attribute__((constructor)) void                                     \
            suite_name##_##bench_name##_bench_##params##_add(void) {      \
        list_add_tail(                                                    \
                &_test_list,                                              \
                &suite_name##_##bench_name##_bench_##params##_node.node); \
    }
446 
/**
 * set_param_metric -       Create a list of parameterized metrics out of the
 *                          existing list of non-parameterized metric.
 * @unparameterized_list:   List of metrics aggregated during all BENCH
 *                          runs.
 * @parameterized_list:     Will be filled with nb_params *
 *                          length_of(unparameterized_list) metrics with
 *                          appropriate param_idx value.
 * @nb_params:              Number of parameters of the BENCH macro.
 * Return:                  Pointer to the calloc'd pool backing the new list
 *                          nodes (caller owns it and must free it), or NULL
 *                          on allocation failure.
 *
 * NOTE(review): with nb_params == 0 or an empty input list, calloc(0, ...)
 * may legally return NULL, which would be reported as an allocation failure
 * — confirm callers never pass an empty configuration.
 */
static inline struct bench_metric_list_node* set_param_metric(
        struct list_node* unparameterized_list,
        struct list_node* parameterized_list,
        size_t nb_params) {
    size_t idx = 0;
    struct bench_metric_list_node* entry;
    /* One node per (metric, param) pair, zero-initialized. */
    struct bench_metric_list_node* list_pool =
            calloc(nb_params * list_length(unparameterized_list),
                   sizeof(struct bench_metric_list_node));
    if (list_pool == NULL) {
        TLOGE("Failed to Allocate memory for bench_metric_list_node!");
        return NULL;
    }
    list_for_every_entry(unparameterized_list, entry,
                         struct bench_metric_list_node, node) {
        for (size_t idx_param = 0; idx_param < nb_params; ++idx_param) {
            /* Fresh aggregates: MIN seeded high so the first sample wins. */
            struct bench_metric_node tmp_metric = {0, 0, 0, {INT32_MAX, 0, 0}};

            list_pool[idx].metric = tmp_metric;
            list_pool[idx].name = entry->name;
            list_pool[idx].param_idx = idx_param;
            list_pool[idx].nb_params = nb_params;
            list_pool[idx].bench_result = entry->bench_result;
            list_pool[idx].formatted_value_cb = entry->formatted_value_cb;
            list_pool[idx].param_name_cb = entry->param_name_cb;
            list_add_tail(parameterized_list, &(list_pool[idx].node));
            ++idx;
        }
    }
    return list_pool;
}
489 
/**
 * trusty_bench_get_overhead - Estimate the fixed measurement overhead of the
 * benchmark harness by taking the minimum of 100 back-to-back timer reads.
 *
 * Return: The smallest observed overhead in nanoseconds.
 */
static int64_t trusty_bench_get_overhead(void) {
    enum { NB_CALIBRATION_RUNS = 100 };
    int64_t best = INT64_MAX;

    for (size_t run = 0; run < NB_CALIBRATION_RUNS; ++run) {
        const int64_t t_begin = get_current_time_ns();
        const int64_t t_end = get_current_time_ns();
        const int64_t delta = t_end - t_begin;

        if (delta < best) {
            best = delta;
        }
    }
    return best;
}
509 
510 /**
511  * get_extended_bench_name - Print Status of Currently Running Bench.
512  *
513  * @test_name_in:   Name of the Current Unparameterized Test.
514  * @test_name_out:  Name of the Current Unparameterized Test.
515  *                  + "_[param_idx]"
516  *
517  * Return:          When successful, returns 0
518  *                  If test_name_out allocation/print failed returns asprintf
519  *                  return code
520  */
get_extended_bench_name(const char * test_name_in,char ** test_name_out)521 static inline int get_extended_bench_name(const char* test_name_in,
522                                           char** test_name_out) {
523     int res = snprintf(NULL, 0, "%s_%zu", test_name_in,
524                        bench_state.cur_param_idx);
525     *test_name_out = NULL;
526     if (res >= 0) {
527         *test_name_out = malloc(res + 1);
528         res = snprintf(*test_name_out, res + 1, "%s_%zu", test_name_in,
529                        bench_state.cur_param_idx);
530     }
531     if (res < 0) {
532         return res;
533     }
534     if (!test_name_out) {
535         TLOGE("Cannot Allocate memory for test name\n");
536         return -1;
537     }
538     return 0;
539 }
540 
/**
 * BENCH_CORE -             Called by both parametrized and unparameterized
 * BENCH for their common part.
 * @suite_name:             Identifier of the current suite.
 * @bench_name:             Unique identifier of the Bench in the suite.
 * @nb_runs:                The number of execution of its body for each param.
 * @nb_params:              Number of params in params array.
 * @params:                 An array T array_name[nb_params] of parameter.
 * @metric_list:            List of metric nodes to update.
 *
 * For each (param, cpu) combination: pin the CPU, run the suite setup, do
 * one cold run, then nb_runs timed runs (overhead-corrected), updating the
 * metric list after each, then run teardown and unpin. Finally print the
 * result table and clear the per-bench formatting callbacks.
 *
 * NOTE(review): on the skip/setup-failure/cold-run-failure paths the suite
 * teardown is not invoked, although the header doc states teardown runs even
 * when setup fails — confirm intended behavior.
 */
#define BENCH_CORE(suite_name, bench_name, nb_runs, nb_params, params,          \
                   metric_list)                                                 \
    reset_vertical_print_widths();                                              \
    trusty_bench_print_title(STRINGIFY(suite_name), STRINGIFY(bench_name),      \
                             STRINGIFY(params));                                \
    static trusty_bench_print_callback_t trusty_bench_print_cb =                \
            &BENCHMARK_PRINT_CB;                                                \
    trusty_cur_bench_nb_params = nb_params;                                     \
    for (size_t idx_param = 0; idx_param < (nb_params * trusty_bench_nb_cpu);   \
         ++idx_param) {                                                         \
        bench_state.cur_param_idx = idx_param;                                  \
        char* extended_test_name = NULL;                                        \
        int res_alloc = get_extended_bench_name(                                \
                STRINGIFY(bench_name##_##params), &extended_test_name);         \
        if (res_alloc < 0) {                                                    \
            TLOGE("ERROR %d expanding test name\n", res_alloc);                 \
            _test_context.all_ok = false;                                       \
            _test_context.tests_failed++;                                       \
            /* free(NULL) is a no-op; avoids a leak if a partial allocation     \
             * survived the failure. */                                         \
            free(extended_test_name);                                           \
            extended_test_name = NULL;                                          \
            continue;                                                           \
        }                                                                       \
        TEST_BEGIN_FUNC(STRINGIFY(suite_name), extended_test_name);             \
                                                                                \
        int rc = trusty_bench_multi_cpus_setup();                               \
        if (rc != NO_ERROR) {                                                   \
            _test_context.skipped = true;                                       \
            _test_context.tests_skipped++;                                      \
        } else {                                                                \
            rc = suite_name##_setup();                                          \
        }                                                                       \
                                                                                \
        if (_test_context.skipped) {                                            \
            trusty_unittest_print_status(" SKIPPED");                           \
            free(extended_test_name);                                           \
            extended_test_name = NULL;                                          \
            continue;                                                           \
        } else if (rc != NO_ERROR) {                                            \
            TLOGE("ERROR %d during benchmark setup\n", rc);                     \
            _test_context.all_ok = false;                                       \
            _test_context.tests_failed++;                                       \
            free(extended_test_name);                                           \
            extended_test_name = NULL;                                          \
            continue;                                                           \
        }                                                                       \
        int64_t overhead = trusty_bench_get_overhead();                         \
                                                                                \
        /* Cold Run */                                                          \
        int64_t start_time;                                                     \
        int64_t end_time;                                                       \
        start_time = get_current_time_ns();                                     \
        int64_t res = suite_name##_##bench_name##_inner_##params();             \
        end_time = get_current_time_ns();                                       \
                                                                                \
        if (res != NO_ERROR) {                                                  \
            TLOGE("ERROR During Cold Run %" PRId64 "\n", res);                  \
            _test_context.all_ok = false;                                       \
            _test_context.tests_failed++;                                       \
            free(extended_test_name);                                           \
            extended_test_name = NULL;                                          \
            continue;                                                           \
        }                                                                       \
                                                                                \
        bench_state.last_bench_body_duration = end_time - start_time;           \
        if (5 * overhead >= bench_state.last_bench_body_duration) {             \
            /* Arguments ordered to match the message: duration first,          \
             * overhead second. */                                              \
            trusty_unittest_printf(                                             \
                    "WARNING: Benchmark internal function is too fast %" PRId64 \
                    "ns, while the benchmark overhead is %" PRId64 "ns.",       \
                    bench_state.last_bench_body_duration, overhead);            \
        }                                                                       \
                                                                                \
        bench_state.last_bench_body_duration -= overhead;                       \
                                                                                \
        if (!_test_context.hard_fail && _test_context.all_ok) {                 \
            trusty_bench_run_metrics(&metric_list, idx_param, true);            \
        }                                                                       \
                                                                                \
        for (size_t idx_run = 0; idx_run < nb_runs; ++idx_run) {                \
            if (!_test_context.hard_fail && _test_context.all_ok) {             \
                start_time = get_current_time_ns();                             \
                res = suite_name##_##bench_name##_inner_##params();             \
                end_time = get_current_time_ns();                               \
                bench_state.last_bench_body_duration = end_time - start_time;   \
                if (overhead >= bench_state.last_bench_body_duration) {         \
                    TLOGE("Benchmark internal function is too fast %" PRId64    \
                          "ns, while the benchmark overhead is %" PRId64        \
                          "ns.",                                                \
                          bench_state.last_bench_body_duration, overhead);      \
                }                                                               \
                                                                                \
                bench_state.last_bench_body_duration -= overhead;               \
                if (res != NO_ERROR) {                                          \
                    TLOGE("ERROR %" PRId64 "\n", res);                          \
                }                                                               \
            }                                                                   \
            if (!_test_context.hard_fail && _test_context.all_ok) {             \
                trusty_bench_run_metrics(&metric_list, idx_param, false);       \
            }                                                                   \
        }                                                                       \
        suite_name##_teardown();                                                \
        rc = trusty_bench_multi_cpus_teardown();                                \
        if (rc != NO_ERROR) {                                                   \
            TLOGW("failed to reset CPU affinity: %d\n", rc);                    \
        }                                                                       \
        TEST_END_FUNC();                                                        \
        free(extended_test_name);                                               \
        extended_test_name = NULL;                                              \
    }                                                                           \
    trusty_bench_print_cb(&metric_list, (nb_params * trusty_bench_nb_cpu),      \
                          STRINGIFY(suite_name),                                \
                          STRINGIFY(bench_name##_##params));                    \
    trusty_bench_get_param_name_cb = NULL;                                      \
    trusty_bench_get_formatted_value_cb = NULL
656 
/**
 * BENCH_PARAMETERIZED_PTR - Called when BENCH has 5 parameters. This allows
 *                           reusing the other macros for a different bench by
 *                           aliasing an array to a pointer.
 * @nb_cpu:                 Number of CPUs to run the bench on (routed as 1 by
 *                          BENCH, SMP_MAX_CPUS by BENCH_ALL_CPU).
 * @suite_name:             Identifier of the current suite.
 * @bench_name:             Unique identifier of the Bench in the suite.
 * @nb_runs:                The number of execution of its body for each param
 * @params:                 An array T array_name[nb_params] of parameter
 * @nb_params:              Number of parameters in the parameter Array
 */
#define BENCH_PARAMETERIZED_PTR(nb_cpu, suite_name, bench_name, nb_runs,         \
                                params, nb_params)                               \
    static int suite_name##_##bench_name##_inner_##params(void);                 \
    static void suite_name##_##bench_name##_bench_##params(void);                \
    static struct list_node suite_name##_##bench_name##_metric_list =            \
            LIST_INITIAL_VALUE(suite_name##_##bench_name##_metric_list);         \
    static struct list_node suite_name##_##bench_name##_metric_##params##_list = \
            LIST_INITIAL_VALUE(                                                  \
                    suite_name##_##bench_name##_metric_##params##_list);         \
                                                                                 \
    static void suite_name##_##bench_name##_bench_##params(void) {               \
        trusty_bench_nb_cpu = nb_cpu;                                            \
        struct bench_metric_list_node* metric_pool = set_param_metric(           \
                &suite_name##_##bench_name##_metric_list,                        \
                &suite_name##_##bench_name##_metric_##params##_list,             \
                (nb_params * trusty_bench_nb_cpu));                              \
        if (metric_pool == NULL) {                                               \
            _test_context.hard_fail = true;                                      \
            return;                                                              \
        }                                                                        \
        BENCH_CORE(suite_name, bench_name, nb_runs, nb_params, params,           \
                   suite_name##_##bench_name##_metric_##params##_list);          \
        free(metric_pool);                                                       \
    }                                                                            \
    PARAM_TEST_NODES(suite_name, bench_name, params)                             \
                                                                                 \
    static int suite_name##_##bench_name##_inner_##params(void)
694 
/**
 * BENCH_PARAMETERIZED -    Called when BENCH has 4 parameters. Forwards to
 *                          BENCH_PARAMETERIZED_PTR, deriving the parameter
 *                          count from the array with countof().
 * @nb_cpu:                 Number of CPUs to run the bench on (routed as 1 by
 *                          BENCH, SMP_MAX_CPUS by BENCH_ALL_CPU).
 * @suite_name:             Identifier of the current suite.
 * @bench_name:             Unique identifier of the Bench in the suite.
 * @nb_runs:                The number of execution of its body for each param
 * @params:                 An array T array_name[nb_params] of parameter
 */
#define BENCH_PARAMETERIZED(nb_cpu, suite_name, bench_name, nb_runs, params) \
    BENCH_PARAMETERIZED_PTR(nb_cpu, suite_name, bench_name, nb_runs, params, \
                            countof(params))
705 
/**
 * BENCH_SIMPLE -       Called when BENCH has only 3 parameters.
 * @nb_cpu:             Number of CPUs to run the bench on (routed as 1 by
 *                      BENCH, SMP_MAX_CPUS by BENCH_ALL_CPU).
 * @suite_name:         Identifier of the current suite.
 * @bench_name:         Unique identifier of the Bench in the suite.
 * @nb_runs:            The number of execution of its body.
 */
#define BENCH_SIMPLE(nb_cpu, suite_name, bench_name, nb_runs)                  \
    static int suite_name##_##bench_name##_inner_(void);                       \
    static void suite_name##_##bench_name##_bench_(void);                      \
    static struct list_node suite_name##_##bench_name##_metric_list =          \
            LIST_INITIAL_VALUE(suite_name##_##bench_name##_metric_list);       \
    static struct list_node suite_name##_##bench_name##_metric_cpu##_list =    \
            LIST_INITIAL_VALUE(suite_name##_##bench_name##_metric_cpu##_list); \
    static void suite_name##_##bench_name##_bench_(void) {                     \
        bench_state.cur_param_idx = 0;                                         \
        trusty_bench_nb_cpu = nb_cpu;                                          \
        struct bench_metric_list_node* metric_pool = set_param_metric(         \
                &suite_name##_##bench_name##_metric_list,                      \
                &suite_name##_##bench_name##_metric_cpu##_list,                \
                trusty_bench_nb_cpu);                                          \
        if (metric_pool == NULL) {                                             \
            _test_context.hard_fail = true;                                    \
            return;                                                            \
        }                                                                      \
        BENCH_CORE(suite_name, bench_name, nb_runs, 1, ,                       \
                   suite_name##_##bench_name##_metric_cpu##_list);             \
        /* Match BENCH_PARAMETERIZED_PTR: release the pool allocated by */     \
        /* set_param_metric() so repeated runs do not leak it. */              \
        free(metric_pool);                                                     \
    }                                                                          \
                                                                               \
    PARAM_TEST_NODES(suite_name, bench_name)                                   \
    static int suite_name##_##bench_name##_inner_(void)
736 
/*
 * BENCH - Routing the BENCH macros depending on its number of parameters.
 * BENCH_<N> is selected by token-pasting the argument count computed by
 * NB_ARGS onto the BENCH_ prefix.
 */
#define BENCH_3 BENCH_SIMPLE
#define BENCH_4 BENCH_PARAMETERIZED
#define BENCH_5 BENCH_PARAMETERIZED_PTR
743 
/**
 * BENCH -                  Called with 3, 4 or 5 parameters. Routes to
 *                          BENCH_SIMPLE, BENCH_PARAMETERIZED or
 *                          BENCH_PARAMETERIZED_PTR respectively, with nb_cpu
 *                          fixed to 1 (single-CPU run).
 * @suite_name:             Identifier of the current suite.
 * @bench_name:             Unique identifier of the Bench in the suite.
 * @nb_runs:                The number of execution of its body for each param
 * @params:                 [optional] An array T array_name[nb_params] of
 *                          parameter, or a pointer T*, in the latter case a 5th
 *                          parameter is needed
 * @nb_params:              [optional] if 4th parameter is a pointer, Number of
 *                          parameters in the parameter Array
 */
#define BENCH(...) CAT(BENCH_, EVAL(NB_ARGS(__VA_ARGS__)))(1, __VA_ARGS__)
758 
/**
 * BENCH_ALL_CPU -          Called with 3, 4 or 5 parameters. Same routing as
 *                          BENCH, but with nb_cpu set to SMP_MAX_CPUS so the
 *                          bench is run on every CPU.
 * @suite_name:             Identifier of the current suite.
 * @bench_name:             Unique identifier of the Bench in the suite.
 * @nb_runs:                The number of execution of its body for each param
 * @params:                 [optional] An array T array_name[nb_params] of
 *                          parameter, or a pointer T*, in the latter case a 5th
 *                          parameter is needed
 * @nb_params:              [optional] if 4th parameter is a pointer, Number of
 *                          parameters in the parameter Array
 */
#define BENCH_ALL_CPU(...) \
    CAT(BENCH_, EVAL(NB_ARGS(__VA_ARGS__)))(SMP_MAX_CPUS, __VA_ARGS__)
774 
/*
 * PARAM_TEST_NODES - Routing the PARAM_TEST_NODES macros depending on its
 * number of parameters: 2 arguments select PARAM_TEST_NODES_SIMPLE, 3
 * arguments select PARAM_TEST_NODES_PARAMETRIC.
 */
#define PARAM_TEST_NODES_2 PARAM_TEST_NODES_SIMPLE
#define PARAM_TEST_NODES_3 PARAM_TEST_NODES_PARAMETRIC
#define PARAM_TEST_NODES(...) \
    CAT(PARAM_TEST_NODES_, EVAL(NB_ARGS(__VA_ARGS__)))(__VA_ARGS__)
783 
784 __END_CDECLS
785