Home
last modified time | relevance | path

Searched refs:baseline (Results 1 – 3 of 3) sorted by relevance

/test/mlts/benchmark/results/
generate_result.py:178 baseline = next(filter(lambda x: x.backend_type == BASELINE_BACKEND,
180 other = sorted(filter(lambda x: x is not baseline, results),
183 baseline=baseline,
191 if known_group[0].match(results_with_bl.baseline.name):
291 def generate_accuracy_values(baseline, result): argument
295 if result is baseline:
302 base = [float(x) * 100.0 for x in baseline.evaluator_values]
314 if result is baseline:
325 baseline.evaluator_values + [baseline.max_single_error]]
338 if result is baseline:
[all …]
/test/mlts/benchmark/src/com/android/nn/crashtest/core/test/
PerformanceDegradationTest.java:156 final BenchmarkResult baseline = modelPerformanceCollector(inferenceModelEntry, in testDegradationForModels() local
158 if (baseline.hasBenchmarkError()) { in testDegradationForModels()
160 mTestName, baseline.getBenchmarkError())); in testDegradationForModels()
163 baseline.getMeanTimeSec())); in testDegradationForModels()
208 / baseline.getMeanTimeSec()) - 1.0) * 100); in testDegradationForModels()
/test/mlts/benchmark/src/com/android/nn/benchmark/core/
TestModelsListLoader.java:66 double baseline = jsonTestModelEntry.getDouble("baselineSec"); in parseJSONModelsList() local
151 new TestModels.TestModelEntry(name, (float) baseline, inputSize, inputOutputs, in parseJSONModelsList()