1 /*
2 * Copyright (C) 2018 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <gmock/gmock.h>
18 #include <gtest/gtest.h>
19
20 #pragma clang diagnostic push
21 #pragma clang diagnostic ignored "-Wunused-parameter"
22 #pragma clang diagnostic ignored "-Winvalid-partial-specialization"
23 #include <unsupported/Eigen/CXX11/Tensor>
24 #pragma clang diagnostic pop
25
#include <cmath>

#include <vector>
27
28 #include "Multinomial.h"
29 #include "NeuralNetworksWrapper.h"
30 #include "philox_random.h"
31 #include "simple_philox.h"
32
33 namespace android {
34 namespace nn {
35 namespace wrapper {
36
37 using ::testing::FloatNear;
38
39 constexpr int kFixedRandomSeed1 = 37;
40 constexpr int kFixedRandomSeed2 = 42;
41
42 class MultinomialOpModel {
43 public:
MultinomialOpModel(uint32_t batch_size,uint32_t class_size,uint32_t sample_size)44 MultinomialOpModel(uint32_t batch_size, uint32_t class_size, uint32_t sample_size)
45 : batch_size_(batch_size), class_size_(class_size), sample_size_(sample_size) {
46 std::vector<uint32_t> inputs;
47 OperandType logitsType(Type::TENSOR_FLOAT32, {batch_size_, class_size_});
48 inputs.push_back(model_.addOperand(&logitsType));
49 OperandType samplesType(Type::INT32, {});
50 inputs.push_back(model_.addOperand(&samplesType));
51 OperandType seedsType(Type::TENSOR_INT32, {2});
52 inputs.push_back(model_.addOperand(&seedsType));
53
54 std::vector<uint32_t> outputs;
55 OperandType outputType(Type::TENSOR_INT32, {batch_size_, sample_size_});
56 outputs.push_back(model_.addOperand(&outputType));
57
58 model_.addOperation(ANEURALNETWORKS_RANDOM_MULTINOMIAL, inputs, outputs);
59 model_.identifyInputsAndOutputs(inputs, outputs);
60 model_.finish();
61 }
62
Invoke()63 void Invoke() {
64 ASSERT_TRUE(model_.isValid());
65
66 Compilation compilation(&model_);
67 compilation.finish();
68 Execution execution(&compilation);
69
70 tensorflow::random::PhiloxRandom rng(kFixedRandomSeed1);
71 tensorflow::random::SimplePhilox srng(&rng);
72 const int sample_count = batch_size_ * class_size_;
73 for (int i = 0; i < sample_count; ++i) {
74 input_.push_back(srng.RandDouble());
75 }
76 ASSERT_EQ(execution.setInput(Multinomial::kInputTensor, input_.data(),
77 sizeof(float) * input_.size()),
78 Result::NO_ERROR);
79 ASSERT_EQ(execution.setInput(Multinomial::kSampleCountParam, &sample_size_,
80 sizeof(sample_size_)),
81 Result::NO_ERROR);
82
83 std::vector<uint32_t> seeds{kFixedRandomSeed1, kFixedRandomSeed2};
84 ASSERT_EQ(execution.setInput(Multinomial::kRandomSeedsTensor, seeds.data(),
85 sizeof(uint32_t) * seeds.size()),
86 Result::NO_ERROR);
87
88 output_.insert(output_.end(), batch_size_ * sample_size_, 0);
89 ASSERT_EQ(execution.setOutput(Multinomial::kOutputTensor, output_.data(),
90 sizeof(uint32_t) * output_.size()),
91 Result::NO_ERROR);
92
93 ASSERT_EQ(execution.compute(), Result::NO_ERROR);
94 }
95
GetInput() const96 const std::vector<float>& GetInput() const { return input_; }
GetOutput() const97 const std::vector<uint32_t>& GetOutput() const { return output_; }
98
99 private:
100 Model model_;
101
102 const uint32_t batch_size_;
103 const uint32_t class_size_;
104 const uint32_t sample_size_;
105
106 std::vector<float> input_;
107 std::vector<uint32_t> output_;
108 };
109
// Statistical sanity check: for each batch, the empirical frequency of each
// sampled class must be within kMaxProbabilityDelta of the softmax
// probability implied by that batch's logits row.
TEST(MultinomialOpTest, ProbabilityDeltaWithinTolerance) {
    constexpr int kBatchSize = 8;
    constexpr int kNumClasses = 10000;
    constexpr int kNumSamples = 128;
    constexpr float kMaxProbabilityDelta = 0.025;

    MultinomialOpModel multinomial(kBatchSize, kNumClasses, kNumSamples);
    multinomial.Invoke();

    // Bind by const reference; the getters return references, so copying the
    // kBatchSize * kNumClasses floats here would be wasted work.
    const std::vector<uint32_t>& output = multinomial.GetOutput();
    const std::vector<float>& input = multinomial.GetInput();

    for (int b = 0; b < kBatchSize; ++b) {
        // Histogram the samples of this batch only. (A histogram shared
        // across batches would mix draws from different distributions and
        // be normalized by the wrong sample count.)
        std::vector<int> class_counts(kNumClasses, 0);
        for (int s = 0; s < kNumSamples; ++s) {
            class_counts[output[b * kNumSamples + s]]++;
        }

        // The logits row for batch b starts at b * kNumClasses — the row
        // stride of the [batch, class] input tensor, not kBatchSize.
        const int batch_index = kNumClasses * b;
        float probability_sum = 0;
        for (int i = 0; i < kNumClasses; ++i) {
            probability_sum += expf(input[batch_index + i]);
        }
        for (int i = 0; i < kNumClasses; ++i) {
            float probability =
                    static_cast<float>(class_counts[i]) / static_cast<float>(kNumSamples);
            float probability_expected = expf(input[batch_index + i]) / probability_sum;
            EXPECT_THAT(probability, FloatNear(probability_expected, kMaxProbabilityDelta));
        }
    }
}
141
142 } // namespace wrapper
143 } // namespace nn
144 } // namespace android
145