#
# Copyright (C) 2021 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

# Builds a PACK model that stacks `input_tensors` along `axis_value` into
# `output_tensor`, then registers an Example that reuses the same test data for
# float16, quant8 asymm, quant8 asymm signed, and int32 variations.
def test(name, axis_value, input_tensors, output_tensor, inputs_data, output_data):
    model = Model().Operation("PACK", Int32Scalar("axis", axis_value), *input_tensors).To(output_tensor)
    # Apply identical quantization parameters to every input and to the output.
    quant8_asymm_type = ("TENSOR_QUANT8_ASYMM", 0.5, 4)
    quant8_asymm_dict = dict(zip([*input_tensors, output_tensor],
                                 [quant8_asymm_type] * (len(input_tensors) + 1)))
    quant8_asymm = DataTypeConverter(name="quant8_asymm").Identify(quant8_asymm_dict)
    quant8_asymm_signed_type = ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -9)
    quant8_asymm_signed_dict = dict(zip([*input_tensors, output_tensor],
                                        [quant8_asymm_signed_type] * (len(input_tensors) + 1)))
    quant8_asymm_signed = DataTypeConverter(name="quant8_asymm_signed").Identify(quant8_asymm_signed_dict)
    Example((dict(zip(input_tensors, inputs_data)), {output_tensor: output_data}),
            model=model, name=name).AddVariations("float16", quant8_asymm, quant8_asymm_signed, "int32")

test(
    name="FLOAT32_unary_axis0",
    axis_value=0,
    input_tensors=[Input("in0", ("TENSOR_FLOAT32", [2]))],
    output_tensor=Output("out", ("TENSOR_FLOAT32", [1, 2])),
    inputs_data=[[3, 4]],
    output_data=[3, 4],
)

test(
    name="FLOAT32_unary_axis1",
    axis_value=1,
    input_tensors=[Input("in0", ("TENSOR_FLOAT32", [2]))],
    output_tensor=Output("out", ("TENSOR_FLOAT32", [2, 1])),
    inputs_data=[[3, 4]],
    output_data=[3, 4],
)

test(
    name="FLOAT32_binary_axis0",
    axis_value=0,
    input_tensors=[Input("in0", ("TENSOR_FLOAT32", [3, 4])),
                   Input("in1", ("TENSOR_FLOAT32", [3, 4]))],
    output_tensor=Output("out", ("TENSOR_FLOAT32", [2, 3, 4])),
    inputs_data=[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
                 [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]],
    output_data=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
                 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23],
)

test(
    name="FLOAT32_binary_axis1",
    axis_value=1,
    input_tensors=[Input("in0", ("TENSOR_FLOAT32", [3, 4])),
                   Input("in1", ("TENSOR_FLOAT32", [3, 4]))],
    output_tensor=Output("out", ("TENSOR_FLOAT32", [3, 2, 4])),
    inputs_data=[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
                 [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]],
    output_data=[0, 1, 2, 3,
                 12, 13, 14, 15,
                 4, 5, 6, 7,
                 16, 17, 18, 19,
                 8, 9, 10, 11,
                 20, 21, 22, 23],
)

test(
    name="FLOAT32_binary_axis2",
    axis_value=2,
    input_tensors=[Input("in0", ("TENSOR_FLOAT32", [3, 4])),
                   Input("in1", ("TENSOR_FLOAT32", [3, 4]))],
    output_tensor=Output("out", ("TENSOR_FLOAT32", [3, 4, 2])),
    inputs_data=[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11],
                 [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]],
    output_data=[0, 12,
                 1, 13,
                 2, 14,
                 3, 15,
                 4, 16,
                 5, 17,
                 6, 18,
                 7, 19,
                 8, 20,
                 9, 21,
                 10, 22,
                 11, 23],
)
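
# Hedged sanity sketch (an editorial addition, not part of the spec-generator
# API): PACK stacks N rank-R inputs into one rank-(R+1) output along `axis`, so
# each output_data above is simply the row-major flattening of that stack. The
# pure-Python checks below recompute the FLOAT32_binary_axis1 and _axis2
# expectations from the [3, 4] inputs; the underscore-prefixed names are
# illustrative and not used elsewhere in this file.
_in0 = [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11]]
_in1 = [[12, 13, 14, 15], [16, 17, 18, 19], [20, 21, 22, 23]]

# axis=1 interleaves whole rows: output shape [3, 2, 4].
_axis1 = [v for row0, row1 in zip(_in0, _in1) for v in row0 + row1]
assert _axis1 == [0, 1, 2, 3, 12, 13, 14, 15,
                  4, 5, 6, 7, 16, 17, 18, 19,
                  8, 9, 10, 11, 20, 21, 22, 23]

# axis=2 interleaves corresponding elements: output shape [3, 4, 2].
_axis2 = [v for row0, row1 in zip(_in0, _in1) for pair in zip(row0, row1) for v in pair]
assert _axis2 == [0, 12, 1, 13, 2, 14, 3, 15,
                  4, 16, 5, 17, 6, 18, 7, 19,
                  8, 20, 9, 21, 10, 22, 11, 23]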