/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/
roi_align_quant8_signed.mod.py
    175: i4 = Input("in", "TENSOR_FLOAT32", "{4, 4, 4, 1}")    [variable]
    178: Model().Operation("ROI_ALIGN", i4, roi4, [2, 2, 2, 2, 2], 2, 2, 2.0, 1.0, 0, 4, layout).To(o4)
    181: i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
    188: i4: [
    211: }).AddNchw(i4, o4, layout).AddVariations(quant8_signed, includeDefault=False)
    248: i4 = Input("in", "TENSOR_FLOAT32", "{1, 512, 8, 1}")    [variable]
    251: Model().Operation("ROI_ALIGN", i4, roi4, [0], 128, 4, 1.0, 64.0, 10, 10, layout).To(o4)
    254: i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
    261: i4: [0] * (512 * 8),
    264: }).AddNchw(i4, o4, layout).AddVariations(quant8_signed, includeDefault=False)

generate_proposals_quant8_signed.mod.py
    23: i4 = Input("imageInfo", "TENSOR_FLOAT32", "{1, 2}")  # image info    [variable]
    28: i1, i2, i3, i4, 4.0, 4.0, -1, -1, 0.30, 1.0, layout).To(o1, o2, o3)
    34: i4: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
    51: i4: [32, 32],  # image info
    73: i4 = Input("imageInfo", "TENSOR_FLOAT32", "{2, 2}")  # image info    [variable]
    78: i1, i2, i3, i4, 10.0, 10.0, 32, 16, 0.20, 1.0, layout).To(o1, o2, o3)
    84: i4: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
    164: i4: [64, 64, 32, 32],  # image info

space_to_batch_quant8_signed.mod.py
    195: i4 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 2, 1}")    [variable]
    198: Model().Operation("SPACE_TO_BATCH_ND", i4, [3, 2], pad4, layout).To(o4)
    202: i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
    208: i4: [1, 2, 3, 4, 5, 6, 7, 8],
    212: }).AddNchw(i4, o4, layout).AddVariations(quant8_signed, includeDefault=False)

transpose_conv2d_quant8_signed.mod.py
    162: i4 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}")  # input 0    [variable]
    167: Model().Operation("TRANSPOSE_CONV_2D", i4, w4, b4, s4, 2, 1, 1, 0, layout).To(o4)
    171: i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -118),
    178: i4: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
    186: }).AddNchw(i4, o4, s4, layout).AddVariations(quant8_signed, includeDefault=False)

depthwise_conv2d_quant8_signed.mod.py
    472: i4 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 4}")    [variable]
    476: Model("large").Operation("DEPTHWISE_CONV_2D", i4, f4, b4, 0, 0, 0, 0, 1, 1, 1, 0, layout).To(o4)
    480: i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
    486: i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.5, 0),
    494: i4: [10, 21, 10, 0,
    499: }).AddNchw(i4, o4, layout).AddVariations(quant8_signed, includeDefault=False)

transpose_quant8_signed.mod.py
    163: i4 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}")  # input 0    [variable]
    168: Model().Operation("TRANSPOSE_CONV_2D", i4, w4, b4, s4, 2, 1, 1, 0, layout).To(o4)
    172: i4: ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, -118),
    179: i4: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
    187: }).AddNchw(i4, o4, s4, layout).AddVariations(quant8_signed, includeDefault=False)

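Every V1_3 match above follows the same test-generator pattern: i4 is declared as a float32 Input, wired into the operation, and the Example is then re-typed through a signed-quantization converter passed to AddVariations(quant8_signed, includeDefault=False), so only the TENSOR_QUANT8_ASYMM_SIGNED variant is emitted. A minimal sketch of that pattern, reassembled from the roi_align_quant8_signed.mod.py fragments at lines 248-264; the roi4 and o4 declarations, the roi quantization, and the expected output are illustrative assumptions, not text from the file:

    layout = BoolScalar("layout", False)  # NHWC (assumed declaration)

    i4 = Input("in", "TENSOR_FLOAT32", "{1, 512, 8, 1}")
    roi4 = Input("roi", "TENSOR_FLOAT32", "{1, 4}")            # assumed shape
    o4 = Output("out", "TENSOR_FLOAT32", "{1, 128, 4, 1}")     # assumed shape
    Model().Operation("ROI_ALIGN", i4, roi4, [0], 128, 4, 1.0, 64.0, 10, 10, layout).To(o4)

    # Re-type the float32 operands for the signed-quantized variant only.
    quant8_signed = DataTypeConverter().Identify({
        i4:   ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
        roi4: ("TENSOR_QUANT16_ASYMM", 0.125, 0),              # assumed roi quantization
        o4:   ("TENSOR_QUANT8_ASYMM_SIGNED", 0.25, 0),
    })

    Example({
        i4: [0] * (512 * 8),
        roi4: [0, 0, 32, 32],                                  # assumed ROI box
        o4: [0] * (128 * 4),                                   # all-zero input gives an all-zero output
    }).AddNchw(i4, o4, layout).AddVariations(quant8_signed, includeDefault=False)
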
/packages/modules/NeuralNetworks/runtime/test/specs/V1_2/
roi_align.mod.py
    175: i4 = Input("in", "TENSOR_FLOAT32", "{4, 4, 4, 1}")    [variable]
    178: Model().Operation("ROI_ALIGN", i4, roi4, [2, 2, 2, 2, 2], 2, 2, 2.0, 1.0, 0, 4, layout).To(o4)
    181: i4: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
    188: i4: [
    211: }).AddNchw(i4, o4, layout).AddVariations("relaxed", quant8, "float16")
    248: i4 = Input("in", "TENSOR_FLOAT32", "{1, 512, 8, 1}")    [variable]
    251: Model().Operation("ROI_ALIGN", i4, roi4, [0], 128, 4, 1.0, 64.0, 10, 10, layout).To(o4)
    254: i4: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
    261: i4: [0] * (512 * 8),
    264: }).AddNchw(i4, o4, layout).AddVariations("relaxed", quant8, "float16")

generate_proposals.mod.py
    24: i4 = Input("imageInfo", "TENSOR_FLOAT32", "{1, 2}")  # image info    [variable]
    29: i1, i2, i3, i4, 4.0, 4.0, -1, -1, 0.30, 1.0, layout).To(o1, o2, o3)
    35: i4: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
    52: i4: [32, 32],  # image info
    73: i4 = Input("imageInfo", "TENSOR_FLOAT32", "{2, 2}")  # image info    [variable]
    78: i1, i2, i3, i4, 10.0, 10.0, 32, 16, 0.20, 1.0, layout).To(o1, o2, o3)
    84: i4: ("TENSOR_QUANT16_ASYMM", 0.125, 0),
    164: i4: [64, 64, 32, 32],  # image info

space_to_batch_v1_2.mod.py
    77: i4 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 2, 1}")    [variable]
    80: Model().Operation("SPACE_TO_BATCH_ND", i4, [3, 2], pad4, layout).To(o4)
    84: i4: ("TENSOR_QUANT8_ASYMM", 0.25, 128),
    90: i4: [1, 2, 3, 4, 5, 6, 7, 8],
    94: }).AddNchw(i4, o4, layout).AddVariations("relaxed", "float16", quant8)

depthwise_conv2d_v1_2.mod.py
    115: i4 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 2, 4}")    [variable]
    119: Model("large").Operation("DEPTHWISE_CONV_2D", i4, f4, b4, 0, 0, 0, 0, 1, 1, 1, 0, layout).To(o4)
    123: i4: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
    129: i4: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
    137: i4: [10, 21, 10, 0,
    142: }).AddNchw(i4, o4, layout).AddVariations("relaxed", "float16", quant8, channelQuant8)

conv2d_v1_2.mod.py
    104: i4 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 3, 3}")    [variable]
    108: Model("large").Operation("CONV_2D", i4, f4, b4, 0, 0, 0, 0, 1, 1, 0, layout).To(o4)
    112: i4: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
    118: i4: ("TENSOR_QUANT8_ASYMM", 0.5, 128),
    124: i4: ("TENSOR_QUANT8_ASYMM", 1.0, 127),
    132: i4: [1., 2., 3., 4., 5., 6., 7., 8., 9.,
    140: }).AddNchw(i4, o4, layout).AddVariations("relaxed", quant8, channelQuant8, channelQuant8_mult_gt_1,…

max_pool_v1_2.mod.py
    94: i4 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 4, 1}")    [variable]
    96: Model().Operation("MAX_POOL_2D", i4, 1, 2, 2, 2, 2, 0, layout).To(o4)
    100: i4: ("TENSOR_QUANT8_ASYMM", 0.25, 0),
    106: i4: [0, 6, 2, 4, 3, 2, 10, 7],
    108: }).AddNchw(i4, o4, layout).AddVariations("relaxed", quant8, "float16")

avg_pool_v1_2.mod.py
    106: i4 = Input("op1", ("TENSOR_FLOAT32", [bat, row, col, chn]))    [variable]
    108: Model().Operation("AVERAGE_POOL_2D", i4, pad, pad, pad, pad, std, std, flt, flt, 3, layout).To(o4)
    112: i4: ("TENSOR_QUANT8_ASYMM", 0.5, 0),
    118: i4: [10 for _ in range(bat * row * col * chn)],
    120: }).AddNchw(i4, o4, layout).AddVariations("relaxed", "float16", quant8)

transpose_conv2d.mod.py
    127: i4 = Input("op1", "TENSOR_FLOAT32", "{1, 4, 4, 2}")  # input 0    [variable]
    132: Model().Operation("TRANSPOSE_CONV_2D", i4, w4, b4, s4, 2, 1, 1, 0, layout).To(o4)
    136: i4: ("TENSOR_QUANT8_ASYMM", 0.25, 10),
    143: i4: [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
    151: }).AddNchw(i4, o4, s4, layout).AddVariations("relaxed", quant8, "float16")

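The V1_2 versions of the same specs keep the unsigned TENSOR_QUANT8_ASYMM variation and additionally generate "relaxed" (float32 computed at float16 range/precision) and "float16" variants next to the float32 default. A sketch of that pattern built from the max_pool_v1_2.mod.py fragments at lines 94-108; the output operand, its quantization, and the expected values are filled in here by hand (2x2 max pooling with stride 2 over the listed input yields [6, 10]) and are assumptions for illustration:

    layout = BoolScalar("layout", False)  # NHWC (assumed declaration)

    i4 = Input("op1", "TENSOR_FLOAT32", "{1, 2, 4, 1}")
    o4 = Output("op4", "TENSOR_FLOAT32", "{1, 1, 2, 1}")   # assumed output operand
    Model().Operation("MAX_POOL_2D", i4, 1, 2, 2, 2, 2, 0, layout).To(o4)

    quant8 = DataTypeConverter().Identify({
        i4: ("TENSOR_QUANT8_ASYMM", 0.25, 0),
        o4: ("TENSOR_QUANT8_ASYMM", 0.25, 0),               # assumed output quantization
    })

    # Input is [[0, 6, 2, 4], [3, 2, 10, 7]]; each 2x2, stride-2 window keeps its max.
    Example({
        i4: [0, 6, 2, 4, 3, 2, 10, 7],
        o4: [6, 10],
    }).AddNchw(i4, o4, layout).AddVariations("relaxed", quant8, "float16")
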
/packages/modules/NeuralNetworks/tools/test_generator/tests/P_internal/
add_internal.mod.py
    21: i4 = Input("i4", ("TENSOR_FLOAT32", [2]))  # input 0    [variable]
    50: model.Operation("ADD", i3, i4, act).To(t2)
    58: i4: [0, 0],

/packages/modules/NeuralNetworks/tools/test_generator/tests/P_vts_internal/
add_internal.mod.py
    21: i4 = Input("i4", ("TENSOR_FLOAT32", [2]))  # input 0    [variable]
    50: model.Operation("ADD", i3, i4, act).To(t2)
    58: i4: [0, 0],

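Both copies of add_internal.mod.py exercise operands that are neither model inputs nor outputs: the ADD at line 50 writes into t2, which a later operation consumes. A rough sketch of that chaining, assuming the generator's Internal operand type and made-up names, shapes, and data for everything except i4 (the real file wires up a longer chain):

    act = Int32Scalar("act", 0)                    # fused activation: NONE (assumed)
    i3 = Input("i3", ("TENSOR_FLOAT32", [2]))      # assumed
    i4 = Input("i4", ("TENSOR_FLOAT32", [2]))      # input 0
    t2 = Internal("t2", ("TENSOR_FLOAT32", [2]))   # internal tensor, assumed shape
    o1 = Output("o1", ("TENSOR_FLOAT32", [2]))     # assumed

    model = Model()
    model.Operation("ADD", i3, i4, act).To(t2)     # result lands in the internal tensor
    model.Operation("ADD", t2, i4, act).To(o1)     # internal tensor feeds the next ADD

    Example({
        i3: [1, 2],
        i4: [0, 0],
        o1: [1, 2],                                # adding [0, 0] twice leaves i3 unchanged
    })
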
/packages/modules/NeuralNetworks/runtime/test/specs/V1_0/
conv_1_h3_w2_SAME.mod.py
    2: i4 = Int32Scalar("b4", 1)    [variable]
    10: model = model.Operation("CONV_2D", i2, i0, i1, i4, i5, i6, i7).To(i3)

conv_3_h3_w2_SAME.mod.py
    2: i4 = Int32Scalar("b4", 1)    [variable]
    10: model = model.Operation("CONV_2D", i2, i0, i1, i4, i5, i6, i7).To(i3)

conv_3_h3_w2_VALID.mod.py
    2: i4 = Int32Scalar("b4", 2)    [variable]
    10: model = model.Operation("CONV_2D", i2, i0, i1, i4, i5, i6, i7).To(i3)

conv_1_h3_w2_VALID.mod.py
    2: i4 = Int32Scalar("b4", 2)    [variable]
    10: model = model.Operation("CONV_2D", i2, i0, i1, i4, i5, i6, i7).To(i3)

depthwise_conv.mod.py
    2: i4 = Int32Scalar("b4", 1)    [variable]
    11: model = model.Operation("DEPTHWISE_CONV_2D", i2, i0, i1, i4, i5, i6, i7, i8).To(i3)

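The V1_0 specs predate the fluent Model().Operation(...).To(...) style used above: every scalar hyperparameter is bound to its own Int32Scalar operand (b4 here carries the implicit padding scheme, 1 for SAME and 2 for VALID, which is the only visible difference between the *_SAME and *_VALID files) and passed positionally to CONV_2D. A sketch of that older layout with assumed tensor shapes and data, since the matches above only show the scalar and the operation line:

    model = Model()
    i2 = Input("op1", "TENSOR_FLOAT32", "{1, 8, 8, 3}")    # data input (assumed shape)
    i0 = Input("op2", "TENSOR_FLOAT32", "{1, 3, 2, 3}")    # one 3x2 filter (assumed shape)
    i1 = Input("op3", "TENSOR_FLOAT32", "{1}")             # bias (assumed shape)
    i3 = Output("op4", "TENSOR_FLOAT32", "{1, 8, 8, 1}")   # assumed output shape
    i4 = Int32Scalar("b4", 1)   # implicit padding: 1 = SAME
    i5 = Int32Scalar("b5", 1)   # stride width (assumed)
    i6 = Int32Scalar("b6", 1)   # stride height (assumed)
    i7 = Int32Scalar("b7", 0)   # fused activation: NONE (assumed)
    model = model.Operation("CONV_2D", i2, i0, i1, i4, i5, i6, i7).To(i3)

    # With an all-zero filter and bias, the convolution output is all zeros
    # regardless of the input data.
    Example({
        i2: [1.0] * (8 * 8 * 3),
        i0: [0.0] * (3 * 2 * 3),
        i1: [0.0],
        i3: [0.0] * (8 * 8),
    })
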
/packages/modules/NeuralNetworks/runtime/test/specs/V1_1/
conv_1_h3_w2_SAME_relaxed.mod.py
    18: i4 = Int32Scalar("b4", 1)    [variable]
    26: model = model.Operation("CONV_2D", i2, i0, i1, i4, i5, i6, i7).To(i3)

conv_1_h3_w2_VALID_relaxed.mod.py
    18: i4 = Int32Scalar("b4", 2)    [variable]
    26: model = model.Operation("CONV_2D", i2, i0, i1, i4, i5, i6, i7).To(i3)

conv_3_h3_w2_SAME_relaxed.mod.py
    18: i4 = Int32Scalar("b4", 1)    [variable]
    26: model = model.Operation("CONV_2D", i2, i0, i1, i4, i5, i6, i7).To(i3)

conv_3_h3_w2_VALID_relaxed.mod.py
    18: i4 = Int32Scalar("b4", 2)    [variable]
    26: model = model.Operation("CONV_2D", i2, i0, i1, i4, i5, i6, i7).To(i3)

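The V1_1 files appear to repeat the V1_0 models unchanged (the matched operand lists are identical, only shifted down, likely by an added license header) and differ in requesting relaxed float32 execution. A sketch of that extra model-level flag; the exact call name is an assumption from memory, not a quote from these files:

    # Allow the driver to compute this float32 model at float16 range/precision.
    model = model.RelaxedExecution(True)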