/packages/modules/NeuralNetworks/runtime/test/specs/V1_2/ |
D | sub_quantized_different_scales.mod.py |
    27  output_scale, output_offset): argument
    31  return quantize(a_dequantized - b_dequantized, output_scale, output_offset)
    44  "{%d}, %g, %d" % (size, output_scale, output_offset))
|
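Both SUB specs in this listing (this V1_2 one and the signed V1_3 one below) build their reference output by dequantizing each input with its own scale and offset, subtracting in real numbers, and requantizing with the output tensor's scale and offset. A minimal sketch of that pattern; the helper bodies and the wrapper name `sub` here are assumptions, since only the call sites appear in the listing:

    # Sketch only: helper names match the spec's call sites, bodies are assumed.
    def dequantize(q, scale, offset):
        # Affine dequantization: real = scale * (quantized - zero_point).
        return scale * (q - offset)

    def quantize(x, scale, offset):
        # Requantize to unsigned 8 bits (the signed V1_3 spec clamps to [-128, 127] instead).
        return max(0, min(255, int(round(x / scale)) + offset))

    def sub(a, a_scale, a_offset, b, b_scale, b_offset, output_scale, output_offset):
        a_dequantized = dequantize(a, a_scale, a_offset)
        b_dequantized = dequantize(b, b_scale, b_offset)
        return quantize(a_dequantized - b_dequantized, output_scale, output_offset)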
D | tanh_v1_2.mod.py |
    33  output_scale, output_offset = 1.0 / 128, 128  # Required. variable
    39  return max(0, min(255, int(round(x / output_scale)) + output_offset))
    42  output0 = Output("output0", "TENSOR_QUANT8_ASYMM", "{256}, %g, %d" % (output_scale, output_offset))
|
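tanh produces values in [-1, 1], so the V1_2 spec quantizes with scale 1/128 and zero point 128 to spread that range across the unsigned 8-bit codes. A quick standalone check of the formula shown above; the printed values follow directly from it:

    import math

    output_scale, output_offset = 1.0 / 128, 128

    def quantize(x):
        return max(0, min(255, int(round(x / output_scale)) + output_offset))

    print(quantize(math.tanh(0.0)))  # 128: real zero sits exactly on the zero point
    print(quantize(-1.0))            # 0: bottom of the tanh range
    print(quantize(1.0))             # 255: 256 saturates against the clamp
    print("{256}, %g, %d" % (output_scale, output_offset))  # operand string: {256}, 0.0078125, 128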
/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/ |
D | tanh_quant8_signed.mod.py |
    19  output_scale, output_offset = 1.0 / 128, 0  # Required. variable
    25  return max(-128, min(127, int(round(x / output_scale)) + output_offset))
    28  … = Output("output0", "TENSOR_QUANT8_ASYMM_SIGNED", "{256}, %g, %d" % (output_scale, output_offset))
|
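The V1_3 variant targets TENSOR_QUANT8_ASYMM_SIGNED: same 1/128 scale, but the zero point drops to 0 because the signed 8-bit range [-128, 127] is already centered on zero. A standalone rewrite of line 25's expression (the name `quantize_signed` is illustrative):

    import math

    output_scale, output_offset = 1.0 / 128, 0

    def quantize_signed(x):
        return max(-128, min(127, int(round(x / output_scale)) + output_offset))

    print(quantize_signed(math.tanh(0.0)))  # 0
    print(quantize_signed(-1.0))            # -128
    print(quantize_signed(1.0))             # 127: 128 saturates against the clamp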
D | sub_quant8_signed.mod.py |
    28  output_scale, output_offset): argument
    32  return quantize(a_dequantized - b_dequantized, output_scale, output_offset)
    45  "{%d}, %g, %d" % (size, output_scale, output_offset))
|
/packages/modules/Virtualization/authfs/src/ |
D | file.rs |
    58  let mut output_offset = offset;  in write_all_at()  localVariable
    60  let size = self.write_at(&buf[input_offset..], output_offset)?;  in write_all_at()
    62  output_offset += size as u64;  in write_all_at()
|
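write_all_at() in file.rs is the usual write-all loop over a positional write primitive: each partial write advances an index into the source buffer together with output_offset in the destination file. A Python sketch of the same loop shape, using os.pwrite as a stand-in for the Rust write_at and with illustrative error handling:

    import os

    def write_all_at(fd, buf, offset):
        input_offset = 0          # how much of buf has been written so far
        output_offset = offset    # where the next piece lands in the file
        while input_offset < len(buf):
            size = os.pwrite(fd, buf[input_offset:], output_offset)
            if size == 0:
                raise OSError("pwrite made no progress")
            input_offset += size
            output_offset += size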
/packages/modules/Virtualization/authfs/src/fsverity/ |
D | editor.rs |
    199  for (output_offset, current_size) in  in write_at()
    206  let offset_in_buf = (output_offset - offset) as usize;  in write_at()
    208  let output_chunk_index = (output_offset / CHUNK_SIZE) as usize;  in write_at()
    209  let offset_from_alignment = (output_offset % CHUNK_SIZE) as usize;  in write_at()
    223  let written = output_offset - offset;  in write_at()
    236  self.file.write_all_at(source, output_offset)?;  in write_at()
|
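The write_at() arithmetic in editor.rs splits a write into chunk-sized pieces: output_offset walks through the destination range while the chunk index and the misalignment inside the chunk are derived from it by division and modulo. A sketch of just that arithmetic, assuming 4096-byte chunks (the real CHUNK_SIZE constant, and the Merkle-tree updates the editor also performs, are not shown in this listing; the name `chunk_pieces` is illustrative):

    CHUNK_SIZE = 4096  # assumption for the sketch; the real constant lives in the authfs crate

    def chunk_pieces(offset, length):
        # Yield, per chunk-sized piece, the quantities write_at() derives from
        # output_offset: position in the caller's buffer, chunk index, offset
        # within the chunk, and the piece's size.
        output_offset = offset
        end = offset + length
        while output_offset < end:
            offset_from_alignment = output_offset % CHUNK_SIZE
            current_size = min(CHUNK_SIZE - offset_from_alignment, end - output_offset)
            offset_in_buf = output_offset - offset
            output_chunk_index = output_offset // CHUNK_SIZE
            yield offset_in_buf, output_chunk_index, offset_from_alignment, current_size
            output_offset += current_size

    # A 5000-byte write at offset 3000 becomes two pieces: the 1096-byte tail of
    # chunk 0 (misaligned by 3000 bytes) and the first 3904 bytes of chunk 1.
    print(list(chunk_pieces(3000, 5000)))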
/packages/modules/NeuralNetworks/common/cpu_operations/ |
D | Broadcast.cpp |
    137  const int32_t output_offset = shapeOut.offset;  in addQuant8()  local
    177  op_params.output_offset = output_offset;  in addQuant8()
    274  const int32_t output_offset = shapeOut.offset;  in mulQuant8()  local
    296  op_params.output_offset = output_offset;  in mulQuant8()
    346  const int32_t output_offset = shapeOut.offset;  in subQuant8()  local
    389  op_params.output_offset = output_offset;  in subQuant8()
|
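In Broadcast.cpp the same pair of lines repeats for ADD, MUL and SUB: read the output zero point from shapeOut.offset and hand it to the TFLite reference kernel through op_params.output_offset, where it is added back after the result has been rescaled. A float-only sketch of what that means for ADD (the production kernel works in fixed point; the function name here is illustrative):

    def add_quant8_reference(a, a_scale, a_offset, b, b_scale, b_offset,
                             out_scale, out_offset):
        # Dequantize both inputs, add in real numbers, requantize with the output
        # tensor's scale, then add the output zero point and clamp to 8 bits.
        real = a_scale * (a - a_offset) + b_scale * (b - b_offset)
        return max(0, min(255, int(round(real / out_scale)) + out_offset))

MUL and SUB differ only in the real-number operation; the requantization step, and therefore the role of output_offset, is the same.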
D | PRelu.cpp |
    73  const int32_t output_offset = outputShape.offset;  in evalQuant8()  local
    87  output_offset + tflite::MultiplyByQuantizedMultiplier(  in evalQuant8()
    91  output_val = output_offset +  in evalQuant8()
|
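evalQuant8() in PRelu.cpp adds output_offset last, after the positive and negative branches have been rescaled (the `output_offset + ...` terms at lines 87 and 91). A real-number sketch of the operation those lines implement in fixed point (the function name is illustrative):

    def prelu_quant8_reference(x_q, x_scale, x_offset,
                               alpha_q, alpha_scale, alpha_offset,
                               out_scale, out_offset):
        # f(x) = x for x >= 0, alpha * x otherwise, computed on dequantized values
        # and requantized with the output zero point added at the end.
        x = x_scale * (x_q - x_offset)
        alpha = alpha_scale * (alpha_q - alpha_offset)
        y = x if x >= 0 else alpha * x
        return max(0, min(255, int(round(y / out_scale)) + out_offset))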
D | FullyConnected.cpp | 165 params.output_offset = outputShape.offset; in fullyConnectedQuant8()
|
D | BatchMatmul.cpp | 183 params.output_offset = outputShape.offset; in batchMatMulQuantized()
|
D | Reshape.cpp | 236 {.output_offset = outputShape.offset}, convertShapeToTflshape(inputShape), inputData, in spaceToBatchGeneric()
|
D | DepthwiseConv2D.cpp | 224 .output_offset = outputShape.offset, in depthwiseConvNhwc()
|
D | Conv2D.cpp | 486 convParams.output_offset = outputShape.offset; in convQuant8PerChannelNhwc()
|
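The remaining hits (FullyConnected, BatchMatmul, Reshape's spaceToBatchGeneric, DepthwiseConv2D, Conv2D) all follow one pattern: the kernel copies the output operand's zero point from outputShape.offset into the TFLite parameter struct's output_offset field, unchanged. A schematic stand-in for that hand-off; Shape and QuantOpParams below are simplified illustrations, not the runtime's real types:

    from dataclasses import dataclass

    @dataclass
    class Shape:
        # A quantized operand's scale and zero point ("offset") travel with its dimensions.
        dimensions: tuple = ()
        scale: float = 1.0
        offset: int = 0

    @dataclass
    class QuantOpParams:
        output_multiplier: int = 0
        output_shift: int = 0
        output_offset: int = 0

    def prepare_output_params(output_shape: Shape) -> QuantOpParams:
        params = QuantOpParams()
        # The zero point is fixed when the model declares the output operand's
        # quantization; the kernels above only forward it so the reference op
        # can add it back after rescaling.
        params.output_offset = output_shape.offset
        return params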