/packages/modules/NeuralNetworks/common/types/operations/src/

Elementwise.cpp
    28  NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validate()
    31  NN_RET_CHECK(validateInputTypes(context, {inputType}));  in validate()
    32  NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validate()
    40  NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validateAbs()
    43  NN_RET_CHECK(validateInputTypes(context, {inputType}));  in validateAbs()
    44  NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validateAbs()
    53  NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validateFloor()
    56  NN_RET_CHECK(validateInputTypes(context, {inputType}));  in validateFloor()
    57  NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validateFloor()
    71  NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validateRsqrt()
    [all …]

Reduce.cpp
    28  NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validateProdSum()
    31  NN_RET_CHECK(  in validateProdSum()
    33  NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validateProdSum()
    45  NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validateMaxMin()
    50  NN_RET_CHECK(  in validateMaxMin()
    52  NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validateMaxMin()
    68  NN_RET_CHECK(inputType == OperandType::TENSOR_BOOL8)  in validateLogical()
    70  NN_RET_CHECK(  in validateLogical()
    72  NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validateLogical()

ResizeImageOps.cpp
    30  NN_RET_CHECK(numInputs >= kNumInputs - 1 && numInputs <= kNumInputs + kNumOptionalInputs);  in validate()
    32  NN_RET_CHECK(numInputs >= kNumInputs && numInputs <= kNumInputs + kNumOptionalInputs);  in validate()
    41  NN_RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||  in validate()
    55  NN_RET_CHECK(scalarType == OperandType::FLOAT32);  in validate()
    57  NN_RET_CHECK(scalarType == OperandType::FLOAT16);  in validate()
    60  NN_RET_CHECK(scalarType == OperandType::FLOAT32);  in validate()
    74  NN_RET_CHECK(validateInputTypes(context, inExpectedTypes));  in validate()
    75  NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validate()

FullyConnected.cpp
    29  NN_RET_CHECK(weights.type == input.type);  in validateShapes()
    32  NN_RET_CHECK(bias.type == OperandType::TENSOR_INT32);  in validateShapes()
    34  NN_RET_CHECK(bias.type == input.type);  in validateShapes()
    120  NN_RET_CHECK(validateInputTypes(context, inExpectedTypes));  in validate()
    121  NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validate()
    127  NN_RET_CHECK(validateShapes(input, weights, bias));  in validate()

Fill.cpp
    49  NN_RET_CHECK(outputType == OperandType::TENSOR_FLOAT16 ||  in validate()
    53  NN_RET_CHECK(validateOutputTypes(context, {outputType}));  in validate()
    56  NN_RET_CHECK(getValueType(outputType, &valueType));  in validate()
    57  NN_RET_CHECK(validateInputTypes(context, {OperandType::TENSOR_INT32, valueType}));  in validate()

GenerateProposals.cpp
    47  NN_RET_CHECK(validateInputTypes(context, inExpectedTypes));  in validate()
    48  NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validate()
    92  NN_RET_CHECK(validateInputTypes(context, inExpectedTypes));  in validate()
    93  NN_RET_CHECK(validateOutputTypes(context, outExpectedTypes));  in validate()
    157  NN_RET_CHECK(validateInputTypes(context, inExpectedTypes));  in validate()
    158  NN_RET_CHECK(validateOutputTypes(context, outExpectedTypes));  in validate()
    195  NN_RET_CHECK(validateInputTypes(context, inExpectedTypes));  in validate()
    196  NN_RET_CHECK(validateOutputTypes(  in validate()

TransposeConv2D.cpp
    28  NN_RET_CHECK(inputCount == kNumInputs1 || inputCount == kNumInputs2);  in validate()
    38  NN_RET_CHECK(filterType == OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL ||  in validate()
    67  NN_RET_CHECK(validateInputTypes(context, inExpectedTypes));  in validate()
    68  NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validate()

Activation.cpp
    48  NN_RET_CHECK(validateInputTypes(context, {inputType}));  in validate()
    49  NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validate()
    65  NN_RET_CHECK(validateInputTypes(context, {inputType}));  in validateHardSwish()
    66  NN_RET_CHECK(validateOutputTypes(context, {inputType}));  in validateHardSwish()

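The hits above from Elementwise.cpp, Reduce.cpp, Activation.cpp and the other validators in this directory all follow one pattern: first check that the input operand type is one the operation supports, then check the full input and output operand lists against the expected types. Below is a minimal, self-contained sketch of that pattern; the enum, the RET_CHECK macro and the single-type validator are simplified stand-ins for the real OperandType, NN_RET_CHECK, validateInputTypes and validateOutputTypes declared in the NNAPI headers.

    #include <cstdio>

    // Simplified stand-ins for the NNAPI types; the real NN_RET_CHECK also
    // supports streaming an error message with operator<<, which is why several
    // hits in this listing end with a streamed message.
    enum class OperandType { TENSOR_FLOAT16, TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM };

    #define RET_CHECK(cond)                                        \
        do {                                                       \
            if (!(cond)) {                                         \
                std::fprintf(stderr, "check failed: %s\n", #cond); \
                return false;                                      \
            }                                                      \
        } while (0)

    // Sketch of an element-wise validator: one input, one output, same type.
    bool validateElementwise(OperandType inputType, OperandType outputType) {
        RET_CHECK(inputType == OperandType::TENSOR_FLOAT16 ||
                  inputType == OperandType::TENSOR_FLOAT32);  // supported input types
        RET_CHECK(outputType == inputType);                   // output mirrors the input type
        return true;
    }
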
/packages/modules/NeuralNetworks/common/cpu_operations/

QLSTM.cpp
    59  NN_RET_CHECK(!context->isOmittedInput(tensor))  in prepare()
    118  NN_RET_CHECK(cifgWeightsAllOrNone);  in prepare()
    147  NN_RET_CHECK(peepholeWeightsAllOrNone);  in prepare()
    150  NN_RET_CHECK(hasTensor(context, kInputGateBiasTensor));  in prepare()
    155  NN_RET_CHECK(!hasTensor(context, kInputGateBiasTensor))  in prepare()
    216  NN_RET_CHECK(!hasTensor(context, kInputLayerNormTensor))  in prepare()
    224  NN_RET_CHECK(layerNormWeightsAllOrNoneCifg);  in prepare()
    234  NN_RET_CHECK(layerNormWeightsAllOrNone);  in prepare()
    355  NN_RET_CHECK(CheckedLog2(prevCellStateShape.scale, &cellShift));  in execute()
    356  NN_RET_CHECK(cellShift <= -9);  in execute()
    [all …]

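Several QLSTM.cpp checks above (cifgWeightsAllOrNone, peepholeWeightsAllOrNone, layerNormWeightsAllOrNone; UnidirectionalSequenceLSTM.cpp further down repeats them) enforce an "all or none" rule: an optional group of tensors is valid only when every member is provided or every member is omitted. A hedged sketch of that predicate, using plain booleans in place of the real hasTensor(context, k...Tensor) calls:

    #include <algorithm>
    #include <initializer_list>

    // True when every flag in the group is set, or none of them is. The flags
    // stand in for hasTensor(context, k...Tensor) results in the real code.
    bool allOrNone(std::initializer_list<bool> present) {
        const bool all = std::all_of(present.begin(), present.end(), [](bool p) { return p; });
        const bool none = std::none_of(present.begin(), present.end(), [](bool p) { return p; });
        return all || none;
    }

    // Usage sketch (tensor names illustrative): reject a model where only part
    // of an optional weight group is present.
    //   NN_RET_CHECK(allOrNone({hasInputToInputWeights, hasRecurrentToInputWeights}));
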
RoiPooling.cpp
    72  NN_RET_CHECK(roiInfo[0] >= 0);  in roiPoolingNhwc()
    73  NN_RET_CHECK(roiInfo[1] >= 0);  in roiPoolingNhwc()
    74  NN_RET_CHECK(roiInfo[2] >= 0);  in roiPoolingNhwc()
    75  NN_RET_CHECK(roiInfo[3] >= 0);  in roiPoolingNhwc()
    76  NN_RET_CHECK(roiInfo[0] * widthScale <= inWidth);  in roiPoolingNhwc()
    77  NN_RET_CHECK(roiInfo[1] * heightScale <= inHeight);  in roiPoolingNhwc()
    78  NN_RET_CHECK(roiInfo[2] * widthScale <= inWidth);  in roiPoolingNhwc()
    79  NN_RET_CHECK(roiInfo[3] * heightScale <= inHeight);  in roiPoolingNhwc()
    80  NN_RET_CHECK(roiInfo[0] <= roiInfo[2]);  in roiPoolingNhwc()
    81  NN_RET_CHECK(roiInfo[1] <= roiInfo[3]);  in roiPoolingNhwc()
    [all …]

Pooling.cpp
    261  NN_RET_CHECK(input.initialize(inputData, inputShape));  in averagePool()
    262  NN_RET_CHECK(output.initialize(outputData, outputShape));  in averagePool()
    263  NN_RET_CHECK(averagePoolNhwc(input.getNhwcBuffer(), input.getNhwcShape(), param,  in averagePool()
    265  NN_RET_CHECK(output.commit());  in averagePool()
    274  NN_RET_CHECK(input.initialize(inputData, inputShape));  in l2Pool()
    275  NN_RET_CHECK(output.initialize(outputData, outputShape));  in l2Pool()
    276  NN_RET_CHECK(l2PoolNhwc(input.getNhwcBuffer(), input.getNhwcShape(), param,  in l2Pool()
    278  NN_RET_CHECK(output.commit());  in l2Pool()
    287  NN_RET_CHECK(input.initialize(inputData, inputShape));  in maxPool()
    288  NN_RET_CHECK(output.initialize(outputData, outputShape));  in maxPool()
    [all …]

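Pooling.cpp here, and the convolution kernels further down, run their math on NHWC data behind an initialize / compute / commit sequence: initialize() stages the caller's tensor into an NHWC scratch buffer when needed, the kernel consumes getNhwcBuffer()/getNhwcShape(), and commit() writes the result back in the caller's layout. The sketch below is a much-reduced stand-in for those wrappers (4-D float output only, always converting); treat it as an approximation of the idea, not the real classes.

    #include <cstdint>
    #include <vector>

    struct Shape4 { uint32_t n, c, h, w; };  // hypothetical shape holder for the sketch

    // Reduced output-side wrapper: the kernel writes NHWC into a scratch buffer,
    // commit() transposes it back into the caller's NCHW tensor.
    class OutputWithNhwcLayout {
      public:
        bool initialize(float* data, const Shape4& shape) {
            mData = data;
            mShape = shape;
            mNhwc.assign(static_cast<size_t>(shape.n) * shape.h * shape.w * shape.c, 0.0f);
            return mData != nullptr;
        }
        float* getNhwcBuffer() { return mNhwc.data(); }
        bool commit() {
            for (uint32_t n = 0; n < mShape.n; ++n)
                for (uint32_t c = 0; c < mShape.c; ++c)
                    for (uint32_t h = 0; h < mShape.h; ++h)
                        for (uint32_t w = 0; w < mShape.w; ++w) {
                            const size_t nchw = ((static_cast<size_t>(n) * mShape.c + c) * mShape.h + h) * mShape.w + w;
                            const size_t nhwc = ((static_cast<size_t>(n) * mShape.h + h) * mShape.w + w) * mShape.c + c;
                            mData[nchw] = mNhwc[nhwc];
                        }
            return true;
        }
      private:
        float* mData = nullptr;
        Shape4 mShape{0, 0, 0, 0};
        std::vector<float> mNhwc;
    };
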
RoiAlign.cpp
    78  NN_RET_CHECK(roiInfo[0] >= 0);  in roiAlignNhwc()
    79  NN_RET_CHECK(roiInfo[1] >= 0);  in roiAlignNhwc()
    80  NN_RET_CHECK(roiInfo[2] >= 0);  in roiAlignNhwc()
    81  NN_RET_CHECK(roiInfo[3] >= 0);  in roiAlignNhwc()
    82  NN_RET_CHECK(roiInfo[0] * widthScale <= inWidth);  in roiAlignNhwc()
    83  NN_RET_CHECK(roiInfo[1] * heightScale <= inHeight);  in roiAlignNhwc()
    84  NN_RET_CHECK(roiInfo[2] * widthScale <= inWidth);  in roiAlignNhwc()
    85  NN_RET_CHECK(roiInfo[3] * heightScale <= inHeight);  in roiAlignNhwc()
    86  NN_RET_CHECK(roiInfo[0] <= roiInfo[2]);  in roiAlignNhwc()
    87  NN_RET_CHECK(roiInfo[1] <= roiInfo[3]);  in roiAlignNhwc()
    [all …]

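The RoiPooling.cpp and RoiAlign.cpp checks above validate each region of interest before sampling from it: coordinates must be non-negative, must stay inside the input feature map once the spatial scale is applied, and the box must be well formed. A compact sketch, assuming the [x1, y1, x2, y2] layout that the index pattern in the checks suggests:

    #include <cstdint>

    // Validate one ROI given as [x1, y1, x2, y2], where widthScale/heightScale
    // map ROI coordinates into the input feature map of size inWidth x inHeight.
    bool validateRoi(const float roiInfo[4], float widthScale, float heightScale,
                     uint32_t inWidth, uint32_t inHeight) {
        for (int i = 0; i < 4; ++i) {
            if (roiInfo[i] < 0) return false;                  // no negative coordinates
        }
        if (roiInfo[0] * widthScale > inWidth) return false;   // x1 inside the input
        if (roiInfo[1] * heightScale > inHeight) return false; // y1 inside the input
        if (roiInfo[2] * widthScale > inWidth) return false;   // x2 inside the input
        if (roiInfo[3] * heightScale > inHeight) return false; // y2 inside the input
        return roiInfo[0] <= roiInfo[2] && roiInfo[1] <= roiInfo[3];  // x1 <= x2, y1 <= y2
    }
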
GenerateProposals.cpp
    125  NN_RET_CHECK(bboxTransformFloat32(roi_float32.data(), roiShape, delta_float32.data(),  in bboxTransformFloat16()
    147  NN_RET_CHECK(bboxTransformFloat32(roi_float32.data(), roiShape, delta_float32.data(),  in bboxTransformQuant()
    169  NN_RET_CHECK(bboxTransformFloat32(roi_float32.data(), roiShape, delta_float32.data(),  in bboxTransformQuant()
    237  NN_RET_CHECK(context->setOutputShape(kOutputTensor, outputShape));  in prepare()
    531  NN_RET_CHECK(context->setOutputShape(kOutputScoreTensor, scoresOutShape));  in boxWithNmsLimitWriteOutput()
    535  NN_RET_CHECK(context->setOutputShape(kOutputRoiTensor, roiOutShape));  in boxWithNmsLimitWriteOutput()
    539  NN_RET_CHECK(context->setOutputShape(kOutputClassTensor, classesOutShape));  in boxWithNmsLimitWriteOutput()
    543  NN_RET_CHECK(context->setOutputShape(kOutputBatchesTensor, batchesOutShape));  in boxWithNmsLimitWriteOutput()
    585  NN_RET_CHECK(boxWithNmsLimitFloat32Compute(  in boxWithNmsLimitFloat32()
    609  NN_RET_CHECK(boxWithNmsLimitFloat32Compute(  in boxWithNmsLimitFloat16()
    [all …]

LogicalAndOr.cpp
    42  NN_RET_CHECK(outputShapeIndexed.indexToFlatIndex(curIndex, &outputFlatIndex));  in compute()
    44  NN_RET_CHECK(aShapeIndexed.broadcastedIndexToFlatIndex(curIndex, &aFlatIndex));  in compute()
    46  NN_RET_CHECK(bShapeIndexed.broadcastedIndexToFlatIndex(curIndex, &bFlatIndex));  in compute()
    50  NN_RET_CHECK(outputShapeIndexed.nextIndexInplace(&curIndex, &lastIndex));  in compute()
    61  NN_RET_CHECK(calculateBroadcastedShape(input1, input2, &output));  in prepare()

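LogicalAndOr.cpp here (and PRelu.cpp later in this listing) walks every index of the broadcasted output shape and maps it back to a flat index in each input, so inputs with size-1 dimensions are repeated implicitly. The self-contained sketch below reimplements that idea with plain vectors; the indexToFlatIndex / broadcastedIndexToFlatIndex / nextIndexInplace helpers named in the hits do the same job on the real shape objects, so treat this as an approximation rather than the actual implementation.

    #include <cstdint>
    #include <vector>

    // Row-major flat index of `index` within `dims`, clamping coordinates of
    // broadcast (size-1) dimensions to 0.
    uint32_t broadcastedFlatIndex(const std::vector<uint32_t>& index,
                                  const std::vector<uint32_t>& dims) {
        uint32_t flat = 0;
        for (size_t i = 0; i < dims.size(); ++i) {
            flat = flat * dims[i] + (dims[i] == 1 ? 0 : index[i]);
        }
        return flat;
    }

    // Element-wise OR with NumPy-style broadcasting. outDims must already be the
    // broadcasted shape of aDims and bDims (same rank, padded with leading 1s).
    void logicalOrBroadcast(const bool* a, const std::vector<uint32_t>& aDims,
                            const bool* b, const std::vector<uint32_t>& bDims,
                            bool* out, const std::vector<uint32_t>& outDims) {
        uint32_t numElements = 1;
        for (uint32_t d : outDims) numElements *= d;
        std::vector<uint32_t> index(outDims.size(), 0);
        for (uint32_t flatOut = 0; flatOut < numElements; ++flatOut) {
            out[flatOut] = a[broadcastedFlatIndex(index, aDims)] ||
                           b[broadcastedFlatIndex(index, bDims)];
            // Advance `index` over the output shape like an odometer.
            for (int i = static_cast<int>(index.size()) - 1; i >= 0; --i) {
                if (++index[i] < outDims[i]) break;
                index[i] = 0;
            }
        }
    }

The calculateBroadcastedShape hit in prepare() is the step that produces the broadcasted output shape which this kind of loop then iterates over.
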
QuantizedLSTM.cpp
    267  NN_RET_CHECK(weightsScale != 0);  in prepare()
    282  NN_RET_CHECK(checkWeightsShape(inputToInputWeights, inputSize));  in prepare()
    283  NN_RET_CHECK(checkWeightsShape(inputToForgetWeights, inputSize));  in prepare()
    284  NN_RET_CHECK(checkWeightsShape(inputToCellWeights, inputSize));  in prepare()
    285  NN_RET_CHECK(checkWeightsShape(inputToOutputWeights, inputSize));  in prepare()
    291  NN_RET_CHECK(checkWeightsShape(recurrentToInputWeights, outputSize));  in prepare()
    292  NN_RET_CHECK(checkWeightsShape(recurrentToForgetWeights, outputSize));  in prepare()
    293  NN_RET_CHECK(checkWeightsShape(recurrentToCellWeights, outputSize));  in prepare()
    294  NN_RET_CHECK(checkWeightsShape(recurrentToOutputWeights, outputSize));  in prepare()
    313  NN_RET_CHECK(checkBiasShape(inputGateBias));  in prepare()
    [all …]

TransposeConv2D.cpp
    206  NN_RET_CHECK(GetQuantizedConvolutionMultiplier(inputShape, filterShape, biasShape, outputShape,  in transposeConvNhwc()
    209  NN_RET_CHECK(QuantizeMultiplier(realMultiplier, &outputMultiplier, &exponent));  in transposeConvNhwc()
    303  NN_RET_CHECK(input.initialize(inputData, inputShape));  in transposeConv()
    304  NN_RET_CHECK(output.initialize(outputData, outputShape));  in transposeConv()
    305  NN_RET_CHECK(transposeConvNhwc(input.getNhwcBuffer(), input.getNhwcShape(), filterData,  in transposeConv()
    308  NN_RET_CHECK(output.commit());  in transposeConv()
    347  NN_RET_CHECK(GetQuantizedConvolutionMultiplier(  in transposeConvQuant8PerChannelNhwc()
    350  NN_RET_CHECK(QuantizeMultiplier(realMultiplier[i], &outputMultiplier[i], &exponent));  in transposeConvQuant8PerChannelNhwc()
    423  NN_RET_CHECK(input.initialize(inputData, inputShape));  in transposeConvQuant8PerChannel()
    424  NN_RET_CHECK(output.initialize(outputData, outputShape));  in transposeConvQuant8PerChannel()
    [all …]

DepthwiseConv2D.cpp
    206  NN_RET_CHECK(GetQuantizedConvolutionMultiplier(inputShape, filterShape, biasShape, outputShape,  in depthwiseConvNhwc()
    209  NN_RET_CHECK(QuantizeMultiplier(real_multiplier, &output_multiplier, &exponent));  in depthwiseConvNhwc()
    260  NN_RET_CHECK(depthwiseConvNhwc(unsignedInput.data(), inputShape, unsignedFilter.data(),  in depthwiseConvNhwc()
    308  NN_RET_CHECK(GetQuantizedConvolutionMultiplier(  in depthwiseConvQuant8PerChannelNhwc()
    311  NN_RET_CHECK(QuantizeMultiplier(realMultiplier[i], &outputMultiplier[i], &exponent));  in depthwiseConvQuant8PerChannelNhwc()
    378  NN_RET_CHECK(input.initialize(inputData, inputShape));  in depthwiseConv()
    379  NN_RET_CHECK(output.initialize(outputData, outputShape));  in depthwiseConv()
    380  NN_RET_CHECK(depthwiseConvNhwc(input.getNhwcBuffer(), input.getNhwcShape(), filterData,  in depthwiseConv()
    385  NN_RET_CHECK(output.commit());  in depthwiseConv()
    401  NN_RET_CHECK(input.initialize(inputData, inputShape));  in depthwiseConvQuant8PerChannel()
    [all …]

Conv2D.cpp
    240  NN_RET_CHECK(GetQuantizedConvolutionMultiplier(inputShape, filterShape, biasShape, outputShape,  in convNhwc()
    243  NN_RET_CHECK(QuantizeMultiplier(real_multiplier, &output_multiplier, &exponent));  in convNhwc()
    293  NN_RET_CHECK(convNhwc(unsignedInput.data(), inputShape, unsignedFilter.data(), filterShape,  in convNhwc()
    338  NN_RET_CHECK(input.initialize(inputData, inputShape));  in conv()
    339  NN_RET_CHECK(output.initialize(outputData, outputShape));  in conv()
    340  NN_RET_CHECK(convNhwc(input.getNhwcBuffer(), input.getNhwcShape(), filterData, filterShape,  in conv()
    345  NN_RET_CHECK(output.commit());  in conv()
    382  NN_RET_CHECK(GetQuantizedConvolutionMultiplier(  in convQuant8PerChannelNhwc()
    385  NN_RET_CHECK(QuantizeMultiplier(realMultiplier[i], &outputMultiplier[i], &exponent));  in convQuant8PerChannelNhwc()
    475  NN_RET_CHECK(GetQuantizedConvolutionMultiplier(  in convQuant8PerChannelNhwc()
    [all …]

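The quantized paths in Conv2D.cpp, DepthwiseConv2D.cpp and TransposeConv2D.cpp all rescale their accumulators the same way: GetQuantizedConvolutionMultiplier derives a real-valued multiplier from the input, filter and output scales (per channel in the *PerChannel variants), and QuantizeMultiplier decomposes it into a 32-bit fixed-point multiplier plus a power-of-two exponent. The sketch below shows one common frexp-based way to perform that decomposition; it mirrors the intent of the helper named in the hits but is not copied from it.

    #include <cmath>
    #include <cstdint>
    #include <limits>

    // Decompose realMultiplier into quantizedMultiplier * 2^exponent, where
    // quantizedMultiplier is a Q0.31 fixed-point value in [2^30, 2^31).
    bool quantizeMultiplier(double realMultiplier, int32_t* quantizedMultiplier, int* exponent) {
        if (realMultiplier <= 0.0) return false;     // sketch: reject non-positive multipliers
        int exp = 0;
        const double fraction = std::frexp(realMultiplier, &exp);  // fraction in [0.5, 1)
        int64_t fixedPoint = static_cast<int64_t>(std::round(fraction * (1LL << 31)));
        if (fixedPoint == (1LL << 31)) {             // rounding reached 2^31: renormalize
            fixedPoint /= 2;
            ++exp;
        }
        if (fixedPoint > std::numeric_limits<int32_t>::max()) return false;
        *quantizedMultiplier = static_cast<int32_t>(fixedPoint);
        *exponent = exp;                             // positive: left shift, negative: right shift
        return true;
    }

    // Typical call site for a per-tensor quantized convolution (sketch):
    //   double realMultiplier = inputScale * filterScale / outputScale;
    //   NN_RET_CHECK(quantizeMultiplier(realMultiplier, &outputMultiplier, &exponent));
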
UnidirectionalSequenceLSTM.cpp
    86  NN_RET_CHECK(!context->isOmittedInput(requiredInput))  in prepare()
    147  NN_RET_CHECK(cifgWeightsAllOrNone);  in prepare()
    176  NN_RET_CHECK(peepholeWeightsAllOrNone);  in prepare()
    179  NN_RET_CHECK(hasTensor(context, kInputGateBiasTensor));  in prepare()
    184  NN_RET_CHECK(!hasTensor(context, kInputGateBiasTensor))  in prepare()
    245  NN_RET_CHECK(!hasTensor(context, kInputLayerNormWeightsTensor))  in prepare()
    254  NN_RET_CHECK(layerNormWeightsAllOrNoneCifg);  in prepare()
    265  NN_RET_CHECK(layerNormWeightsAllOrNone);  in prepare()
    272  NN_RET_CHECK(!context->isOmittedOutput(kOutputStateOutTensor));  in prepare()
    273  NN_RET_CHECK(!context->isOmittedOutput(kCellStateOutTensor));  in prepare()
    [all …]

PRelu.cpp
    55  NN_RET_CHECK(outputShapeIndexed.indexToFlatIndex(curIndex, &outputFlatIndex));  in eval()
    57  NN_RET_CHECK(aShapeIndexed.broadcastedIndexToFlatIndex(curIndex, &aFlatIndex));  in eval()
    59  NN_RET_CHECK(bShapeIndexed.broadcastedIndexToFlatIndex(curIndex, &bFlatIndex));  in eval()
    63  NN_RET_CHECK(outputShapeIndexed.nextIndexInplace(&curIndex, &lastIndex));  in eval()
    103  NN_RET_CHECK(input.type == alpha.type);  in prepare()
    105  NN_RET_CHECK(calculateBroadcastedShape(input, alpha, &output));  in prepare()

/packages/modules/NeuralNetworks/runtime/operation_converters/

DepthwiseConv2DOperationConverter.cpp
    54  NN_RET_CHECK(isOperandConstant(paddingTypeOperand));  in convert()
    72  NN_RET_CHECK(isOperandConstant(strideWOperand));  in convert()
    73  NN_RET_CHECK(isOperandConstant(strideHOperand));  in convert()
    74  NN_RET_CHECK(isOperandConstant(activationOperand));  in convert()
    75  NN_RET_CHECK(isOperandConstant(depthwiseMultiplierOperand));  in convert()
    88  NN_RET_CHECK(isOperandConstant(isNchwOperand));  in convert()
    91  NN_RET_CHECK(!isNchw) << "TFLite does not support NCHW formatted input tensors";  in convert()
    102  NN_RET_CHECK(isOperandConstant(dilationWOperand));  in convert()
    108  NN_RET_CHECK(isOperandConstant(dilationHOperand));  in convert()

Conv2DOperationConverter.cpp
    29  NN_RET_CHECK(isOperandConstant(  in getConv2DInputs()
    68  NN_RET_CHECK(isOperandConstant(frontWidthPaddingOperand));  in decomposeExplicitPadding()
    69  NN_RET_CHECK(isOperandConstant(backWidthPaddingOperand));  in decomposeExplicitPadding()
    70  NN_RET_CHECK(isOperandConstant(frontHeightPaddingOperand));  in decomposeExplicitPadding()
    71  NN_RET_CHECK(isOperandConstant(backHeightPaddingOperand));  in decomposeExplicitPadding()
    172  NN_RET_CHECK(isOperandConstant(paddingTypeOperand));  in convert()
    188  NN_RET_CHECK(isOperandConstant(strideWOperand));  in convert()
    189  NN_RET_CHECK(isOperandConstant(strideHOperand));  in convert()
    190  NN_RET_CHECK(isOperandConstant(activationOperand));  in convert()
    202  NN_RET_CHECK(isOperandConstant(isNchwOperand));  in convert()
    [all …]

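Both converter listings guard every scalar parameter (padding scheme, strides, activation, dilation, the NCHW flag) with isOperandConstant before reading it: the NNAPI-to-TFLite conversion runs ahead of execution, so it can only fold parameters whose values are already compile-time constants in the model. A trimmed-down sketch of that guard, using a hypothetical Operand representation:

    #include <cstdint>

    // Hypothetical, trimmed-down operand descriptor for the sketch; the real
    // Operand records a lifetime plus a location into the model's constant data.
    enum class OperandLifeTime { CONSTANT_COPY, CONSTANT_REFERENCE, TEMPORARY_VARIABLE, SUBGRAPH_INPUT };
    struct Operand {
        OperandLifeTime lifetime;
        const void* constantData;  // only meaningful for the constant lifetimes
    };

    bool isOperandConstant(const Operand& operand) {
        return operand.lifetime == OperandLifeTime::CONSTANT_COPY ||
               operand.lifetime == OperandLifeTime::CONSTANT_REFERENCE;
    }

    // Conversion-time pattern: a stride can only be baked into the converted
    // operation's options if the operand that holds it is a constant.
    bool readConstantStride(const Operand& strideOperand, int32_t* strideOut) {
        if (!isOperandConstant(strideOperand)) return false;
        *strideOut = *static_cast<const int32_t*>(strideOperand.constantData);
        return true;
    }
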
/packages/modules/NeuralNetworks/common/types/src/

Validation.cpp
    258  NN_RET_CHECK(timing.timeOnDevice.value() <= timing.timeInDriver.value()) << lazyMessage();  in validateTiming()
    311  NN_RET_CHECK(isValidExtensionName(extension.name));  in validateExtension()
    323  NN_RET_CHECK(iter == types.end()) << "Extension has duplicate type " << *iter;  in validateExtension()
    341  NN_RET_CHECK(nameIter == names.end())  in validateExtensions()
    364  NN_RET_CHECK(location.pointer == kNullptrVariant)  in validateOperandDataLocation()
    386  NN_RET_CHECK(location.pointer == kNullptrVariant)  in validateOperandDataLocation()
    397  NN_RET_CHECK(location.pointer == kNullptrVariant) << "SUBGRAPH with a non-null pointer";  in validateOperandDataLocation()
    413  NN_RET_CHECK(nonNull) << "POINTER with a null pointer";  in validateOperandDataLocation()
    433  NN_RET_CHECK(operand.dimensions.empty())  in validateOperandDimensions()
    450  NN_RET_CHECK(!operand.dimensions.empty())  in validateOperandDimensions()
    [all …]

/packages/modules/NeuralNetworks/common/

LegacyUtils.cpp
    371  NN_RET_CHECK(type.dimensions == nullptr) << tag << " invalid dimensions for scalar type";  in validateScalarDimensions()
    376  NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 255)  in validateQuant8AsymmParams()
    384  NN_RET_CHECK(-128 <= type.zeroPoint && type.zeroPoint <= 127)  in validateQuant8AsymmSignedParams()
    397  NN_RET_CHECK(0 <= type.zeroPoint && type.zeroPoint <= 65535)  in validateQuant16AsymmParams()
    445  NN_RET_CHECK(extensionOperandTypeInfo != nullptr);  in validateOperandTypeHelper()
    447  NN_RET_CHECK(  in validateOperandTypeHelper()
    450  NN_RET_CHECK(validateScalarDimensions(type, tag));  in validateOperandTypeHelper()
    455  NN_RET_CHECK(extensionOperandTypeInfo == nullptr);  in validateOperandTypeHelper()
    456  NN_RET_CHECK(validCode(kNumberOfDataTypes, kNumberOfDataTypesOEM, type.type))  in validateOperandTypeHelper()
    461  NN_RET_CHECK(validateScalarDimensions(type, tag));  in validateOperandTypeHelper()
    [all …]

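The LegacyUtils.cpp hits pin an operand's zero point to the range of its storage type: 0..255 for TENSOR_QUANT8_ASYMM, -128..127 for TENSOR_QUANT8_ASYMM_SIGNED, and 0..65535 for TENSOR_QUANT16_ASYMM. A small sketch of those range checks; the scale > 0 requirement is an extra assumption typical of asymmetric quantization, not something visible in the hits above:

    #include <cstdint>

    // Zero-point range checks per quantized operand type, mirroring the
    // validateQuant*Params helpers listed above.
    bool validateQuant8AsymmParams(float scale, int32_t zeroPoint) {
        if (scale <= 0.0f) return false;              // assumption: positive scale
        return 0 <= zeroPoint && zeroPoint <= 255;    // must fit in uint8
    }
    bool validateQuant8AsymmSignedParams(float scale, int32_t zeroPoint) {
        if (scale <= 0.0f) return false;
        return -128 <= zeroPoint && zeroPoint <= 127; // must fit in int8
    }
    bool validateQuant16AsymmParams(float scale, int32_t zeroPoint) {
        if (scale <= 0.0f) return false;
        return 0 <= zeroPoint && zeroPoint <= 65535;  // must fit in uint16
    }
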
/packages/modules/NeuralNetworks/runtime/

Memory.cpp
    57  NN_RET_CHECK(offset + length <= kSize) << "request size larger than the memory size.";  in validate()
    58  NN_RET_CHECK(offset != 0 || length != 0) << "memory size cannot be implied.";  in validate()
    81  NN_RET_CHECK(compilation != nullptr)  in validate()
    83  NN_RET_CHECK(offset == 0 && length == 0)  in validate()
    108  NN_RET_CHECK(kCompilationRoles.count({compilation, ioType, index}) > 0)  in validate()
    110  NN_RET_CHECK(offset == 0 && length == 0)  in validate()
    114  NN_RET_CHECK(isTensor || type->dimensionCount == 0)  in validate()
    122  NN_RET_CHECK(combined.has_value())  in validate()
    130  NN_RET_CHECK(mInitialized) << "using an uninitialized memory as input";  in validateInputDimensions()
    131  NN_RET_CHECK(dimensions == mUpdatedDimensions)  in validateInputDimensions()
    [all …]

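The first two Memory.cpp hits validate a requested (offset, length) window against the size of the backing memory and reject offset == 0 && length == 0 where an explicit size is required, while the hits at lines 83 and 110 take the opposite stance and require the zero/zero "whole memory" form for those memory kinds. A minimal sketch of the windowed case; the explicit overflow guard is a defensive assumption added here, the hit itself only shows the plain offset + length <= size comparison:

    #include <cstdint>

    // Validate that [offset, offset + length) lies inside a pool of poolSize bytes.
    bool validateMemoryWindow(uint64_t offset, uint64_t length, uint64_t poolSize) {
        if (offset == 0 && length == 0) return false;  // "memory size cannot be implied"
        // Overflow-safe form of: offset + length <= poolSize.
        if (length > poolSize || offset > poolSize - length) return false;
        return true;
    }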