Searched refs:weightsScale (Results 1 – 3 of 3) sorted by relevance
/packages/modules/NeuralNetworks/common/cpu_operations/
QuantizedLSTMTest.cpp
    276  float weightsScale = 0.00408021;                                                        in TEST_F() (local)
    292  OperandTypeParams(Type::TENSOR_QUANT8_ASYMM, {outputSize, inputSize}, weightsScale,     in TEST_F()
    294  OperandTypeParams(Type::TENSOR_QUANT8_ASYMM, {outputSize, inputSize}, weightsScale,     in TEST_F()
    296  OperandTypeParams(Type::TENSOR_QUANT8_ASYMM, {outputSize, inputSize}, weightsScale,     in TEST_F()
    298  OperandTypeParams(Type::TENSOR_QUANT8_ASYMM, {outputSize, inputSize}, weightsScale,     in TEST_F()
    304  OperandTypeParams(Type::TENSOR_QUANT8_ASYMM, {outputSize, outputSize}, weightsScale,    in TEST_F()
    306  OperandTypeParams(Type::TENSOR_QUANT8_ASYMM, {outputSize, outputSize}, weightsScale,    in TEST_F()
    308  OperandTypeParams(Type::TENSOR_QUANT8_ASYMM, {outputSize, outputSize}, weightsScale,    in TEST_F()
    310  OperandTypeParams(Type::TENSOR_QUANT8_ASYMM, {outputSize, outputSize}, weightsScale,    in TEST_F()
    316  OperandTypeParams(Type::TENSOR_INT32, {outputSize}, weightsScale / 128., 0),            in TEST_F()
    [all …]
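
The test above builds every quantized weight operand with a single scale (0.00408021) and gives the int32 bias operands a scale of weightsScale / 128. As a reminder of what that scale means for a TENSOR_QUANT8_ASYMM operand, here is a minimal standalone sketch; the zero point and weight value are made up for illustration and this is not the test harness API:

    #include <cstdint>
    #include <iostream>

    // A TENSOR_QUANT8_ASYMM value q represents the real number
    // scale * (q - zeroPoint).
    float dequantize(uint8_t q, float scale, int32_t zeroPoint) {
        return scale * (static_cast<int32_t>(q) - zeroPoint);
    }

    int main() {
        const float weightsScale = 0.00408021f;        // same constant as the test
        const int32_t weightsZeroPoint = 100;          // illustrative value, not taken from the test
        const float biasScale = weightsScale / 128.f;  // matches the TENSOR_INT32 bias operands above

        std::cout << "quantized 146 -> " << dequantize(146, weightsScale, weightsZeroPoint) << '\n';
        std::cout << "bias scale    -> " << biasScale << '\n';
        return 0;
    }
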
QuantizedLSTM.cpp
    266  const float weightsScale = inputToInputWeights->scale;    in prepare() (local)
    267  NN_RET_CHECK(weightsScale != 0);                           in prepare()
    274  NN_RET_CHECK_EQ(weights->scale, weightsScale);             in prepare()
    298  NN_RET_CHECK_EQ(biasScale, weightsScale / 128.0);          in prepare()
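
QuantizedLSTM::prepare() is where the scale invariants are enforced: the reference weights scale (taken from inputToInputWeights) must be non-zero, every other weight tensor must use exactly the same scale, and the bias scale must equal weightsScale / 128.0. A hedged standalone restatement of those checks, using an illustrative QuantParams struct rather than the NN_RET_CHECK macros from the file above:

    #include <cstdint>
    #include <vector>

    struct QuantParams {
        float scale;
        int32_t zeroPoint;
    };

    // Mirrors the scale checks from QuantizedLSTM::prepare():
    //  * the reference weights scale is non-zero,
    //  * all weight tensors share that scale,
    //  * all bias tensors use weightsScale / 128.
    bool checkQuantizedLstmScales(const std::vector<QuantParams>& weights,
                                  const std::vector<QuantParams>& biases) {
        if (weights.empty()) return false;
        const float weightsScale = weights.front().scale;  // e.g. inputToInputWeights->scale
        if (weightsScale == 0.f) return false;
        for (const auto& w : weights) {
            if (w.scale != weightsScale) return false;
        }
        const float expectedBiasScale = weightsScale / 128.0f;
        for (const auto& b : biases) {
            if (b.scale != expectedBiasScale) return false;
        }
        return true;
    }
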
/packages/modules/NeuralNetworks/common/types/operations/src/
FullyConnected.cpp
    92   const float weightsScale = context->getInputShape(kWeightsTensor).scale;                    in validate() (local)
    94   bool meetsQuantizedScaleConstraintBeforeV1_2 = (outputScale > inputScale * weightsScale);   in validate()
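
In FullyConnected.cpp the weights scale only feeds the legacy quantization rule: before HAL version 1.2, a quantized fully connected op is valid only when outputScale > inputScale * weightsScale. A minimal sketch of that predicate; the free function is illustrative, the real check is an inline expression inside validate():

    // Legacy (pre-1.2) quantization constraint from FullyConnected validate():
    // the output scale must strictly exceed the product of the input and weights scales.
    bool meetsQuantizedScaleConstraintBeforeV1_2(float inputScale, float weightsScale,
                                                 float outputScale) {
        return outputScale > inputScale * weightsScale;
    }

    // Example: meetsQuantizedScaleConstraintBeforeV1_2(0.5f, 0.00408021f, 0.01f) returns true,
    // since 0.01 > 0.5 * 0.00408021.
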