
Searched refs:num_units (Results 1 – 10 of 10) sorted by relevance

/packages/modules/NeuralNetworks/runtime/test/specs/V1_3/
unidirectional_sequence_rnn.mod.py
50 num_units = 16 variable
182 "{{{}, {}}}".format(num_units, input_size)),
184 "{{{}, {}}}".format(num_units, num_units)),
185 bias=Input("bias", "TENSOR_FLOAT32", "{{{}}}".format(num_units)),
187 "{{{}, {}}}".format(num_batches, num_units)),
189 "{{{}, {}, {}}}".format(num_batches, max_time, num_units)),
191 "{{{}, {}}}".format(num_batches, num_units)),
198 hidden_state_data=[0] * num_batches * num_units,
208 "{{{}, {}}}".format(num_units, input_size)),
210 "{{{}, {}}}".format(num_units, num_units)),
[all …]
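The matches above show that every operand in the RNN test spec is shaped in terms of num_units. A minimal sketch of that shape bookkeeping, taking num_units = 16 from the spec and using assumed placeholder values for the remaining sizes:

    # Sketch of the shape bookkeeping in unidirectional_sequence_rnn.mod.py.
    # Only num_units = 16 is taken from the spec; the other sizes are assumed.
    num_units = 16
    input_size, num_batches, max_time = 8, 2, 4

    shapes = {
        "weights":           [num_units, input_size],   # input-to-hidden weights
        "recurrent_weights": [num_units, num_units],    # hidden-to-hidden weights
        "bias":              [num_units],
        "hidden_state":      [num_batches, num_units],
        "output":            [num_batches, max_time, num_units],
    }
    hidden_state_data = [0] * num_batches * num_units   # zero-initialized state, as in the spec

Tying every shape to the one num_units variable keeps the weights, bias, state, and output consistent when the spec is regenerated with a different cell size.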
qlstm_projection.mod.py
24 num_units = 4 variable
30 InputWeightsType = ("TENSOR_QUANT8_SYMM", [num_units, input_size], 0.00784314, 0)
36 RecurrentWeightsType = ("TENSOR_QUANT8_SYMM", [num_units, output_size], 0.00784314, 0)
42 CellWeightsType = ("TENSOR_QUANT16_SYMM", [num_units], 1.0, 0)
48 BiasType = ("TENSOR_INT32", [num_units], 0.0, 0)
55 ("TENSOR_QUANT8_SYMM", [output_size, num_units], 0.00392157, 0))
59 CellStateType = ("TENSOR_QUANT16_SYMM", [batch_size, num_units], 3.05176e-05, 0)
63 LayerNormType = ("TENSOR_QUANT16_SYMM", [num_units], 3.05182e-05, 0)
135 cell_state_in: [ 0 for _ in range(batch_size * num_units) ],
192 cell_state_in: [ 0 for _ in range(batch_size * num_units) ],
qlstm_noprojection.mod.py
24 num_units = 4 variable
30 InputWeightsType = ("TENSOR_QUANT8_SYMM", [num_units, input_size], 0.00784314, 0)
36 RecurrentWeightsType = ("TENSOR_QUANT8_SYMM", [num_units, output_size], 0.00784314, 0)
42 CellWeightsType = ("TENSOR_QUANT16_SYMM", [num_units], 1.0, 0)
48 BiasType = ("TENSOR_INT32", [num_units], 0.0, 0)
55 ("TENSOR_QUANT8_SYMM", [output_size, num_units], 0.00392157, 0))
59 CellStateType = ("TENSOR_QUANT16_SYMM", [batch_size, num_units], 3.05176e-05, 0)
63 LayerNormType = ("TENSOR_QUANT16_SYMM", [num_units], 3.05182e-05, 0)
129 cell_state_in: [ 0 for _ in range(batch_size * num_units) ],
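Both QLSTM specs (with and without projection) derive their quantized operand types from the same num_units = 4. A sketch of those (type, shape, scale, zero point) tuples, reusing the values shown above; input_size, output_size, and batch_size are assumed placeholders:

    # Quantized operand types as in qlstm_projection.mod.py / qlstm_noprojection.mod.py.
    num_units = 4                                    # from the specs above
    input_size, output_size, batch_size = 5, 3, 2    # assumed for illustration

    InputWeightsType     = ("TENSOR_QUANT8_SYMM",  [num_units, input_size],  0.00784314, 0)
    RecurrentWeightsType = ("TENSOR_QUANT8_SYMM",  [num_units, output_size], 0.00784314, 0)
    CellWeightsType      = ("TENSOR_QUANT16_SYMM", [num_units],              1.0, 0)
    BiasType             = ("TENSOR_INT32",        [num_units],              0.0, 0)
    CellStateType        = ("TENSOR_QUANT16_SYMM", [batch_size, num_units],  3.05176e-05, 0)
    LayerNormType        = ("TENSOR_QUANT16_SYMM", [num_units],              3.05182e-05, 0)

    # Both variants zero-initialize the cell state to batch_size * num_units values.
    cell_state_in_data = [0 for _ in range(batch_size * num_units)]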
/packages/modules/NeuralNetworks/runtime/test/specs/V1_2/
unidirectional_sequence_rnn.mod.py
47 num_units = 16 variable
144 num_units, input_size)),
146 "{{{}, {}}}".format(num_units, num_units)),
147 bias=Input("bias", "TENSOR_FLOAT32", "{{{}}}".format(num_units)),
149 num_batches, num_units)),
151 num_batches, max_time, num_units)),
158 hidden_state_data=[0] * num_batches * num_units,
166 num_units, input_size)),
168 "{{{}, {}}}".format(num_units, num_units)),
169 bias=Input("bias", "TENSOR_FLOAT32", "{{{}}}".format(num_units)),
[all …]
/packages/modules/NeuralNetworks/common/cpu_operations/
RNN.cpp
62 const uint32_t num_units = SizeOfDimension(input_weights, 0); in Prepare() local
72 hiddenStateShape->dimensions = {batch_size, num_units}; in Prepare()
76 outputShape->dimensions = {batch_size, num_units}; in Prepare()
146 const uint32_t num_units = weightsShape.dimensions[0]; in RNNStep() local
163 const T* hidden_state_in_ptr_batch = hiddenStateInputData + b * num_units; in RNNStep()
179 for (uint32_t o = 0; o < num_units; o++) { in RNNStep()
184 for (uint32_t o = 0; o < num_units; o++) { in RNNStep()
193 for (uint32_t o = 0; o < num_units; o++) { in RNNStep()
202 for (uint32_t o = 0; o < num_units; o++) { in RNNStep()
203 for (uint32_t h = 0; h < num_units; h++) { in RNNStep()
[all …]
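In RNN.cpp, num_units is taken from dimension 0 of the input weights, and both the hidden state and the output come out shaped {batch_size, num_units}. A rough NumPy sketch of what the per-unit loops in RNNStep() compute, assuming the standard basic-RNN semantics (output = activation(input·Wᵀ + state·Rᵀ + bias)); the names below are placeholders, not the C++ identifiers:

    import numpy as np

    def rnn_step(inputs, hidden_state_in, weights, recurrent_weights, bias,
                 activation=np.tanh):
        # inputs:            [batch_size, input_size]
        # hidden_state_in:   [batch_size, num_units]
        # weights:           [num_units, input_size]
        # recurrent_weights: [num_units, num_units]
        # bias:              [num_units]
        num_units = weights.shape[0]          # mirrors SizeOfDimension(input_weights, 0)
        assert recurrent_weights.shape == (num_units, num_units)
        out = activation(inputs @ weights.T + hidden_state_in @ recurrent_weights.T + bias)
        return out                            # [batch_size, num_units], also the new hidden state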
SVDF.cpp
86 const uint32_t num_units = num_filters / rank; in Prepare() local
93 NN_CHECK_EQ(SizeOfDimension(bias, 0), num_units); in Prepare()
105 outputShape->dimensions = {batch_size, num_units}; in Prepare()
173 const int num_units = num_filters / rank; in EvalFloat32() local
208 tflite::tensor_utils::ReductionSumVector(scratch, outputData, batch_size * num_units, rank); in EvalFloat32()
212 tflite::tensor_utils::VectorBatchVectorAdd(biasData, num_units, batch_size, outputData); in EvalFloat32()
217 outputData, batch_size * num_units, in EvalFloat32()
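The SVDF matches center on the relation num_units = num_filters / rank: EvalFloat32() reduces the per-filter results over rank and then adds the bias of length num_units. A small sketch of that output stage, assuming a scratch buffer that already holds one activation per filter per batch; the names are illustrative, not the C++ identifiers:

    import numpy as np

    def svdf_output_stage(scratch, bias, rank):
        # scratch: [batch_size, num_filters] with num_filters = num_units * rank
        # bias:    [num_units]
        batch_size, num_filters = scratch.shape
        num_units = num_filters // rank                  # num_units = num_filters / rank
        # ReductionSumVector: sum each consecutive group of `rank` filter outputs into one unit
        out = scratch.reshape(batch_size, num_units, rank).sum(axis=-1)
        # VectorBatchVectorAdd: add the per-unit bias to every batch row
        return out + bias                                # [batch_size, num_units]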
RNNTest.cpp
189 uint32_t num_units() const { return units_; } in num_units() function in android::nn::wrapper::BasicRNNOpModel
294 float* golden_start = rnn_golden_output + i * rnn.num_units(); in TEST()
295 float* golden_end = golden_start + rnn.num_units(); in TEST()
SVDFTest.cpp
296 int num_units() const { return units_; } in num_units() function in android::nn::wrapper::SVDFOpModel
340 const int svdf_num_units = svdf.num_units(); in TEST()
399 const int svdf_num_units = svdf.num_units(); in TEST()
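Both test fixtures expose num_units() so that a flat golden-output array can be sliced into per-invocation chunks, as in the RNNTest lines above. A small sketch of that slicing pattern, with the golden data and step count assumed for illustration:

    # Slice a flat golden-output buffer into chunks of num_units values per step.
    num_units = 16                                        # e.g. what rnn.num_units() would return
    num_steps = 3                                         # assumed number of invocations
    rnn_golden_output = [0.0] * (num_steps * num_units)   # assumed golden data

    for i in range(num_steps):
        golden_start = i * num_units
        golden_end = golden_start + num_units
        expected = rnn_golden_output[golden_start:golden_end]
        # ...compare `expected` against the model output of invocation i...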
/packages/modules/NeuralNetworks/common/types/operations/src/
FullyConnected.cpp
43 uint32_t num_units = getSizeOfDimension(weights, 0u); in validateShapes() local
51 if (num_units != 0 && bias_len != 0) { in validateShapes()
52 NN_RET_CHECK_EQ(bias_len, num_units); in validateShapes()
56 NN_RET_CHECK_GT(num_units, 0u); in validateShapes()
59 output->dimensions = {batch_size, num_units}; in validateShapes()
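For FULLY_CONNECTED, validateShapes() reads num_units from dimension 0 of the weights, requires the bias length to match it whenever both are known, and produces an output of shape {batch_size, num_units}. A sketch of those checks; the function and argument names are placeholders:

    def validate_fully_connected_shapes(weights_shape, bias_shape, batch_size):
        # weights_shape: [num_units, input_size]
        # bias_shape:    [num_units] (a dimension of 0 means "unspecified")
        num_units = weights_shape[0]
        bias_len = bias_shape[0]
        if num_units != 0 and bias_len != 0:
            assert bias_len == num_units, "bias length must equal num_units"
        assert num_units > 0, "num_units must be known to shape the output"
        return [batch_size, num_units]                   # output shape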
/packages/modules/NeuralNetworks/tools/api/
types.spec
1170 * [num_units, input_size], where "num_units" corresponds to the number
1172 * * 2: A 1-D tensor, of shape [num_units], specifying the bias. For input
1188 * * 0: The output tensor, of shape [batch_size, num_units]. %{BeforeNNAPILevel3For}
1705 * A 2-D tensor of shape [num_units, input_size], where “num_units
1708 * A 2-D tensor of shape [num_units, input_size].
1710 * A 2-D tensor of shape [num_units, input_size].
1712 * A 2-D tensor of shape [num_units, input_size].
1714 * A 2-D tensor of shape [num_units, output_size], where “output_size”
1715 * corresponds to either the number of cell units (i.e., “num_units”),
1718 * A 2-D tensor of shape [num_units, output_size].
[all …]
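The types.spec excerpts document the same convention at the API level: weight tensors are [num_units, input_size], recurrent weights are [num_units, output_size] where output_size is either num_units or the projection size, biases are [num_units], and the RNN output is [batch_size, num_units]. A compact restatement of those shapes; the role names are paraphrased and the operand indices are omitted:

    # Operand shapes described in the types.spec excerpt above (roles paraphrased).
    def recurrent_op_shapes(num_units, input_size, output_size, batch_size):
        return {
            "input_weights":     [num_units, input_size],
            "recurrent_weights": [num_units, output_size],   # output_size is num_units or the
                                                             # projection size, per the spec text
            "bias":              [num_units],
            "output":            [batch_size, num_units],
        }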