/packages/modules/NeuralNetworks/common/cpu_operations/ |
D | Reshape.cpp |
    40  bool copyData(const void* inputData, const Shape& inputShape, void* outputData,  in copyData() argument
    44  memcpy(outputData, inputData, count);  in copyData()
    50  T* outputData, const Shape& outputShape) {  in depthToSpaceGeneric() argument
    53  outputData, convertShapeToDims(outputShape));  in depthToSpaceGeneric()
    57  int32_t blockSize, float* outputData,
    60  int32_t blockSize, _Float16* outputData,
    63  int32_t blockSize, uint8_t* outputData,
    66  int32_t blockSize, int8_t* outputData,
    71  T* outputData, const Shape& outputShape) {  in spaceToDepthGeneric() argument
    74  outputData, convertShapeToDims(outputShape));  in spaceToDepthGeneric()
    [all …]
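The copyData match above is essentially a raw byte copy: the kernel derives the byte count from the input Shape and forwards to memcpy. A minimal standalone sketch of that idea, assuming the byte count has already been computed (countBytes is a hypothetical parameter, not the NNAPI signature):

#include <cstring>

// Raw tensor copy: the real NNAPI helper derives the byte count from the Shape
// before delegating to memcpy, and reports success as a bool.
bool copyRawTensorData(const void* inputData, void* outputData, size_t countBytes) {
    std::memcpy(outputData, inputData, countBytes);
    return true;
}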
|
D | Activation.cpp |
    54  bool reluFloat(const T* inputData, const Shape& inputShape, T* outputData,  in reluFloat() argument
    59  for (int i = 0; i < numElements; i++, inputData++, outputData++) {  in reluFloat()
    60  *outputData = static_cast<T>(  in reluFloat()
    65  template bool reluFloat<float>(const float* inputData, const Shape& inputShape, float* outputData,
    68  _Float16* outputData, const Shape& outputShape, float reluMin,
    72  bool relu1Float(const T* inputData, const Shape& inputShape, T* outputData,  in relu1Float() argument
    74  return reluFloat(inputData, inputShape, outputData, outputShape, -1.f, 1.f);  in relu1Float()
    76  template bool relu1Float<float>(const float* inputData, const Shape& inputShape, float* outputData,
    79  _Float16* outputData, const Shape& outputShape);
    82  bool relu6Float(const T* inputData, const Shape& inputShape, T* outputData,  in relu6Float() argument
    [all …]
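The Activation matches show the common pattern: reluFloat clamps every element to [reluMin, reluMax], and relu1Float simply forwards with bounds of -1 and 1. A hedged sketch of that clamp loop, with the element count passed explicitly instead of being read from the Shape:

#include <algorithm>
#include <cstddef>

// Clamp-based activation: each element is clamped to [reluMin, reluMax].
template <typename T>
bool reluGeneric(const T* inputData, size_t numElements, T* outputData,
                 float reluMin, float reluMax) {
    for (size_t i = 0; i < numElements; ++i) {
        float v = static_cast<float>(inputData[i]);
        outputData[i] = static_cast<T>(std::min(std::max(v, reluMin), reluMax));
    }
    return true;
}

// RELU1 and RELU6 reuse the same loop with fixed bounds, mirroring how
// relu1Float forwards to reluFloat(..., -1.f, 1.f) in the excerpt above.
template <typename T>
bool relu1Generic(const T* in, size_t n, T* out) { return reluGeneric(in, n, out, -1.f, 1.f); }
template <typename T>
bool relu6Generic(const T* in, size_t n, T* out) { return reluGeneric(in, n, out, 0.f, 6.f); }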
|
D | Pooling.cpp |
    144  float* outputData, const Shape& outputShape) {  in averagePoolNhwc() argument
    149  convertShapeToTflshape(outputShape), outputData);  in averagePoolNhwc()
    154  _Float16* outputData, const Shape& outputShape) {  in averagePoolNhwc() argument
    162  convertFloat32ToFloat16(outputDataFloat32, outputData);  in averagePoolNhwc()
    167  uint8_t* outputData, const Shape& outputShape) {  in averagePoolNhwc() argument
    172  convertShapeToTflshape(outputShape), outputData);  in averagePoolNhwc()
    177  int8_t* outputData, const Shape& outputShape) {  in averagePoolNhwc() argument
    185  outputData);  in averagePoolNhwc()
    190  float* outputData, const Shape& outputShape) {  in l2PoolNhwc() argument
    195  convertShapeToTflshape(outputShape), outputData);  in l2PoolNhwc()
    [all …]
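Several entries in this listing (the _Float16 overload of averagePoolNhwc here, plus meanFloat16, softmaxFloat16, and groupedConvFloat16 below) share one wrapper pattern: widen the half-precision input to float32, run the float32 kernel, then narrow the result back via convertFloat32ToFloat16. A sketch of that pattern, assuming a Clang/GCC target that supports _Float16; the helper names are hypothetical stand-ins, not the real NNAPI utilities:

#include <cstddef>
#include <vector>

// Hypothetical stand-ins for the NNAPI float16 <-> float32 conversion helpers.
static void widenToFloat32(const _Float16* in, size_t n, std::vector<float>* out) {
    out->resize(n);
    for (size_t i = 0; i < n; ++i) (*out)[i] = static_cast<float>(in[i]);
}
static void narrowToFloat16(const std::vector<float>& in, _Float16* out) {
    for (size_t i = 0; i < in.size(); ++i) out[i] = static_cast<_Float16>(in[i]);
}

// Reuse any float32 kernel for _Float16 tensors by converting on the way in and out.
template <typename Float32Kernel>
bool runKernelInFloat16(const _Float16* inputData, size_t numElements,
                        _Float16* outputData, Float32Kernel kernel) {
    std::vector<float> input32;
    std::vector<float> output32(numElements);
    widenToFloat32(inputData, numElements, &input32);
    if (!kernel(input32.data(), numElements, output32.data())) return false;
    narrowToFloat16(output32, outputData);
    return true;
}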
|
D | L2Normalization.cpp |
    47  float* outputData, const Shape& /*outputShape*/) {  in l2normFloat32Impl() argument
    57  float* outputBeg = outputData + outer * axisSize * innerSize;  in l2normFloat32Impl()
    75  uint8_t* outputData, const Shape& /*outputShape*/) {  in l2normQuant8Impl() argument
    84  uint8_t* outputBeg = outputData + outer * axisSize * innerSize;  in l2normQuant8Impl()
    107  int8_t* outputData, const Shape& /*outputShape*/) {  in l2normQuant8SignedImpl() argument
    116  int8_t* outputBeg = outputData + outer * axisSize * innerSize;  in l2normQuant8SignedImpl()
    137  bool l2normFloat32(const float* inputData, const Shape& inputShape, int32_t axis, float* outputData,  in l2normFloat32() argument
    146  convertShapeToTflshape(outputShape), outputData);  in l2normFloat32()
    149  return l2normFloat32Impl(inputData, inputShape, axis, outputData, outputShape);  in l2normFloat32()
    154  _Float16* outputData, const Shape& outputShape) {  in l2normFloat16() argument
    [all …]
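The three l2norm*Impl variants index the tensor as [outerSize, axisSize, innerSize] (see the outputBeg pointer arithmetic above) and divide each axis slice by its Euclidean norm. A hedged float-only sketch of that loop structure; the zero-norm guard is an assumption, not something visible in the excerpt:

#include <cmath>
#include <cstddef>

// L2-normalize along the middle ("axis") dimension of a tensor viewed as
// [outerSize, axisSize, innerSize]: each slice is divided by its Euclidean norm.
void l2NormalizeFloat(const float* inputData, float* outputData,
                      size_t outerSize, size_t axisSize, size_t innerSize) {
    for (size_t outer = 0; outer < outerSize; ++outer) {
        const float* inputBeg = inputData + outer * axisSize * innerSize;
        float* outputBeg = outputData + outer * axisSize * innerSize;
        for (size_t inner = 0; inner < innerSize; ++inner) {
            float sumSq = 0.f;
            for (size_t i = 0; i < axisSize; ++i) {
                float v = inputBeg[i * innerSize + inner];
                sumSq += v * v;
            }
            // Small floor to avoid dividing by zero; the exact guard used by the
            // real kernel is not shown in the excerpt.
            float norm = std::sqrt(sumSq > 1e-6f ? sumSq : 1e-6f);
            for (size_t i = 0; i < axisSize; ++i) {
                outputBeg[i * innerSize + inner] = inputBeg[i * innerSize + inner] / norm;
            }
        }
    }
}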
|
D | SimpleMath.cpp |
    40  const Shape& axisShape, bool keepDims, _Float16* outputData,  in meanFloat16() argument
    49  convertFloat32ToFloat16(outputDataFloat32, outputData);  in meanFloat16()
    55  bool keepDims, T* outputData, const Shape& outputShape) {  in meanGeneric() argument
    73  getNumberOfDimensions(inputShape), outputData,  in meanGeneric()
    85  float* outputData, const Shape& outputShape);
    88  bool keepDims, uint8_t* outputData,
    92  bool keepDims, int8_t* outputData,
|
D | LocalResponseNormalization.cpp |
    47  int32_t axis, float* outputData,  in localResponseNormFloat32Impl() argument
    56  float* outputBase = outputData + outer * axisSize * innerSize;  in localResponseNormFloat32Impl()
    77  T beta, int32_t axis, T* outputData, const Shape& outputShape);
    81  float bias, float alpha, float beta, int32_t axis, float* outputData,  in localResponseNorm() argument
    93  convertShapeToTflshape(outputShape), outputData);  in localResponseNorm()
    97  outputData, outputShape);  in localResponseNorm()
    104  _Float16* outputData, const Shape& outputShape) {  in localResponseNorm() argument
    112  convertFloat32ToFloat16(outputDataFloat32, outputData);  in localResponseNorm()
|
D | Softmax.cpp |
    51  int32_t axis, float* outputData, const Shape& /*outputShape*/) {  in softmaxSlowFloat32() argument
    60  float* outputBeg = outputData + outer * axisSize * innerSize;  in softmaxSlowFloat32()
    83  float* outputData, const Shape& outputShape) {  in softmaxFloat32() argument
    91  convertShapeToTflshape(outputShape), outputData);  in softmaxFloat32()
    94  return softmaxSlowFloat32(inputData, inputShape, beta, axis, outputData, outputShape);  in softmaxFloat32()
    99  int32_t axis, _Float16* outputData, const Shape& outputShape) {  in softmaxFloat16() argument
    107  convertFloat32ToFloat16(outputData_float32, outputData);  in softmaxFloat16()
    115  T* outputData, const Shape& /*outputShape*/) {  in softmaxQuant8Impl() argument
    135  T* outputBeg = outputData + outer * axisSize * innerSize;  in softmaxQuant8Impl()
    201  T* outputData, const Shape& outputShape) {  in softmaxQuant8() argument
    [all …]
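softmaxSlowFloat32 uses the same [outer, axis, inner] walk to compute softmax with a beta temperature, while softmaxFloat32 dispatches to an optimized path where possible. A hedged reference sketch of the slow path; subtracting the slice maximum before exponentiating is standard practice and is assumed here rather than read off the excerpt:

#include <algorithm>
#include <cmath>
#include <cstddef>

// Reference softmax over the "axis" dimension of a tensor viewed as
// [outerSize, axisSize, innerSize], with a beta temperature factor.
void softmaxFloatReference(const float* inputData, float* outputData, float beta,
                           size_t outerSize, size_t axisSize, size_t innerSize) {
    for (size_t outer = 0; outer < outerSize; ++outer) {
        const float* inputBeg = inputData + outer * axisSize * innerSize;
        float* outputBeg = outputData + outer * axisSize * innerSize;
        for (size_t inner = 0; inner < innerSize; ++inner) {
            // Subtract the slice maximum before exponentiating to avoid overflow.
            float maxVal = inputBeg[inner];
            for (size_t i = 1; i < axisSize; ++i) {
                maxVal = std::max(maxVal, inputBeg[i * innerSize + inner]);
            }
            float sum = 0.f;
            for (size_t i = 0; i < axisSize; ++i) {
                float e = std::exp((inputBeg[i * innerSize + inner] - maxVal) * beta);
                outputBeg[i * innerSize + inner] = e;
                sum += e;
            }
            for (size_t i = 0; i < axisSize; ++i) {
                outputBeg[i * innerSize + inner] /= sum;
            }
        }
    }
}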
|
D | Pow.cpp |
    35  const Shape& exponentShape, T* outputData, const Shape& outputShape) {  in evalGeneric() argument
    50  outputData[outputFlatIndex] = std::pow(static_cast<float>(baseData[baseFlatIndex]),  in evalGeneric()
    70  const Shape& exponentShape, void* outputData, const Shape& outputShape) {  in eval() argument
    75  reinterpret_cast<_Float16*>(outputData), outputShape);  in eval()
    80  reinterpret_cast<float*>(outputData), outputShape);  in eval()
|
D | Cast.cpp |
    44  bool copyToTensor(const FromT* inputData, int numElements, uint8_t* outputData,  in copyToTensor() argument
    49  copyCast(inputData, reinterpret_cast<dataType*>(outputData), numElements); \  in copyToTensor()
    72  bool eval(const uint8_t* inputData, const Shape& inputShape, uint8_t* outputData,  in eval() argument
    80  copyToTensor(reinterpret_cast<const dataType*>(inputData), numElements, outputData, \  in eval()
    92  return copyData(inputData, inputShape, outputData, outputShape);  in eval()
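Cast's eval switches on the source and destination operand types and, when they differ, hands off to an element-wise conversion (copyCast in the excerpt); identical types fall back to the plain copyData byte copy. A hedged sketch of what such an element-wise cast amounts to, not the exact NNAPI implementation:

#include <cstddef>

// Element-wise conversion between tensor element types; the copyToTensor/eval
// pair above selects the concrete destination type via a type switch before
// calling a loop of this shape.
template <typename FromT, typename ToT>
void copyCastSketch(const FromT* inputData, ToT* outputData, size_t numElements) {
    for (size_t i = 0; i < numElements; ++i) {
        outputData[i] = static_cast<ToT>(inputData[i]);
    }
}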
|
D | FullyConnected.cpp |
    55  float* outputData, const Shape& outputShape) {  in fullyConnectedFloat32() argument
    70  outputData, convertShapeToDims(outputShape));  in fullyConnectedFloat32()
    77  outputData, convertShapeToDims(outputShape));  in fullyConnectedFloat32()
    85  _Float16* outputData, const Shape& outputShape) {  in fullyConnectedFloat16() argument
    98  convertFloat32ToFloat16(outputDataFloat32, outputData);  in fullyConnectedFloat16()
    106  uint8_t* outputData, const Shape& outputShape) {  in fullyConnectedQuant8() argument
    138  outputActivationMin, outputActivationMax, outputData,  in fullyConnectedQuant8()
    147  int8_t* outputData, const Shape& outputShape) {  in fullyConnectedQuant8() argument
    175  biasData, convertShapeToTflshape(outputShape), outputData);  in fullyConnectedQuant8()
|
D | ResizeImageOps.cpp |
    62  bool halfPixelCenters, T* outputData, const Shape& outputShape) {  in resizeNearestNeighbor() argument
    94  outputData + b * outHeight * outWidth * channels +  in resizeNearestNeighbor()
    105  bool alignCorners, bool halfPixelCenters, T* outputData,  in resizeImageOpNhwc() argument
    120  outDimData, convertShapeToTflshape(outputShape), outputData);  in resizeImageOpNhwc()
    124  resizeNearestNeighbor(inputData, inputShape, alignCorners, halfPixelCenters, outputData,  in resizeImageOpNhwc()
    133  _Float16* outputData, const Shape& outputShape) {  in resizeImageOpNhwc() argument
    140  convertFloat32ToFloat16(outputData_float32, outputData);  in resizeImageOpNhwc()
    146  bool alignCorners, bool halfPixelCenters, T* outputData,  in resizeImageOp() argument
    151  NN_RET_CHECK(output.initialize(outputData, outputShape));  in resizeImageOp()
|
D | GroupedConv2D.cpp |
    54  int32_t numGroups, int32_t activation, float* outputData,  in groupedConvFloat32() argument
    63  float* outPtr = outputData;  in groupedConvFloat32()
    112  int32_t numGroups, int32_t activation, T* outputData,  in groupedConvQuant8() argument
    135  T* outPtr = outputData;  in groupedConvQuant8()
    191  int32_t numGroups, int32_t activation, int8_t* outputData,
    200  int32_t numGroups, int32_t activation, uint8_t* outputData,
    211  T* outputData, const Shape& outputShape) {  in groupedConvQuant8PerChannel() argument
    240  T* outPtr = outputData;  in groupedConvQuant8PerChannel()
    295  int32_t activation, _Float16* outputData, const Shape& outputShape) {  in groupedConvFloat16() argument
    311  convertFloat32ToFloat16(outputData_float32, outputData);  in groupedConvFloat16()
    [all …]
|
D | RNN.cpp |
    119  const int32_t activation, T* outputData) {  in RNNStep() argument
    128  /*outputBatchStride=*/numUnits, /*outputBatchOffset=*/0, outputData);  in RNNStep()
    141  const uint32_t outputBatchStride, const uint32_t outputBatchOffset, T* outputData,  in RNNStep() argument
    168  T* output_ptr_batch = outputData + b * outputBatchStride + outputBatchOffset;  in RNNStep()
    228  _Float16* outputData);
    237  const uint32_t outputBatchOffset, _Float16* outputData,
    244  float* outputData);
    253  float* outputData, float* hiddenStateOutput);
|
D | ArgMinMax.cpp |
    32  Out* outputData, const Shape& /*outputShape*/) {  in argMinMaxImpl() argument
    48  outputData[outer * innerSize + inner] = minMaxIndex;  in argMinMaxImpl()
    54  uint8_t* outputData, const Shape& outputShape) {  in argMinMaxGeneric() argument
    62  reinterpret_cast<int32_t*>(outputData), outputShape); \  in argMinMaxGeneric()
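argMinMaxImpl records, for every (outer, inner) position, the index along the reduced axis of the smallest or largest element, writing int32 indices into outputData (argMinMaxGeneric reinterprets the output buffer accordingly). A hedged standalone sketch of that reduction; the isArgMin flag is an illustrative parameter, not the NNAPI signature:

#include <cstddef>
#include <cstdint>

// For each (outer, inner) position, write the index along the reduced axis of the
// smallest (isArgMin = true) or largest (isArgMin = false) element.
template <typename In>
void argMinMax(const In* inputData, int32_t* outputData, bool isArgMin,
               size_t outerSize, size_t axisSize, size_t innerSize) {
    for (size_t outer = 0; outer < outerSize; ++outer) {
        for (size_t inner = 0; inner < innerSize; ++inner) {
            const In* slice = inputData + outer * axisSize * innerSize + inner;
            int32_t minMaxIndex = 0;
            In minMaxValue = slice[0];
            for (size_t i = 1; i < axisSize; ++i) {
                In v = slice[i * innerSize];
                bool replace = isArgMin ? (v < minMaxValue) : (v > minMaxValue);
                if (replace) {
                    minMaxValue = v;
                    minMaxIndex = static_cast<int32_t>(i);
                }
            }
            outputData[outer * innerSize + inner] = minMaxIndex;
        }
    }
}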
|
D | TransposeConv2D.cpp |
    124  const TransposeConv2dParam& param, float* outputData,  in transposeConvNhwc() argument
    132  memset(outputData, 0, getNumberOfElements(outputShape) * sizeof(float));  in transposeConvNhwc()
    135  float* outputBase = outputData;  in transposeConvNhwc()
    167  float* outPtr = outputData;  in transposeConvNhwc()
    181  const TransposeConv2dParam& param, T* outputData, const Shape& outputShape) {  in transposeConvNhwc() argument
    259  T* outPtr = outputData;  in transposeConvNhwc()
    276  const TransposeConv2dParam& param, _Float16* outputData,  in transposeConvNhwc() argument
    291  convertFloat32ToFloat16(outputData_float32, outputData);  in transposeConvNhwc()
    299  const TransposeConv2dParam& param, T_Input* outputData,  in transposeConv() argument
    304  NN_RET_CHECK(output.initialize(outputData, outputShape));  in transposeConv()
    [all …]
|
D | Quantize.cpp |
    36  bool quantizeToQuant8(const T* inputData, uint8_t* outputData, const Shape& outputShape) {  in quantizeToQuant8() argument
    40  outputData[i] = static_cast<uint8_t>(std::max<float>(  in quantizeToQuant8()
    49  bool quantizeToQuant8Signed(const T* inputData, int8_t* outputData, const Shape& outputShape) {  in quantizeToQuant8Signed() argument
    53  outputData[i] = static_cast<int8_t>(std::max<float>(  in quantizeToQuant8Signed()
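quantizeToQuant8 and quantizeToQuant8Signed clamp a scaled, zero-point-shifted value into the 8-bit range. A sketch of the standard affine quantization step these matches implement; in the real kernels the scale and zero point come from the output Shape rather than being passed explicitly:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>

// Affine quantization of float values to unsigned 8-bit:
//   q = clamp(round(x / scale) + zeroPoint, 0, 255)
void quantizeToUint8(const float* inputData, uint8_t* outputData, size_t numElements,
                     float scale, int32_t zeroPoint) {
    for (size_t i = 0; i < numElements; ++i) {
        float q = std::round(inputData[i] / scale) + static_cast<float>(zeroPoint);
        outputData[i] = static_cast<uint8_t>(std::max(0.f, std::min(255.f, q)));
    }
}

// The signed variant is identical except for the [-128, 127] clamp range.
void quantizeToInt8(const float* inputData, int8_t* outputData, size_t numElements,
                    float scale, int32_t zeroPoint) {
    for (size_t i = 0; i < numElements; ++i) {
        float q = std::round(inputData[i] / scale) + static_cast<float>(zeroPoint);
        outputData[i] = static_cast<int8_t>(std::max(-128.f, std::min(127.f, q)));
    }
}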
|
D | RoiPooling.cpp |
    45  T_Input* outputData, const Shape& outputShape) {  in roiPoolingNhwc() argument
    61  T_Input* outPtr = outputData;  in roiPoolingNhwc()
    134  bool useNchw, T_Input* outputData, const Shape& outputShape) {  in roiPooling() argument
    138  NN_RET_CHECK(output.initialize(outputData, outputShape));  in roiPooling()
    151  float widthStride, bool useNchw, uint8_t* outputData,  in roiPooling() argument
    156  batchSplitShape, heightStride, widthStride, useNchw, outputData,  in roiPooling()
    166  float widthStride, bool useNchw, int8_t* outputData,  in roiPooling() argument
    171  batchSplitShape, heightStride, widthStride, useNchw, outputData,  in roiPooling()
|
D | Tile.cpp |
    69  void tileImpl(const T* inputData, const Shape& inputShape, const int32_t* multiples, T* outputData,  in tileImpl() argument
    71  TileOneDimension(inputShape, inputData, multiples, outputData, 0);  in tileImpl()
    91  uint8_t* outputData, const Shape& outputShape) {  in eval() argument
    97  reinterpret_cast<dataType*>(outputData), outputShape); \  in eval()
|
/packages/apps/Camera2/src/com/android/camera/app/ |
D | MemoryQuery.java |
    104  HashMap outputData = new HashMap();  in queryMemory() local
    105  outputData.put(KEY_TIMESTAMP, new Long(timestamp));  in queryMemory()
    106  outputData.put(KEY_MEMORY_AVAILABLE, new Long(availMem));  in queryMemory()
    107  outputData.put(KEY_TOTAL_MEMORY, new Long(totalMem));  in queryMemory()
    108  outputData.put(KEY_TOTAL_PSS, new Long(totalPSS));  in queryMemory()
    109  outputData.put(KEY_LAST_TRIM_LEVEL, new Integer(info.lastTrimLevel));  in queryMemory()
    110  outputData.put(KEY_TOTAL_PRIVATE_DIRTY, new Long(totalPrivateDirty));  in queryMemory()
    111  outputData.put(KEY_TOTAL_SHARED_DIRTY, new Long(totalSharedDirty));  in queryMemory()
    112  outputData.put(KEY_MEMORY_CLASS, new Long(memoryClass));  in queryMemory()
    113  outputData.put(KEY_LARGE_MEMORY_CLASS, new Long(largeMemoryClass));  in queryMemory()
    [all …]
|
/packages/modules/NeuralNetworks/common/include/ |
D | Operations.h |
    32  bool floorFloat16(const _Float16* inputData, _Float16* outputData, const Shape& shape);
    33  bool floorFloat32(const float* inputData, float* outputData, const Shape& shape);
    41  _Float16* outputData, const Shape& outputShape);
    47  int32_t depthMultiplier, int32_t activation, float* outputData,
    55  uint8_t* outputData, const Shape& outputShape);
    63  int32_t depthMultiplier, int32_t activation, uint8_t* outputData,
    68  _Float16* outputData, const Shape& outputShape);
    70  float bias, float alpha, float beta, int32_t axis, float* outputData,
    73  bool copyData(const void* inputData, const Shape& inputShape, void* outputData,
    78  T* outputData, const Shape& outputShape);
    [all …]
|
/packages/modules/OnDevicePersonalization/tests/cts/endtoend/src/com/android/ondevicepersonalization/cts/e2e/ |
D | InferenceInputTest.java |
    50  HashMap<Integer, Object> outputData = new HashMap<>();  in buildInput_success() local
    51  outputData.put(0, new float[1]);  in buildInput_success()
    61  new InferenceOutput.Builder().setDataOutputs(outputData).build())  in buildInput_success()
    74  HashMap<Integer, Object> outputData = new HashMap<>();  in buildInputWithSetters() local
    75  outputData.put(0, new float[1]);  in buildInputWithSetters()
    85  new InferenceOutput.Builder().setDataOutputs(outputData).build())  in buildInputWithSetters()
    90  new InferenceOutput.Builder().setDataOutputs(outputData).build())  in buildInputWithSetters()
    102  HashMap<Integer, Object> outputData = new HashMap<>();  in buildInput_batchNotSet_success() local
    103  outputData.put(0, new float[1]);  in buildInput_batchNotSet_success()
    113  new InferenceOutput.Builder().setDataOutputs(outputData).build())  in buildInput_batchNotSet_success()
|
D | InferenceOutputTest.java |
    33  HashMap<Integer, Object> outputData = new HashMap<>();  in build_success() local
    34  outputData.put(0, new float[] {1.0f});  in build_success()
    35  InferenceOutput output = new InferenceOutput.Builder().setDataOutputs(outputData).build();  in build_success()
|
/packages/modules/OnDevicePersonalization/tests/frameworktests/src/android/adservices/ondevicepersonalization/ |
D | InferenceInputTest.java |
    58  HashMap<Integer, Object> outputData = new HashMap<>();  in buildInferenceInput_reusable() local
    59  outputData.put(0, new float[1]);  in buildInferenceInput_reusable()
    69  new InferenceOutput.Builder().setDataOutputs(outputData).build())  in buildInferenceInput_reusable()
    78  HashMap<Integer, Object> outputData = new HashMap<>();  in buildInput_success() local
    79  outputData.put(0, new float[1]);  in buildInput_success()
    89  new InferenceOutput.Builder().setDataOutputs(outputData).build())  in buildInput_success()
    102  HashMap<Integer, Object> outputData = new HashMap<>();  in buildInput_batchNotSet_success() local
    103  outputData.put(0, new float[1]);  in buildInput_batchNotSet_success()
    113  new InferenceOutput.Builder().setDataOutputs(outputData).build())  in buildInput_batchNotSet_success()
|
D | ModelManagerTest.java |
    68  HashMap<Integer, Object> outputData = new HashMap<>();  in runInference_success() local
    69  outputData.put(0, new float[1]);  in runInference_success()
    76  new InferenceOutput.Builder().setDataOutputs(outputData).build())  in runInference_success()
    91  HashMap<Integer, Object> outputData = new HashMap<>();  in runInference_error() local
    92  outputData.put(0, new float[1]);  in runInference_error()
    100  new InferenceOutput.Builder().setDataOutputs(outputData).build())  in runInference_error()
    121  HashMap<Integer, Object> outputData = new HashMap<>();  in runInference_resultMissingInferenceOutput() local
    122  outputData.put(0, new float[1]);  in runInference_resultMissingInferenceOutput()
    130  new InferenceOutput.Builder().setDataOutputs(outputData).build())  in runInference_resultMissingInferenceOutput()
|
/packages/modules/NeuralNetworks/runtime/test/specs/experimental/ |
D | densify_8.mod.py |
    26  outputData = [0.0] * 210  variable
    27  outputData[22] = 11.0
    28  outputData[51] = 13.0
    29  outputData[80] = 17.0
    30  outputData[129] = 19.0
    32  outputData}
|