#
# Copyright (C) 2017 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

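# Basic RNN test: 2 batches, 16 units, 8 inputs, TENSOR_FLOAT32 operands,
# ReLU activation. Both batches are fed identical inputs and an identical
# initial hidden state, so they are expected to produce identical outputs.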
batches = 2
units = 16
input_size = 8

model = Model()

input = Input("input", "TENSOR_FLOAT32", "{%d, %d}" % (batches, input_size))
weights = Input("weights", "TENSOR_FLOAT32", "{%d, %d}" % (units, input_size))
recurrent_weights = Input("recurrent_weights", "TENSOR_FLOAT32", "{%d, %d}" % (units, units))
bias = Input("bias", "TENSOR_FLOAT32", "{%d}" % (units))
hidden_state_in = Input("hidden_state_in", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))

activation_param = Int32Scalar("activation_param", 1)  # Relu

hidden_state_out = IgnoredOutput("hidden_state_out", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))
output = Output("output", "TENSOR_FLOAT32", "{%d, %d}" % (batches, units))

model = model.Operation("RNN", input, weights, recurrent_weights, bias, hidden_state_in,
                        activation_param).To([hidden_state_out, output])
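# For reference, the RNN op above computes a basic RNN cell (a sketch, assuming
# the standard NNAPI definition; weights is stored row-major as
# [units, input_size] and recurrent_weights as [units, units]):
#   hidden_state_out = ReLU(input * transpose(weights) +
#                           hidden_state_in * transpose(recurrent_weights) + bias)
#   output = hidden_state_out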

input0 = {
    weights: [
        0.461459,    0.153381,   0.529743,    -0.00371218, 0.676267,   -0.211346,
        0.317493,    0.969689,   -0.343251,   0.186423,    0.398151,   0.152399,
        0.448504,    0.317662,   0.523556,    -0.323514,   0.480877,   0.333113,
        -0.757714,   -0.674487,  -0.643585,   0.217766,    -0.0251462, 0.79512,
        -0.595574,   -0.422444,  0.371572,    -0.452178,   -0.556069,  -0.482188,
        -0.685456,   -0.727851,  0.841829,    0.551535,    -0.232336,  0.729158,
        -0.00294906, -0.69754,   0.766073,    -0.178424,   0.369513,   -0.423241,
        0.548547,    -0.0152023, -0.757482,   -0.85491,    0.251331,   -0.989183,
        0.306261,    -0.340716,  0.886103,    -0.0726757,  -0.723523,  -0.784303,
        0.0354295,   0.566564,   -0.485469,   -0.620498,   0.832546,   0.697884,
        -0.279115,   0.294415,   -0.584313,   0.548772,    0.0648819,  0.968726,
        0.723834,    -0.0080452, -0.350386,   -0.272803,   0.115121,   -0.412644,
        -0.824713,   -0.992843,  -0.592904,   -0.417893,   0.863791,   -0.423461,
        -0.147601,   -0.770664,  -0.479006,   0.654782,    0.587314,   -0.639158,
        0.816969,    -0.337228,  0.659878,    0.73107,     0.754768,   -0.337042,
        0.0960841,   0.368357,   0.244191,    -0.817703,   -0.211223,  0.442012,
        0.37225,     -0.623598,  -0.405423,   0.455101,    0.673656,   -0.145345,
        -0.511346,   -0.901675,  -0.81252,    -0.127006,   0.809865,   -0.721884,
        0.636255,    0.868989,   -0.347973,   -0.10179,    -0.777449,  0.917274,
        0.819286,    0.206218,   -0.00785118, 0.167141,    0.45872,    0.972934,
        -0.276798,   0.837861,   0.747958,    -0.0151566,  -0.330057,  -0.469077,
        0.277308,    0.415818
    ],
    recurrent_weights: [
        0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0.1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
        0.1
    ],
    bias: [
        0.065691948, -0.69055247, 0.1107955, -0.97084129, -0.23957068,
        -0.23566568, -0.389184, 0.47481549, -0.4791103, 0.29931796,
        0.10463274, 0.83918178, 0.37197268, 0.61957061, 0.3956964,
        -0.37609905
    ],
}

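# Run-time inputs: 2 batches x 8 input values and 2 batches x 16 initial
# hidden-state values; the two batch rows are identical.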
input0[input] = [
  -0.69424844, -0.93421471, -0.87287879, 0.37144363,
  -0.62476718, 0.23791671, 0.40060222, 0.1356622,
  -0.69424844, -0.93421471, -0.87287879, 0.37144363,
  -0.62476718, 0.23791671, 0.40060222, 0.1356622,
]
input0[hidden_state_in] = [
  0.496726, 0, 0.965996, 0,
  0.0584256, 0, 0, 0.12315,
  0, 0, 0.612267, 0.456601,
  0, 0.52286, 1.16099, 0.0291233,
  0.496726, 0, 0.965996, 0,
  0.0584256, 0, 0, 0.12315,
  0, 0, 0.612267, 0.456601,
  0, 0.52286, 1.16099, 0.0291233,
]
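# Expected outputs: 2 batches x 16 units. Because the per-batch inputs and
# hidden states are identical, both batch rows are the same.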
output0 = {
  hidden_state_out: [
  0, 0, 0.524902, 0,
  0, 0, 0, 1.02116,
  0, 1.35762, 0, 0.356909,
  0.436415, 0.0355731, 0, 0,
  0, 0, 0.524902, 0,
  0, 0, 0, 1.02116,
  0, 1.35762, 0, 0.356909,
  0.436415, 0.0355731, 0, 0,
  ]
}
output0[output] = [
  0,          0,          0.524901,  0,         0,         0,
  0,          1.02116,    0,         1.35762,   0,         0.356909,
  0.436415,   0.0355727,  0,         0,

  0,          0,          0.524901,  0,         0,         0,
  0,          1.02116,    0,         1.35762,   0,         0.356909,
  0.436415,   0.0355727,  0,         0,
]

Example((input0, output0))
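# Example() registers the (input0, output0) pair defined above; the NNAPI test
# generator tooling is expected to turn it into a generated test case
# (an assumption about the surrounding tooling, not stated in this spec).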