MachineIntelligenceCore:NeuralNets
Linear_tests.hpp
/*!
 * \file Linear_tests.hpp
 * \brief Contains Google Test fixtures for testing the Linear (fully connected) layer.
 */
#ifndef LINEARLAYERTESTS_HPP_
#define LINEARLAYERTESTS_HPP_

#include <gtest/gtest.h>

// Redefine "private" and "protected" as "public", so that every class field/method will be accessible for tests.
#define private public
#define protected public

namespace mic { namespace neural_nets { namespace unit_tests {

/*!
 * Test Fixture - layer of size 1x1, floats, sets W[0] = 1.0 and b[0] = 1.0.
 */
class Linear1x1Float : public ::testing::Test {
public:
	// Constructor. Sets layer size.
	Linear1x1Float () : layer(1,1) { }

protected:
	// Sets W and b.
	virtual void SetUp() {
		(*layer.p["W"])[0] = 1.0;
		(*layer.p["b"])[0] = 1.0;
	}

private:
	// Object to be tested.
	mic::mlnn::fully_connected::Linear<float> layer;
};

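/*!
 * Usage sketch built on the Linear1x1Float fixture: with W[0] = 1.0 and b[0] = 1.0,
 * an input of 2.0 should produce y = W*x + b = 3.0. The forward(x) call is an
 * assumption about the Layer interface - adjust it to the actual API.
 */
TEST_F(Linear1x1Float, Forward_1x1) {
	// Prepare a 1x1 input.
	mic::types::MatrixPtr<float> x = MAKE_MATRIX_PTR(float, 1, 1);
	(*x)[0] = 2.0;

	// Forward pass: expected output y = 1.0 * 2.0 + 1.0 = 3.0.
	mic::types::MatrixPtr<float> y = layer.forward(x);
	ASSERT_FLOAT_EQ((*y)[0], 3.0);
}
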
/*!
 * Test Fixture - layer of size 5x2, floats.
 */
class Linear5x2Float : public ::testing::Test {
public:
	// Constructor. Sets layer size.
	Linear5x2Float () : layer(5,2) { }

private:
	// Object to be tested.
	mic::mlnn::fully_connected::Linear<float> layer;
};

/*!
 * Test Fixture - layer of size 2x3, floats, sets all internal and external values.
 */
class Linear2x3Float : public ::testing::Test {
public:
	// Constructor. Sets layer size and allocates the test matrices.
	Linear2x3Float () : layer(2,3) {
		const_x = MAKE_MATRIX_PTR(float, 2, 1);
		const_dy = MAKE_MATRIX_PTR(float, 3, 1);
		target_y = MAKE_MATRIX_PTR(float, 3, 1);
	}

protected:
	// Sets values of W, b and the test matrices.
	virtual void SetUp() {
		(*layer.p["W"]) << 1, 2, 3, 5, 6, 9;
		(*layer.p["b"]) << -3, -2, -1;

		(*const_x) << -1, 1;

		(*const_dy) << -1, -2, 1;

		(*target_y) << -1, -2, 1;

		// Reset state and gradients.
		layer.s["x"]->setZero();
		layer.s["y"]->setZero();
		layer.g["x"]->setZero();
		layer.g["y"]->setZero();
	}

private:
	// Object to be tested.
	mic::mlnn::fully_connected::Linear<float> layer;

	// Test input x - used in forward pass.
	mic::types::MatrixPtr<float> const_x;

	// Test gradient dy - used in backward pass.
	mic::types::MatrixPtr<float> const_dy;

	// Target y values.
	mic::types::MatrixPtr<float> target_y;

	// Loss function.
	mic::neural_nets::loss::SquaredErrorLoss<float> loss;
};

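/*!
 * Usage sketch built on the Linear2x3Float fixture. Assuming W is stored as a 3x2
 * matrix filled row-wise (W = [[1,2],[3,5],[6,9]]), the forward pass for x = [-1,1]
 * gives y = W*x + b = [-2, 0, 2], and back-propagating dy = [-1,-2,1] gives
 * dx = W^T*dy = [-1, -3]. The forward(x)/backward(dy) calls are assumptions about
 * the Layer interface - adjust them to the actual API.
 */
TEST_F(Linear2x3Float, ForwardBackward_2x3) {
	// Forward pass with the fixture's constant input.
	mic::types::MatrixPtr<float> y = layer.forward(const_x);
	ASSERT_FLOAT_EQ((*y)[0], -2.0);
	ASSERT_FLOAT_EQ((*y)[1], 0.0);
	ASSERT_FLOAT_EQ((*y)[2], 2.0);

	// Backward pass with the fixture's constant output gradient.
	mic::types::MatrixPtr<float> dx = layer.backward(const_dy);
	ASSERT_FLOAT_EQ((*dx)[0], -1.0);
	ASSERT_FLOAT_EQ((*dx)[1], -3.0);
}
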
/*!
 * Test Fixture - layer of size 2x3, doubles, sets all internal and external values.
 */
class Linear2x3Double : public ::testing::Test {
public:
	// Constructor. Sets layer size and allocates the test matrices.
	Linear2x3Double () : layer(2,3) {
		const_x = MAKE_MATRIX_PTR(double, 2, 1);
		const_dy = MAKE_MATRIX_PTR(double, 3, 1);
		target_y = MAKE_MATRIX_PTR(double, 3, 1);
	}

protected:
	// Sets values of W, b and the test matrices.
	virtual void SetUp() {
		(*layer.p["W"]) << 1, 2, 3, 5, 6, 9;
		(*layer.p["b"]) << -3, -2, -1;

		(*const_x) << -1, 1;

		(*const_dy) << -1, -2, 1;

		(*target_y) << -1, -2, 1;

		// Reset state and gradients.
		layer.s["x"]->setZero();
		layer.s["y"]->setZero();
		layer.g["x"]->setZero();
		layer.g["y"]->setZero();
	}

private:
	// Object to be tested.
	mic::mlnn::fully_connected::Linear<double> layer;

	// Test input x - used in forward pass.
	mic::types::MatrixPtr<double> const_x;

	// Test gradient dy - used in backward pass.
	mic::types::MatrixPtr<double> const_dy;

	// Target y values.
	mic::types::MatrixPtr<double> target_y;

	// Loss function.
	mic::neural_nets::loss::SquaredErrorLoss<double> loss;
};

/*!
 * Test Fixture - layer of size 50x100, doubles, randomly sets all internal and external values.
 */
class Linear50x100Double : public ::testing::Test {
public:
	// Constructor. Sets layer size and allocates the test matrices.
	Linear50x100Double () : layer(50,100) {
		const_x = MAKE_MATRIX_PTR(double, layer.inputSize(), 1);
		target_y = MAKE_MATRIX_PTR(double, layer.outputSize(), 1);
	}

protected:
	// Sets random values of W, b, x and the target y.
	virtual void SetUp() {

		// Initialize a Mersenne Twister random number generator seeded from the random device.
		std::random_device rd;
		std::mt19937 mt(rd());

		// Initialize W with a Glorot/Xavier uniform range.
		double range = sqrt(6.0 / double(layer.outputSize() + layer.inputSize()));
		std::uniform_real_distribution<double> distW(-range, range);
		for (size_t i = 0; i < layer.outputSize() * layer.inputSize(); i++)
			(*layer.p["W"])[i] = distW(mt);

		// Initialize b.
		std::uniform_real_distribution<double> distb(-5.0, 5.0);
		for (size_t row = 0; row < layer.outputSize(); row++)
			(*layer.p["b"])[row] = distb(mt);

		// Initialize x and the target y.
		std::uniform_real_distribution<double> distxy(-5.0, 5.0);
		for (size_t i = 0; i < layer.inputSize(); i++)
			(*const_x)[i] = distxy(mt);

		for (size_t i = 0; i < layer.outputSize(); i++)
			(*target_y)[i] = distxy(mt);

		// Reset state and gradients.
		layer.s["x"]->setZero();
		layer.s["y"]->setZero();
		layer.g["x"]->setZero();
		layer.g["y"]->setZero();
	}

private:
	// Object to be tested.
	mic::mlnn::fully_connected::Linear<double> layer;

	// Test input x - used in forward pass.
	mic::types::MatrixPtr<double> const_x;

	// Target y values.
	mic::types::MatrixPtr<double> target_y;

	// Loss function.
	mic::neural_nets::loss::SquaredErrorLoss<double> loss;
};

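/*!
 * Usage sketch built on the Linear50x100Double fixture: run the randomly initialized
 * layer forward and evaluate the squared-error loss against the random target. The
 * forward(x) call and the loss interface (calculateLoss()/calculateGradient()) are
 * assumptions - adjust them to the actual Layer/Loss APIs.
 */
TEST_F(Linear50x100Double, ForwardLossGradient) {
	// Forward pass through the randomly initialized layer.
	mic::types::MatrixPtr<double> y = layer.forward(const_x);

	// A squared-error loss is non-negative by definition.
	ASSERT_GE(loss.calculateLoss(target_y, y), 0.0);

	// The loss gradient has one entry per layer output.
	mic::types::MatrixPtr<double> dy = loss.calculateGradient(target_y, y);
	ASSERT_EQ((size_t)dy->size(), layer.outputSize());
}
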
} } } //: namespaces

#endif /* LINEARLAYERTESTS_HPP_ */