Linear_tests.cpp
#include "Linear_tests.hpp"

namespace mic { namespace neural_nets { namespace unit_tests {

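/*!
 * Tests whether the weights W of a 5x2 float layer were initialized to non-zero values
 * and the biases b were set to zeros.
 */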
TEST_F(Linear5x2Float, WbInitialization) {
	for (size_t i=0; i<10; i++)
		ASSERT_NE( (*layer.p["W"])[i], 0.0 ) << "Weight W is zero at position i=" << i;

	for (size_t i=0; i<2; i++)
		ASSERT_EQ( (*layer.p["b"])[i], 0.0 ) << "Bias b is not zero at position i=" << i;
}

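/*!
 * Tests whether none of the weights W is NaN.
 */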
TEST_F(Linear5x2Float, WIsNotNaN) {
	for (size_t i=0; i<10; i++)
		ASSERT_FALSE( std::isnan((*layer.p["W"])[i]) ) << "Weight W is NaN at position i=" << i;
}

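/*!
 * Tests whether none of the weights W is Inf.
 */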
TEST_F(Linear5x2Float, WIsNotInf) {
	for (size_t i=0; i<10; i++)
		ASSERT_FALSE( std::isinf((*layer.p["W"])[i]) ) << "Weight W is Inf at position i=" << i;
}

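/*!
 * Tests whether all the weights W received distinct (random) values.
 */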
TEST_F(Linear5x2Float, WAreDifferent) {
	for (size_t i=0; i<10; i++) {
		for (size_t j=i+1; j<10; j++)
			ASSERT_NE( (*layer.p["W"])[i], (*layer.p["W"])[j] ) << "Weights at positions i=" << i << " and j=" << j << " are equal";
	}//: for i
}

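/*!
 * Tests the forward pass of a 1x1 float layer. The fixture sets W[0] = 1.0 and b[0] = 1.0,
 * so y = W*x + b should yield 1.0 for x = 0.0 and 2.0 for x = 1.0.
 */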
TEST_F(Linear1x1Float, Forward_y) {
	mic::types::MatrixPtr<float> input = MAKE_MATRIX_PTR(float, 1, 1);

	(*input)[0] = 0.0;
	ASSERT_EQ( (*layer.forward( input ))[0], 1.0 );

	(*input)[0] = 1.0;
	ASSERT_EQ( (*layer.forward( input ))[0], 2.0 );
}

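/*!
 * Tests the forward pass of a 2x3 float layer with all internal and external values
 * set by the fixture.
 */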
TEST_F(Linear2x3Float, Forward_y) {
	mic::types::MatrixPtr<float> y = layer.forward(const_x);
	ASSERT_EQ((*y)[0], -2 );
	ASSERT_EQ((*y)[1], 0 );
	ASSERT_EQ((*y)[2], 2 );
}

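/*!
 * Tests the forward pass of two stacked linear layers (1 -> 2 -> 3, floats).
 */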
TEST(LinearStacked1x2x3Float, Forward_y) {
	// Initialize network consisting of two layers.
	// NOTE: the two layer constructions below were elided in the original listing;
	// the sizes (1x2 and 2x3) follow from the test name, while the constructor
	// and namespace are assumptions.
	mic::mlnn::fully_connected::Linear<float> l1(1, 2);
	(*l1.p["W"]) << 1.0, 2.0;
	(*l1.p["b"]) << 0.0, 1.0;

	mic::mlnn::fully_connected::Linear<float> l2(2, 3);
	(*l2.p["W"]) << -1, -2, -3, -5, 6, 9;
	(*l2.p["b"]) << -3, -2, -1;

	// Input.
	mic::types::MatrixPtr<float> x = MAKE_MATRIX_PTR(float, 1, 1);
	(*x) << -1;

	// Check the result.
	mic::types::MatrixPtr<float> y = l2.forward(l1.forward(x));
	ASSERT_EQ((*y)[0], 0 );
	ASSERT_EQ((*y)[1], 6 );
	ASSERT_EQ((*y)[2], -16 );
}

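/*!
 * Tests the backward pass of a 2x1 float layer: for a linear layer dx = W^T * dy.
 */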
TEST(Linear2x1Float, Backward_dx) {
	// NOTE: the layer construction below was elided in the original listing;
	// a 2x1 layer (2 inputs, 1 output) follows from the test name, while the
	// constructor and namespace are assumptions.
	mic::mlnn::fully_connected::Linear<float> layer(2, 1);
	(*layer.p["W"]) << 1.0, 2.0;
	(*layer.p["b"]) << 1.0;

	// Gradient passed from the next layer.
	mic::types::MatrixPtr<float> dy = MAKE_MATRIX_PTR(float, 1, 1);
	(*dy) << 2.0;

	mic::types::MatrixPtr<float> dx = layer.backward(dy);
	ASSERT_EQ((*dx)[0], 2 );
	ASSERT_EQ((*dx)[1], 4 );
}

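/*!
 * Tests the backward pass (gradient dx) of a 2x3 float layer.
 */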
TEST_F(Linear2x3Float, Backward_dx) {
	mic::types::MatrixPtr<float> dx = layer.backward(const_dy);

	// Check dx.
	ASSERT_EQ((*dx)[0], -1);
	ASSERT_EQ((*dx)[1], -3);
}

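/*!
 * Tests the parameter gradients of a 2x3 float layer: for a linear layer
 * dW = dy * x^T and db = dy.
 */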
TEST_F(Linear2x3Float, Backward_dWdb) {
	// Forward pass.
	mic::types::MatrixPtr<float> y = layer.forward(const_x);
	// Backward pass.
	mic::types::MatrixPtr<float> dx = layer.backward(const_dy);

	// Check dW.
	ASSERT_EQ((*layer.g["W"])(0,0), 1);
	ASSERT_EQ((*layer.g["W"])(0,1), -1);
	ASSERT_EQ((*layer.g["W"])(1,0), 2);
	ASSERT_EQ((*layer.g["W"])(1,1), -2);
	ASSERT_EQ((*layer.g["W"])(2,0), -1);
	ASSERT_EQ((*layer.g["W"])(2,1), 1);

	// Check db.
	for (size_t i=0; i<3; i++)
		ASSERT_EQ((*layer.g["b"])[i], (*const_dy)[i]);
}

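/*!
 * Numerical gradient check of dW for a 2x3 double layer: compares the analytical gradient
 * against a finite-difference approximation of the squared-error loss gradient
 * (presumably of the form (L(w+delta) - L(w-delta)) / (2*delta)).
 */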
TEST_F(Linear2x3Double, NumericalGradientCheck_dW) {
	// Calculate gradients.
	mic::types::MatrixPtr<double> predicted_y = layer.forward(const_x);
	mic::types::MatrixPtr<double> dy = loss.calculateGradient(target_y, predicted_y);
	layer.backward(dy);
	// Store the resulting gradients - make a copy!
	mic::types::MatrixPtr<double> dW = MAKE_MATRIX_PTR(double, *layer.g["W"]);

	// Calculate numerical gradients.
	double delta = 1e-5;
	mic::types::MatrixPtr<double> nW = layer.calculateNumericalGradient<mic::neural_nets::loss::SquaredErrorLoss<double> >(const_x, target_y, layer.p["W"], loss, delta);

	// Compare gradients.
	double eps = 1e-8;
	for (size_t i=0; i<(size_t)dW->size(); i++){
		//std::cout << "i=" << i << " (*dW)[i]= " << (*dW)[i] << " (*nW)[i]= " << (*nW)[i] << std::endl;
		EXPECT_LE( fabs((*dW)[i] - (*nW)[i]), eps) << "Too big a difference between dW and numerical dW at position i=" << i;
	}//: for
}

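/*!
 * Numerical gradient check of db for a 2x3 double layer.
 */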
TEST_F(Linear2x3Double, NumericalGradientCheck_db) {
	// Calculate gradients.
	mic::types::MatrixPtr<double> predicted_y = layer.forward(const_x);
	mic::types::MatrixPtr<double> dy = loss.calculateGradient(target_y, predicted_y);
	layer.backward(dy);
	// Store the resulting gradients - make a copy!
	mic::types::MatrixPtr<double> db = MAKE_MATRIX_PTR(double, *layer.g["b"]);

	// Calculate numerical gradients.
	double delta = 1e-5;
	mic::types::MatrixPtr<double> nb = layer.calculateNumericalGradient<mic::neural_nets::loss::SquaredErrorLoss<double> >(const_x, target_y, layer.p["b"], loss, delta);

	// Compare gradients.
	double eps = 1e-8;
	for (size_t i=0; i<(size_t)db->size(); i++)
		EXPECT_LE( fabs((*db)[i] - (*nb)[i]), eps) << "Too big a difference between db and numerical db at position i=" << i;
}

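/*!
 * Numerical gradient check of dx for a 2x3 double layer.
 */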
TEST_F(Linear2x3Double, NumericalGradientCheck_dx) {
	// Calculate gradients.
	mic::types::MatrixPtr<double> predicted_y = layer.forward(const_x);
	mic::types::MatrixPtr<double> dy = loss.calculateGradient(target_y, predicted_y);
	// Store the resulting gradients - make a copy!
	mic::types::MatrixPtr<double> dx = MAKE_MATRIX_PTR(double, *layer.backward(dy));

	// Calculate numerical gradients.
	double delta = 1e-5;
	mic::types::MatrixPtr<double> nx = layer.calculateNumericalGradient<mic::neural_nets::loss::SquaredErrorLoss<double> >(const_x, target_y, const_x, loss, delta);

	// Compare gradients.
	double eps = 1e-8;
	for (size_t i=0; i<(size_t)dx->size(); i++)
		EXPECT_LE( fabs((*dx)[i] - (*nx)[i]), eps) << "Too big a difference between dx and numerical dx at position i=" << i;
}

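/*!
 * Numerical gradient check of dW for a 50x100 double layer with randomly initialized
 * values; note the looser tolerance (eps = 1e-6) for the larger layer.
 */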
TEST_F(Linear50x100Double, NumericalGradientCheck_dW) {
	// Calculate gradients.
	mic::types::MatrixPtr<double> predicted_y = layer.forward(const_x);
	mic::types::MatrixPtr<double> dy = loss.calculateGradient(target_y, predicted_y);
	layer.backward(dy);
	// Store the resulting gradients - make a copy!
	mic::types::MatrixPtr<double> dW = MAKE_MATRIX_PTR(double, *layer.g["W"]);

	//(*layer.p["W"])[0] = 50000.0;

	// Calculate numerical gradients.
	double delta = 1e-5;
	mic::types::MatrixPtr<double> nW = layer.calculateNumericalGradient<mic::neural_nets::loss::SquaredErrorLoss<double> >(const_x, target_y, layer.p["W"], loss, delta);

	// Compare gradients.
	double eps = 1e-6;
	for (size_t i=0; i<(size_t)dW->size(); i++){
		//std::cout << "i=" << i << " (*dW)[i]= " << (*dW)[i] << " (*nW)[i]= " << (*nW)[i] << std::endl;
		EXPECT_LE( fabs((*dW)[i] - (*nW)[i]), eps) << "Too big a difference between dW and numerical dW at position i=" << i;
	}//: for
}

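/*!
 * Numerical gradient check of db for a 50x100 double layer.
 */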
TEST_F(Linear50x100Double, NumericalGradientCheck_db) {
	// Calculate gradients.
	mic::types::MatrixPtr<double> predicted_y = layer.forward(const_x);
	mic::types::MatrixPtr<double> dy = loss.calculateGradient(target_y, predicted_y);
	layer.backward(dy);
	// Store the resulting gradients - make a copy!
	mic::types::MatrixPtr<double> db = MAKE_MATRIX_PTR(double, *layer.g["b"]);

	//(*layer.p["W"])[0] = 50000.0;

	// Calculate numerical gradients.
	double delta = 1e-5;
	mic::types::MatrixPtr<double> nb = layer.calculateNumericalGradient<mic::neural_nets::loss::SquaredErrorLoss<double> >(const_x, target_y, layer.p["b"], loss, delta);

	// Compare gradients.
	double eps = 1e-6;
	for (size_t i=0; i<(size_t)db->size(); i++){
		//std::cout << "i=" << i << " (*db)[i]= " << (*db)[i] << " (*nb)[i]= " << (*nb)[i] << std::endl;
		EXPECT_LE( fabs((*db)[i] - (*nb)[i]), eps) << "Too big a difference between db and numerical db at position i=" << i;
	}//: for
}

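/*!
 * Numerical gradient check of dx for a 50x100 double layer.
 */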
TEST_F(Linear50x100Double, NumericalGradientCheck_dx) {
	// Calculate gradients.
	mic::types::MatrixPtr<double> predicted_y = layer.forward(const_x);
	mic::types::MatrixPtr<double> dy = loss.calculateGradient(target_y, predicted_y);
	// Store the resulting gradients - make a copy!
	mic::types::MatrixPtr<double> dx = MAKE_MATRIX_PTR(double, *layer.backward(dy));

	//(*layer.p["W"])[0] = 50000.0;

	// Calculate numerical gradients.
	double delta = 1e-5;
	mic::types::MatrixPtr<double> nx = layer.calculateNumericalGradient<mic::neural_nets::loss::SquaredErrorLoss<double> >(const_x, target_y, const_x, loss, delta);

	// Compare gradients.
	double eps = 1e-6;
	for (size_t i=0; i<(size_t)dx->size(); i++)
		EXPECT_LE( fabs((*dx)[i] - (*nx)[i]), eps) << "Too big a difference between dx and numerical dx at position i=" << i;
}

} } } //: namespaces


int main(int argc, char **argv) {
	testing::InitGoogleTest(&argc, argv);
	return RUN_ALL_TESTS();
}