MachineIntelligenceCore:NeuralNets
Convolution_tests.hpp
#ifndef CONVOLUTIONLAYERTESTS_HPP_
#define CONVOLUTIONLAYERTESTS_HPP_

#include <gtest/gtest.h>

// Redefine "private" and "protected" so that every class field/method is accessible from the tests.
#define private public
#define protected public

#include <mlnn/convolution/Convolution.hpp>
#include <loss/LossTypes.hpp>

namespace mic { namespace neural_nets { namespace unit_tests {

/*!
 * \brief Test Fixture - layer of input size 2x2x2 and with filter bank of 2 filters of size 1x1 with stride 1 (double).
 */
class Conv2x2x2Filter2x1x1s1Double : public ::testing::Test {
public:
    // Constructor. Sets layer size.
    Conv2x2x2Filter2x1x1s1Double () : layer(2,2,2,2,1,1) {

        x = MAKE_MATRIX_PTR(double, 8, 1);

        desired_y = MAKE_MATRIX_PTR(double, 8, 1);

        dy = MAKE_MATRIX_PTR(double, 8, 1);

        target_y = MAKE_MATRIX_PTR(double, 8, 1);

        desired_dx = MAKE_MATRIX_PTR(double, 8, 1);
        // Number of filters * input channels.
        desired_dW = MAKE_MATRIX_PTR(double, 4, 1);
        // Number of "real neurons".
        desired_db = MAKE_MATRIX_PTR(double, 2, 1);
    }

protected:
    // Sets values.
    virtual void SetUp() {
        (*layer.p["W0x0"]) << 0;
        (*layer.p["W0x1"]) << 2;

        (*layer.p["W1x0"]) << 3;
        (*layer.p["W1x1"]) << 1;

        // Set biases of both neurons.
        (*layer.p["b"]) << 0, 1;

        (*x).enumerate();// << 0, 1, 2, 3, 4, 5, 6, 7;
        (*desired_y) << 8, 10, 12, 14, 5, 9, 13, 17;
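        // Sanity check of the expected output: with 1x1 filters every output value is a
        // per-channel weighted sum of the input, y_o = W_ox0 * x_ch0 + W_ox1 * x_ch1 + b_o.
        // Channel 0 of x is [0,1,2,3] and channel 1 is [4,5,6,7], so
        // filter 0: 0*[0,1,2,3] + 2*[4,5,6,7] + 0 = [8,10,12,14],
        // filter 1: 3*[0,1,2,3] + 1*[4,5,6,7] + 1 = [5,9,13,17].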

        (*dy).enumerate();// << 0, 1, 2, 3, 4, 5, 6, 7;
        (*desired_dx) << 12, 15, 18, 21, 4, 7, 10, 13;
        (*target_y) << 13, 13, 20, 10, 3, 2, 12, 15;

        (*desired_dW) << 14, 126, 38, 38;
        (*desired_db) << 6, 22;
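        // Sanity check of the expected gradients: with dy enumerated as [0..3] for filter 0
        // and [4..7] for filter 1, dx_ch0 = 0*[0..3] + 3*[4..7] = [12,15,18,21],
        // dx_ch1 = 2*[0..3] + 1*[4..7] = [4,7,10,13], and the bias gradients are the
        // per-filter sums of dy: db = [0+1+2+3, 4+5+6+7] = [6, 22].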
    }

private:
    /// Object to be tested.
    mic::mlnn::convolution::Convolution<double> layer;

    // Loss function.
    mic::neural_nets::loss::SquaredErrorLoss<double> loss;

    /// Test x - used in forward pass.
    mic::types::MatrixPtr<double> x;

    /// Desired output for a given x.
    mic::types::MatrixPtr<double> desired_y;

    /// Gradient passed to backpropagation.
    mic::types::MatrixPtr<double> dy;

    /// Target y values.
    mic::types::MatrixPtr<double> target_y;

    /// Desired gradient dx from backpropagation.
    mic::types::MatrixPtr<double> desired_dx;

    /// Desired gradient dW from backpropagation.
    mic::types::MatrixPtr<double> desired_dW;

    /// Desired gradient db from backpropagation.
    mic::types::MatrixPtr<double> desired_db;

};


/*!
 * \brief Test Fixture - layer of input size 3x3x2 and with filter bank of 3 filters of size 2x2 with stride 1 (float).
 */
class Conv3x3x2Filter3x2x2s1Float : public ::testing::Test {
public:
    // Constructor. Sets layer size.
    Conv3x3x2Filter3x2x2s1Float () : layer(3,3,2,3,2,1) {

        x = MAKE_MATRIX_PTR(float, 18, 1);

        desired_y = MAKE_MATRIX_PTR(float, 12, 1);

    }

protected:
    // Sets values.
    virtual void SetUp() {
        (*layer.p["W0x0"]) << 0, 1, 1, 0;
        (*layer.p["W0x1"]) << 0, -1, -1, 0;

        (*layer.p["W1x0"]) << -1, 0, 0, 1;
        (*layer.p["W1x1"]) << 1, 0, 0, -1;

        (*layer.p["W2x0"]) << 0, 0, 1, 1;
        (*layer.p["W2x1"]) << 0, 0, -1, -1;

        // Set biases of all three neurons.
        (*layer.p["b"]) << 1, 0, -1;

        (*x) << 1, 4, 7, 2, 5, 8, 3, 6, 9, 9, 6, 3, 8, 5, 2, 7, 4, 1;
        (*desired_y) << -7, 5, -3, 9, 8, 8, 8, 8, -7, 5, -3, 9;
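        // Sanity check of the first expected value: with column-major storage the top-left
        // 2x2 patch of channel 0 is [1 2; 4 5] and of channel 1 is [9 8; 6 5], so filter 0
        // gives (1*4 + 1*2) + (-1*6 - 1*8) + 1 = -7.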

    }

private:
    /// Object to be tested.
    mic::mlnn::convolution::Convolution<float> layer;

    /// Test x - used in forward pass.
    mic::types::MatrixPtr<float> x;

    /// Desired output for a given x.
    mic::types::MatrixPtr<float> desired_y;

};


/*!
 * \brief Test Fixture - layer of input size 4x4x1 and with filter bank of 1 filter of size 2x2 with stride 2 (float).
 */
class Conv4x4x1Filter1x2x2s2Float : public ::testing::Test {
public:
    // Constructor. Sets layer size.
    Conv4x4x1Filter1x2x2s2Float () : layer(4,4,1,1,2,2) {

        x = MAKE_MATRIX_PTR(float, 16, 1);

        desired_y = MAKE_MATRIX_PTR(float, 4, 1);

        dy = MAKE_MATRIX_PTR(float, 4, 1);

        desired_dx = MAKE_MATRIX_PTR(float, 16, 1);

        desired_dW = MAKE_MATRIX_PTR(float, 4, 1);
        // Number of "real neurons".
        desired_db = MAKE_MATRIX_PTR(float, 1, 1);
    }

protected:
    // Sets values.
    virtual void SetUp() {
        (*layer.p["W0x0"]) << 0, 1, 2, 3;

        // Set neuron bias.
        (*layer.p["b"]) << 0;

        (*x).enumerate();
        (*desired_y) << 24, 36, 72, 84;
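        // Sanity check of the expected output: x is enumerated 0..15 in column-major order,
        // so the top-left 2x2 patch is {0,1,4,5}. With the filter [0,1,2,3] this gives
        // 0*0 + 1*1 + 2*4 + 3*5 = 24; the remaining stride-2 patches give 36, 72 and 84.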

        (*dy).enumerate();
        (*desired_dx) << 0, 0, 0, 1, 0, 0, 2, 3, 0, 2, 0, 3, 4, 6, 6, 9;

        (*desired_dW) << 48, 54, 72, 78;
        (*desired_db) << 6;
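        // Sanity check of the expected gradients: with dy enumerated as [0,1,2,3],
        // db = 0+1+2+3 = 6 and, e.g., dW(0) = 0*0 + 1*2 + 2*8 + 3*10 = 48 (the weight
        // at patch offset (0,0) sees inputs 0, 2, 8 and 10 in the four patches).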
    }

private:
    /// Object to be tested.
    mic::mlnn::convolution::Convolution<float> layer;

    /// Test x - used in forward pass.
    mic::types::MatrixPtr<float> x;

    /// Desired output for a given x.
    mic::types::MatrixPtr<float> desired_y;

    /// Gradient passed to backpropagation.
    mic::types::MatrixPtr<float> dy;

    /// Desired gradient dx from backpropagation.
    mic::types::MatrixPtr<float> desired_dx;

    /// Desired gradient dW from backpropagation.
    mic::types::MatrixPtr<float> desired_dW;

    /// Desired gradient db from backpropagation.
    mic::types::MatrixPtr<float> desired_db;

};


/*!
 * \brief Test Fixture - layer of input size 4x4x1 and with filter bank of 3 filters of size 1x1 with stride 3 (double).
 */
class Conv4x4x1Filter3x1x1s3Double : public ::testing::Test {
public:
    // Constructor. Sets layer size.
    Conv4x4x1Filter3x1x1s3Double () : layer(4,4,1,3,1,3) {

        x = MAKE_MATRIX_PTR(double, 16, 1);

        desired_y = MAKE_MATRIX_PTR(double, 12, 1);

        dy = MAKE_MATRIX_PTR(double, 12, 1);

        //target_y = MAKE_MATRIX_PTR(double, 12, 1);

        desired_dx = MAKE_MATRIX_PTR(double, 16, 1);
        // Number of filters * input channels.
        desired_dW = MAKE_MATRIX_PTR(double, 3, 1);
        // Number of "real neurons".
        desired_db = MAKE_MATRIX_PTR(double, 3, 1);
    }

protected:
    // Sets values.
    virtual void SetUp() {
        (*layer.p["W0x0"]) << 0;
        (*layer.p["W1x0"]) << 1;
        (*layer.p["W2x0"]) << 2;

        // Set biases of neurons.
        (*layer.p["b"]) << -1, 0, 1;

        (*x).enumerate();
        (*desired_y) << -1, -1, -1, -1, 0, 3, 12, 15, 1, 7, 25, 31;
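        // Sanity check of the expected output: a 1x1 filter with stride 3 samples the input
        // at positions {0, 3, 12, 15} (column-major enumeration), so filter 0 (w=0, b=-1)
        // gives [-1,-1,-1,-1], filter 1 (w=1, b=0) gives [0,3,12,15] and
        // filter 2 (w=2, b=1) gives [1,7,25,31].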

        (*dy).enumerate();
        (*desired_dx) << 20, 0, 0, 23, 0, 0, 0, 0, 0, 0, 0, 0, 26, 0, 0, 29;
        //(*target_y) << 13, 13, 20, 10, 3, 2, 12, 15;

        (*desired_dW) << 72, 192, 312;
        (*desired_db) << 6, 22, 38;
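        // Sanity check of the expected gradients: db is the per-filter sum of dy,
        // i.e. [0+1+2+3, 4+5+6+7, 8+9+10+11] = [6, 22, 38], and each dW is the dot product
        // of that filter's dy with the sampled inputs {0,3,12,15}, e.g. for filter 1:
        // 4*0 + 5*3 + 6*12 + 7*15 = 192.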
    }

private:
    /// Object to be tested.
    mic::mlnn::convolution::Convolution<double> layer;

    // Loss function.
    mic::neural_nets::loss::SquaredErrorLoss<double> loss;

    /// Test x - used in forward pass.
    mic::types::MatrixPtr<double> x;

    /// Desired output for a given x.
    mic::types::MatrixPtr<double> desired_y;

    /// Gradient passed to backpropagation.
    mic::types::MatrixPtr<double> dy;

    //mic::types::MatrixPtr<double> target_y;

    /// Desired gradient dx from backpropagation.
    mic::types::MatrixPtr<double> desired_dx;

    /// Desired gradient dW from backpropagation.
    mic::types::MatrixPtr<double> desired_dW;

    /// Desired gradient db from backpropagation.
    mic::types::MatrixPtr<double> desired_db;

};

/*!
 * \brief Test Fixture - layer of input size 5x5x1 and with filter bank of 1 filter of size 3x3 with stride 1 (float).
 */
class Conv5x5x1Filter1x3x3s1Float : public ::testing::Test {
public:
    // Constructor. Sets layer size.
    Conv5x5x1Filter1x3x3s1Float () : layer(5,5,1,1,3,1) {

        x = MAKE_MATRIX_PTR(float, 25, 1);

        desired_y = MAKE_MATRIX_PTR(float, 9, 1);

        dy = MAKE_MATRIX_PTR(float, 9, 1);

    }

protected:
    // Sets values.
    virtual void SetUp() {
        (*layer.p["W0x0"]) << 1, 0, 1, 0, 1, 0, 1, 0, 1;
        (*layer.p["b"]) << 0;

        (*x) << 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0;
        (*desired_y) << 4, 3, 4, 2, 4, 3, 2, 3, 4;
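        // Sanity check of the first expected value: the filter [1,0,1,0,1,0,1,0,1] is an
        // X-shaped 3x3 mask (corners plus centre); in the top-left 3x3 patch of x
        // (column-major) four of those five positions hold ones, so the first output is 4.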

    }

private:
    /// Object to be tested.
    mic::mlnn::convolution::Convolution<float> layer;

    /// Test x - used in forward pass.
    mic::types::MatrixPtr<float> x;

    /// Desired output for a given x.
    mic::types::MatrixPtr<float> desired_y;

    /// Gradient passed to backpropagation.
    mic::types::MatrixPtr<float> dy;

};


/*!
 * \brief Test Fixture - layer of input size 5x5x1 and with filter bank of 1 filter of size 2x2 with stride 3 (float).
 */
class Conv5x5x1Filter1x2x2s3Float : public ::testing::Test {
public:
    // Constructor. Sets layer size.
    Conv5x5x1Filter1x2x2s3Float () : layer(5,5,1,1,2,3) {

        x = MAKE_MATRIX_PTR(float, 25, 1);

        desired_y = MAKE_MATRIX_PTR(float, 4, 1);

        dy = MAKE_MATRIX_PTR(float, 4, 1);

        desired_dx = MAKE_MATRIX_PTR(float, 25, 1);
        // Number of filters * input channels.
        desired_dW = MAKE_MATRIX_PTR(float, 4, 1);
        // Number of "real neurons".
        desired_db = MAKE_MATRIX_PTR(float, 1, 1);
    }

protected:
    // Sets values.
    virtual void SetUp() {
        (*layer.p["W0x0"]).enumerate();
        (*layer.p["b"]) << 0;

        (*x).enumerate();
        (*desired_y) << 29, 47, 119, 137;
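        // Sanity check of the expected output: with stride 3 the 2x2 filter [0,1,2,3] is
        // applied at input offsets (0,0), (3,0), (0,3) and (3,3); e.g. the first patch is
        // {0,1,5,6}, giving 0*0 + 1*1 + 2*5 + 3*6 = 29.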

        (*dy).enumerate();
        (*desired_dx) << 0, 0, 0, 0, 1, 0, 0, 0, 2, 3, 0, 0, 0, 0, 0, 0, 2, 0, 0, 3, 4, 6, 0, 6, 9;

        (*desired_dW) << 87, 93, 117, 123;
        (*desired_db) << 6;

    }

private:
    /// Object to be tested.
    mic::mlnn::convolution::Convolution<float> layer;

    /// Test x - used in forward pass.
    mic::types::MatrixPtr<float> x;

    /// Desired output for a given x.
    mic::types::MatrixPtr<float> desired_y;

    /// Gradient passed to backpropagation.
    mic::types::MatrixPtr<float> dy;

    /// Desired gradient dx from backpropagation.
    mic::types::MatrixPtr<float> desired_dx;

    /// Desired gradient dW from backpropagation.
    mic::types::MatrixPtr<float> desired_dW;

    /// Desired gradient db from backpropagation.
    mic::types::MatrixPtr<float> desired_db;
};


/*!
 * \brief Test Fixture - layer of input size 7x7x3 and with filter bank of 2 filters of size 3x3 with stride 2 (float).
 */
class Conv7x7x3Filter3x3x3s2Float : public ::testing::Test {
public:
    // Constructor. Sets layer size.
    Conv7x7x3Filter3x3x3s2Float () : layer(7,7,3,2,3,2) {

        x = MAKE_MATRIX_PTR(float, 7*7*3, 1);

        desired_y = MAKE_MATRIX_PTR(float, 3*3*2, 1);

    }

protected:
    // Sets values.
    virtual void SetUp() {

        // Set weights of first neuron.
        (*layer.p["W0x0"]) << 0, -1, 0, 0, 1, -1, 1, 1, -1;
        (*layer.p["W0x1"]) << 1, 0, 1, 0, -1, -1, 1, 1, -1;
        (*layer.p["W0x2"]) << 1, 1, 0, -1, 1, -1, 1, 0, 1;

        // Set weights of second neuron.
        (*layer.p["W1x0"]) << 1, 1, -1, -1, -1, 1, 0, -1, -1;
        (*layer.p["W1x1"]) << 0, 1, 1, -1, 1, -1, 0, -1, -1;
        (*layer.p["W1x2"]) << 0, 0, 0, 1, 1, -1, -1, 0, 1;

        // Set biases of both neurons.
        (*layer.p["b"]) << 1, 0;

        (*x) <<
            // x[:,:,0]
            0, 0, 0, 0, 0, 0, 0,
            0, 0, 2, 1, 1, 2, 0,
            0, 2, 0, 1, 0, 1, 0,
            0, 2, 2, 2, 0, 0, 0,
            0, 2, 2, 1, 2, 0, 0,
            0, 0, 1, 1, 2, 0, 0,
            0, 0, 0, 0, 0, 0, 0,
            // x[:,:,1]
            0, 0, 0, 0, 0, 0, 0,
            0, 0, 2, 0, 2, 0, 0,
            0, 1, 2, 2, 2, 2, 0,
            0, 0, 0, 0, 1, 2, 0,
            0, 1, 0, 0, 1, 2, 0,
            0, 0, 0, 1, 0, 2, 0,
            0, 0, 0, 0, 0, 0, 0,
            // x[:,:,2]
            0, 0, 0, 0, 0, 0, 0,
            0, 0, 2, 1, 2, 2, 0,
            0, 1, 2, 0, 2, 0, 0,
            0, 2, 0, 1, 0, 2, 0,
            0, 1, 2, 2, 2, 0, 0,
            0, 2, 2, 0, 2, 0, 0,
            0, 0, 0, 0, 0, 0, 0;

        (*desired_y) <<
            // o[:,:,0]
            -2, 3, 10, 7, 12, 11, -1, -1, 0,
            // o[:,:,1]
            -5, -10, -6, 4, -3, 2, 2, 3, 6;
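        // Sanity check of the output shape: a 3x3 filter swept over the 7x7 input with
        // stride 2 yields (7-3)/2+1 = 3 positions per dimension, i.e. 3x3 values per filter
        // and 3*3*2 = 18 values in total, matching the allocation in the constructor.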
    }

private:
    /// Object to be tested.
    mic::mlnn::convolution::Convolution<float> layer;

    /// Test x - used in forward pass.
    mic::types::MatrixPtr<float> x;

    /// Desired output for a given x.
    mic::types::MatrixPtr<float> desired_y;

};


/*!
 * \brief Test Fixture - layer of input size 5x6x1 and with filter bank of 1 filter of size 4x4 with stride 1 (float).
 */
class Conv5x6x1Filter1x4x4s1Float : public ::testing::Test {
public:
    // Constructor. Sets layer size.
    Conv5x6x1Filter1x4x4s1Float () : layer(5,6,1,1,4,1) {

        x = MAKE_MATRIX_PTR(float, 6, 5);

        desired_y = MAKE_MATRIX_PTR(float, 6, 1);

        dy = MAKE_MATRIX_PTR(float, 6, 1);
        desired_dx = MAKE_MATRIX_PTR(float, 30, 1);
    }

protected:
    // Sets values.
    virtual void SetUp() {
        // Fill the filter with values 1..16 and reorder them via an in-place transpose
        // (i.e. lay the values out row-major).
        for (size_t i = 0; i < 16; i++)
            (*layer.p["W0x0"])(i) = i + 1;
        (*layer.p["W0x0"]).resize(4, 4);
        (*layer.p["W0x0"]).transposeInPlace();
        //std::cout << "*layer.p[W0x0] = \n" << (*layer.p["W0x0"]) << std::endl;
        (*layer.p["W0x0"]).resize(1, 4*4);

        // Set neuron bias.
        (*layer.p["b"]) << 0;

        // Fill x with values 1..30 and reorder it via an in-place transpose.
        for (size_t i = 0; i < 30; i++)
            (*x)(i) = i + 1;
        (*x).transposeInPlace();
        //std::cout << "*x = \n" << (*x) << std::endl;
        (*x).resize(30, 1);

        (*desired_y) << 2064, 2880, 2200, 3016, 2336, 3152;
        //std::cout << "*desired_y = \n" << (*desired_y) << std::endl;
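        // Sanity check of the output shape: a 4x4 filter swept with stride 1 over the
        // 6x5 input gives (6-4+1) x (5-4+1) = 3 x 2 = 6 output values.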

        (*dy).enumerate();// << 1, 4, 2, 5, 3, 6;
        // IMPROPER VALUES!
        // (*desired_dx) << 1, 9, 29, 49, 52, 4, 29, 77, 125, 121, 10, 62, 146, 230, 208, 16, 83, 167, 251, 223, 17, 75, 139, 203, 170, 12, 48, 84, 120, 96;
    }

private:
    /// Object to be tested.
    mic::mlnn::convolution::Convolution<float> layer;

    /// Test x - used in forward pass.
    mic::types::MatrixPtr<float> x;

    /// Desired output for a given x.
    mic::types::MatrixPtr<float> desired_y;

    /// Gradient passed to backpropagation.
    mic::types::MatrixPtr<float> dy;

    /// Desired gradient dx from backpropagation.
    mic::types::MatrixPtr<float> desired_dx;

};


/*!
 * \brief Test Fixture - layer of input size 28x28x1 and with filter bank of 2 filters of size 28x28 with stride 1 (double).
 */
class Conv28x28x1Filter2x28x28s1Double : public ::testing::Test {
public:
    // Constructor. Sets layer size.
    // NOTE: constructor arguments reconstructed from the fixture body (8x8 input, 2 filters covering the whole input).
    Conv28x28x1Filter2x28x28s1Double () : layer(8, 8, 1, 2, 8, 1) {

        x = MAKE_MATRIX_PTR(double, 8*8, 1);

        target_y = MAKE_MATRIX_PTR(double, 2, 1);

    }

protected:
    // Sets values.
    virtual void SetUp() {

        // Random input.
        (*x).randn(0, 6.0/(8*8));
        // We want the output to be 1 and 0.
        (*target_y) << 1, 0;

    }

private:
    /// Object to be tested.
    mic::mlnn::convolution::Convolution<double> layer;

    // Loss function - cross entropy.
    mic::neural_nets::loss::SquaredErrorLoss<double> loss;

    /// Test x - used in forward pass.
    mic::types::MatrixPtr<double> x;

    /// Target y values.
    mic::types::MatrixPtr<double> target_y;

};


/*!
 * \brief Test Fixture - layer of input size 8x8x1 and with filter bank of 2 filters of size 4x4 with stride 4 (double).
 */
class Conv8x8x1Filter2x4x4s4Double : public ::testing::Test {
public:
    // Constructor. Sets layer size.
    Conv8x8x1Filter2x4x4s4Double () : layer(28,28,1,2,14,7) {

        x = MAKE_MATRIX_PTR(double, 28*28, 1);

        target_y = MAKE_MATRIX_PTR(double, 8, 1);

    }

protected:
    // Sets values.
    virtual void SetUp() {

        // Random input.
        (*x).randn(0, 6.0/(8*8));
        //(*target_y) << 0.25, 0.5, 1, 0, 0.2, 0.1, 0.3, 0.5;

    }

private:
    /// Object to be tested.
    mic::mlnn::convolution::Convolution<double> layer;

    // Loss function - cross entropy.
    mic::neural_nets::loss::SquaredErrorLoss<double> loss;

    /// Test x - used in forward pass.
    mic::types::MatrixPtr<double> x;

    /// Target y values.
    mic::types::MatrixPtr<double> target_y;

};


} } } //: namespaces

#endif /* CONVOLUTIONLAYERTESTS_HPP_ */