MachineIntelligenceCore:NeuralNets
MultiLayerNeuralNetworkTests.cpp
// Note: the license banner and #include block of the original file are
// collapsed in this listing. The tests below need at least the following;
// the fixture header name is an assumption based on this file's name:
#include <cmath>
#include <iostream>

#include <gtest/gtest.h>

#include "MultiLayerNeuralNetworkTests.hpp"
namespace mic { namespace neural_nets { namespace unit_tests {

/*!
 * Tests the dimensions of a simple 4-layer feed-forward network.
 * (The original TEST_F declaration is collapsed in this listing; the fixture
 * and test names are assumptions.)
 */
TEST_F(Simple2LayerRegressionNN, Dimensions) {

	// Number of layers.
	ASSERT_EQ(nn.layers.size(), 4);

	// First fully connected layer dimensions.
	ASSERT_EQ(nn.layers[0]->inputSize(), 10);
	ASSERT_EQ(nn.layers[0]->batchSize(), 1);
	ASSERT_EQ(nn.layers[0]->outputSize(), 20);

	// Second fully connected layer dimensions.
	ASSERT_EQ(nn.layers[2]->inputSize(), 20);
	ASSERT_EQ(nn.layers[2]->batchSize(), 1);
	ASSERT_EQ(nn.layers[2]->outputSize(), 4);
}
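// For reference, a minimal sketch of the fixture assumed above (the real
// definition lives in the test header, which is not part of this listing;
// the class name, layer types and the pushLayer() call are assumptions):
//
//   class Simple2LayerRegressionNN : public ::testing::Test {
//   protected:
//       Simple2LayerRegressionNN() : nn("simple_linear_network") {
//           // Two fully connected layers (10->20 and 20->4), each followed
//           // by an activation layer - four layers in total, batch size 1.
//           nn.pushLayer(new mic::mlnn::fully_connected::Linear<double>(10, 20));
//           nn.pushLayer(new mic::mlnn::activation_function::ReLU<double>(20));
//           nn.pushLayer(new mic::mlnn::fully_connected::Linear<double>(20, 4));
//           nn.pushLayer(new mic::mlnn::activation_function::ReLU<double>(4));
//       }
//       mic::mlnn::MultiLayerNeuralNetwork<double> nn;
//   };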

/*!
 * Tests batch resizing. (The original TEST_F declaration is collapsed in
 * this listing; the test name is an assumption.)
 */
TEST_F(Simple2LayerRegressionNN, BatchResize) {
	// Check the original batch size of every layer.
	for (size_t i = 0; i < nn.layers.size(); i++)
		ASSERT_EQ(nn.layers[i]->batchSize(), 1);

	// Resize the batch.
	nn.resizeBatch(5);
	// Check the new batch size of every layer.
	for (size_t i = 0; i < nn.layers.size(); i++)
		ASSERT_EQ(nn.layers[i]->batchSize(), 5);
}

/*!
 * Tests network serialization (save and load). (The original TEST_F
 * declaration is collapsed in this listing; the test name is an assumption.)
 */
TEST_F(Simple2LayerRegressionNN, Serialization) {
	// Save network to file.
	const char* fileName = "saved.txt";
	nn.save(fileName);
	std::cout << "Saved network: \n" << nn;

	// Load network from file.
	mic::mlnn::MultiLayerNeuralNetwork<double> restored_nn("simple_linear_network_loaded");
	restored_nn.load(fileName);
	std::cout << "Restored network: \n" << restored_nn;

	// Compare number of layers.
	ASSERT_EQ(nn.layers.size(), restored_nn.layers.size());

	// Compare layers.
	for (size_t i = 0; i < nn.layers.size(); i++) {
		// Compare sizes and types.
		ASSERT_EQ(nn.layers[i]->batchSize(), restored_nn.layers[i]->batchSize());
		ASSERT_EQ(nn.layers[i]->inputSize(), restored_nn.layers[i]->inputSize());
		ASSERT_EQ(nn.layers[i]->outputSize(), restored_nn.layers[i]->outputSize());
		ASSERT_EQ(nn.layers[i]->layer_type, restored_nn.layers[i]->layer_type);
	}//: for

	// TODO: Check loss function.
	// TODO: Check optimization function.
}
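// A hedged sketch of what the two TODO checks above might look like; the
// typeid comparison only assumes the loss object is polymorphic, and no
// accessor beyond the nn.loss member (used in the tests below) is relied on:
//
//   ASSERT_TRUE(restored_nn.loss != nullptr);
//   ASSERT_EQ(std::string(typeid(*nn.loss).name()),
//             std::string(typeid(*restored_nn.loss).name()));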

/*!
 * Tests a single backpropagation step. The Tutorial2LayerNN fixture is a
 * feed-forward net with 2 layers - a "formalized" example from a
 * step-by-step tutorial.
 */
TEST_F(Tutorial2LayerNN, BackpropagationSingleStep) {
	double eps = 1e-5;

	// Forward pass.
	nn.forward(input_x);

	// Lin1 layer output.
	ASSERT_LE( fabs( (*nn.layers[0]->s["y"])[0] - (*ffpass1_lin1_y)[0]), eps);
	ASSERT_LE( fabs( (*nn.layers[0]->s["y"])[1] - (*ffpass1_lin1_y)[1]), eps);
	// Sig1 layer output.
	ASSERT_LE( fabs( (*nn.layers[1]->s["y"])[0] - (*ffpass1_sig1_y)[0]), eps);
	ASSERT_LE( fabs( (*nn.layers[1]->s["y"])[1] - (*ffpass1_sig1_y)[1]), eps);
	// Lin2 layer output.
	ASSERT_LE( fabs( (*nn.layers[2]->s["y"])[0] - (*ffpass1_lin2_y)[0]), eps);
	ASSERT_LE( fabs( (*nn.layers[2]->s["y"])[1] - (*ffpass1_lin2_y)[1]), eps);
	// Sig2 layer output.
	ASSERT_LE( fabs( (*nn.layers[3]->s["y"])[0] - (*ffpass1_sig2_y)[0]), eps);
	ASSERT_LE( fabs( (*nn.layers[3]->s["y"])[1] - (*ffpass1_sig2_y)[1]), eps);

	// Calculate loss.
	double loss = nn.loss->calculateLoss(target_y, nn.getPredictions());
	ASSERT_LE( fabs( loss - ffpass1_loss), eps);

	// Calculate the loss gradient.
	mic::types::MatrixPtr<double> dy = MAKE_MATRIX_PTR(double, 2, 1);
	(*dy) = (*nn.loss->calculateGradient(target_y, nn.getPredictions()));

	// Check the gradient.
	ASSERT_LE( fabs( (*dy)[0] - (*ffpass1_dy)[0]), eps);
	ASSERT_LE( fabs( (*dy)[1] - (*ffpass1_dy)[1]), eps);

	// Backpropagate the gradients from the last layer to the first.
	nn.backward(dy);

	// Check the weight gradients.
	ASSERT_LE( fabs( (*nn.layers[2]->g["W"])[0] - (*bwpass1_lin2_dW)[0]), eps);
	ASSERT_LE( fabs( (*nn.layers[2]->g["W"])[1] - (*bwpass1_lin2_dW)[1]), eps);
	ASSERT_LE( fabs( (*nn.layers[2]->g["W"])[2] - (*bwpass1_lin2_dW)[2]), eps);
	ASSERT_LE( fabs( (*nn.layers[2]->g["W"])[3] - (*bwpass1_lin2_dW)[3]), eps);

	ASSERT_LE( fabs( (*nn.layers[0]->g["W"])[0] - (*bwpass1_lin1_dW)[0]), eps);
	ASSERT_LE( fabs( (*nn.layers[0]->g["W"])[1] - (*bwpass1_lin1_dW)[1]), eps);
	ASSERT_LE( fabs( (*nn.layers[0]->g["W"])[2] - (*bwpass1_lin1_dW)[2]), eps);
	ASSERT_LE( fabs( (*nn.layers[0]->g["W"])[3] - (*bwpass1_lin1_dW)[3]), eps);

	// Apply the changes (learning rate 0.5).
	nn.update(0.5);

	// Check the weights after the update.
	ASSERT_LE( fabs( (*nn.layers[2]->p["W"])[0] - (*bwpass1_lin2_pW_updated)[0]), eps);
	ASSERT_LE( fabs( (*nn.layers[2]->p["W"])[1] - (*bwpass1_lin2_pW_updated)[1]), eps);
	ASSERT_LE( fabs( (*nn.layers[2]->p["W"])[2] - (*bwpass1_lin2_pW_updated)[2]), eps);
	ASSERT_LE( fabs( (*nn.layers[2]->p["W"])[3] - (*bwpass1_lin2_pW_updated)[3]), eps);

	ASSERT_LE( fabs( (*nn.layers[0]->p["W"])[0] - (*bwpass1_lin1_pW_updated)[0]), eps);
	ASSERT_LE( fabs( (*nn.layers[0]->p["W"])[1] - (*bwpass1_lin1_pW_updated)[1]), eps);
	ASSERT_LE( fabs( (*nn.layers[0]->p["W"])[2] - (*bwpass1_lin1_pW_updated)[2]), eps);
	ASSERT_LE( fabs( (*nn.layers[0]->p["W"])[3] - (*bwpass1_lin1_pW_updated)[3]), eps);
}
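// An illustrative helper sketch (an addition, not part of the original
// tests): the element-by-element ASSERT_LE blocks above could be collapsed
// into a loop like the one below. It assumes the matrix type exposes size()
// and operator[] - operator[] is already relied on by the assertions above,
// while size() is an assumption (typical for Eigen-backed matrices).
template <typename MatrixPtrT>
void expectMatricesNear(const MatrixPtrT & a, const MatrixPtrT & b, double eps_) {
	ASSERT_EQ((*a).size(), (*b).size());
	for (size_t i = 0; i < (size_t)(*a).size(); i++)
		EXPECT_LE(fabs((*a)[i] - (*b)[i]), eps_);
}
// Example use: expectMatricesNear(nn.layers[0]->p["W"], bwpass1_lin1_pW_updated, eps);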

/*!
 * Tests a single training step performed with nn.train().
 */
TEST_F(Tutorial2LayerNN, TrainSingleStep) {
	double eps = 1e-5;

	// Perform a single training step.
	double loss = nn.train(input_x, target_y, 0.5);

	// Check the loss.
	ASSERT_LE( fabs( loss - ffpass1_loss), eps);

	// Lin1 layer output.
	ASSERT_LE( fabs( (*nn.layers[0]->s["y"])[0] - (*ffpass1_lin1_y)[0]), eps);
	ASSERT_LE( fabs( (*nn.layers[0]->s["y"])[1] - (*ffpass1_lin1_y)[1]), eps);
	// Sig1 layer output.
	ASSERT_LE( fabs( (*nn.layers[1]->s["y"])[0] - (*ffpass1_sig1_y)[0]), eps);
	ASSERT_LE( fabs( (*nn.layers[1]->s["y"])[1] - (*ffpass1_sig1_y)[1]), eps);
	// Lin2 layer output.
	ASSERT_LE( fabs( (*nn.layers[2]->s["y"])[0] - (*ffpass1_lin2_y)[0]), eps);
	ASSERT_LE( fabs( (*nn.layers[2]->s["y"])[1] - (*ffpass1_lin2_y)[1]), eps);
	// Sig2 layer output.
	ASSERT_LE( fabs( (*nn.layers[3]->s["y"])[0] - (*ffpass1_sig2_y)[0]), eps);
	ASSERT_LE( fabs( (*nn.layers[3]->s["y"])[1] - (*ffpass1_sig2_y)[1]), eps);

	// Check the weights after the update.
	ASSERT_LE( fabs( (*nn.layers[2]->p["W"])[0] - (*bwpass1_lin2_pW_updated)[0]), eps);
	ASSERT_LE( fabs( (*nn.layers[2]->p["W"])[1] - (*bwpass1_lin2_pW_updated)[1]), eps);
	ASSERT_LE( fabs( (*nn.layers[2]->p["W"])[2] - (*bwpass1_lin2_pW_updated)[2]), eps);
	ASSERT_LE( fabs( (*nn.layers[2]->p["W"])[3] - (*bwpass1_lin2_pW_updated)[3]), eps);

	ASSERT_LE( fabs( (*nn.layers[0]->p["W"])[0] - (*bwpass1_lin1_pW_updated)[0]), eps);
	ASSERT_LE( fabs( (*nn.layers[0]->p["W"])[1] - (*bwpass1_lin1_pW_updated)[1]), eps);
	ASSERT_LE( fabs( (*nn.layers[0]->p["W"])[2] - (*bwpass1_lin1_pW_updated)[2]), eps);
	ASSERT_LE( fabs( (*nn.layers[0]->p["W"])[3] - (*bwpass1_lin1_pW_updated)[3]), eps);
}
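// TrainSingleStep is expected to reproduce BackpropagationSingleStep above
// because train() presumably composes the same four steps - a hedged sketch,
// not the verified implementation:
//
//   nn.forward(input_x);
//   double loss = nn.loss->calculateLoss(target_y, nn.getPredictions());
//   nn.backward(nn.loss->calculateGradient(target_y, nn.getPredictions()));
//   nn.update(0.5);  // learning rate as passed to train().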

} } }//: namespaces

int main(int argc, char **argv) {
	testing::InitGoogleTest(&argc, argv);
	return RUN_ALL_TESTS();
}