MachineIntelligenceCore:NeuralNets
 All Classes Namespaces Files Functions Variables Enumerations Enumerator Friends Macros
mnist_convnet_features_visualization_test.cpp
Go to the documentation of this file.
1 
26 #include <boost/thread/thread.hpp>
27 #include <boost/bind.hpp>
28 
29 #include <importers/MNISTMatrixImporter.hpp>
30 
31 #include <logger/Log.hpp>
32 #include <logger/ConsoleOutput.hpp>
33 using namespace mic::logger;
34 
35 #include <application/ApplicationState.hpp>
36 
37 #include <configuration/ParameterServer.hpp>
38 
39 #include <opengl/visualization/WindowManager.hpp>
40 #include <opengl/visualization/WindowGrayscaleBatch.hpp>
41 #include <opengl/visualization/WindowCollectorChart.hpp>
42 using namespace mic::opengl::visualization;
43 
44 // Neural net.
46 using namespace mic::mlnn;
47 
48 // Encoders.
49 #include <encoders/MatrixXfMatrixXfEncoder.hpp>
50 #include <encoders/UIntMatrixXfEncoder.hpp>
51 
// Windows for displaying activations of the first convolutional layer (x, dx, W, dW, y, dy, filter similarity).
53 WindowGrayscaleBatch<float> *w_conv10, *w_conv11, *w_conv12, *w_conv13, *w_conv14, *w_conv15, *w_conv16;
// Windows for displaying activations of the second convolutional layer.
54 WindowGrayscaleBatch<float> *w_conv20, *w_conv21, *w_conv22, *w_conv23, *w_conv24, *w_conv25;
// Windows for displaying activations of the linear layer and the (presumed softmax) output layer.
55 WindowGrayscaleBatch<float> *w_conv30, *w_conv31, *w_conv32, *w_conv33, *w_conv34, *w_conv35;
// Window for displaying a chart with statistics (loss, similarity measures).
57 WindowCollectorChart<float>* w_chart;
// Data collector shared between the training thread and the chart window.
59 mic::utils::DataCollectorPtr<std::string, float> collector_ptr;
60 
61 
// MNIST dataset importer.
63 mic::importers::MNISTMatrixImporter<float>* importer;
// NOTE(review): the declaration of the multi-layer network
// (BackpropagationNeuralNetwork<float> neural_net) was dropped by the
// documentation extraction here -- confirm against the original source.
66 
// MNIST matrix encoder (image -> matrix).
68 mic::encoders::MatrixXfMatrixXfEncoder* mnist_encoder;
// Label-to-matrix encoder (one-hot).
70 mic::encoders::UIntMatrixXfEncoder* label_encoder;
71 
// Size of a single training minibatch.
72 const size_t batch_size = 1;
// File used for loading/saving the network.
// NOTE(review): "convent" looks like a typo for "convnet" in both the
// identifier and the file names below -- kept as-is since the files may
// already exist on disk under these names.
73 const char* convent_filename = "nn_convent.txt";
// CSV file the collected statistics are exported to.
74 const char* convnet_log = "nn_convent_log.csv";
75 
76 
// Batch-processing thread function: builds the convolutional network, imports
// the MNIST dataset, then loops forever (until the application quits) training
// on random minibatches, visualizing layer activations every 10th iteration
// and logging loss/filter-similarity statistics to the chart and a CSV file.
// NOTE(review): this listing comes from a documentation extraction; several
// statements (the Convolution/MaxPooling pushLayer calls and the right-hand
// sides of the getLayer<>() assignments) were dropped -- confirm against the
// original source file before editing.
81 void batch_function (void) {
82 
// Loading the network from file is disabled; a new network is always built.
83 /* if (neural_net.load(convent_filename)) {
84  LOG(LINFO) << "Loaded neural network from a file";
85  } else {*/
86  {
// Network construction. Extraction gap: the Convolution (and possibly
// MaxPooling) pushLayer calls that should interleave with the ELU layers
// below are missing from this listing.
89  neural_net.pushLayer(new ELU<float>(20, 20, 9));
91 
93  neural_net.pushLayer(new ELU<float>(4, 4, 16));
95 
96  neural_net.pushLayer(new Linear<float>(2, 2, 16, 10, 1, 1));
98 
// Abort the program if the layer dimensions do not chain correctly.
99  if (!neural_net.verify())
100  exit(-1);
101 
102 
105 
106  LOG(LINFO) << "Generated new neural network";
107  }//: else
108 
109  // Import data from datasets.
110  if (!importer->importData())
111  exit(-1);
112 
113 
114  size_t iteration = 0;
115 
116  // Retrieve the next minibatch.
117  //mic::types::MNISTBatch bt = importer->getNextBatch();
118  //importer->setNextSampleIndex(5);
119 
120  // Main application loop.
121  while (!APP_STATE->Quit()) {
122 
123  // If not paused.
124  if (!APP_STATE->isPaused()) {
125 
126  // If single step mode - pause after the step.
127  if (APP_STATE->isSingleStepModeOn())
128  APP_STATE->pressPause();
129 
130  { // Enter critical section - with the use of scoped lock from AppState!
131  APP_DATA_SYNCHRONIZATION_SCOPED_LOCK();
132 
133  // Retrieve the next minibatch.
134  mic::types::MNISTBatch<float> bt = importer->getRandomBatch();
135 
136  // Encode data.
137  mic::types::MatrixXfPtr encoded_batch = mnist_encoder->encodeBatch(bt.data());
138  mic::types::MatrixXfPtr encoded_labels = label_encoder->encodeBatch(bt.labels());
139 
// Disabled debugging inputs: synthetic gradient patch / hand-set labels.
140 /* mic::types::MatrixPtr<float> encoded_batch = MAKE_MATRIX_PTR(float, patch_size*patch_size, 1);
141  for (size_t i=0; i<patch_size*patch_size; i++)
142  (*encoded_batch)[i]= 1.0 -(float)i/(patch_size*patch_size);*/
143  /*mic::types::MatrixPtr<float> encoded_labels = MAKE_MATRIX_PTR(float, output_size, 1);
144  encoded_labels->setZero();
145  (*encoded_labels)[0]= 1.0;*/
146  /*(*encoded_labels)[6]= 1.0;
147  (*encoded_labels)[9]= 1.0;
148  (*encoded_labels)[15]= 1.0;*/
149 
// One training step: train(batch, targets, learning_rate_ = 0.001,
// decay_ = 0.0001); returns the loss (see the train() signature).
150  // Train the autoencoder.
151  float loss = neural_net.train (encoded_batch, encoded_labels, 0.001, 0.0001);
152 
153  // Get reconstruction.
154  /*mic::types::MatrixXfPtr encoded_reconstruction = neural_net.getPredictions();
155  std::vector<mic::types::MatrixXfPtr> decoded_reconstruction = mnist_encoder->decodeBatch(encoded_reconstruction);
156  w_reconstruction->setBatchUnsynchronized(decoded_reconstruction);*/
157 
// Refresh all visualization windows every 10th iteration only.
158  if (iteration%10 == 0) {
159  // Visualize the weights.
160  //std::shared_ptr<Layer<float> > layer1 = neural_net.getLayer(3);
161 
// Extraction gap: the right-hand side (neural_net.getLayer<...>(index))
// of this assignment is missing from the listing.
162  std::shared_ptr<mic::mlnn::convolution::Convolution<float> > conv1 =
164  w_conv10->setBatchUnsynchronized(conv1->getInputActivations());
165  w_conv11->setBatchUnsynchronized(conv1->getInputGradientActivations());
166  w_conv12->setBatchUnsynchronized(conv1->getWeightActivations());
167  w_conv13->setBatchUnsynchronized(conv1->getWeightGradientActivations());
168  w_conv14->setBatchUnsynchronized(conv1->getOutputActivations());
169  w_conv15->setBatchUnsynchronized(conv1->getOutputGradientActivations());
170 
171  // Similarity.
172  mic::types::MatrixPtr<float> similarity = conv1->getFilterSimilarityMatrix();
173  w_conv16->setSampleUnsynchronized(similarity);
174 
// Collect the pairwise similarities of the 9 conv1 filters
// (lower triangle of the similarity matrix, j < i).
175  float max_similarity = 0;
176  float mean_similarity = 0;
177  for (size_t i=0; i<9; i++)
178  for (size_t j=0; j<i; j++) {
179  std::string label = "Similarity " + std::to_string(i) + "-" +std::to_string(j);
180  collector_ptr->addDataToContainer(label, (*similarity)(i,j));
181  mean_similarity += (*similarity)(i,j);
182  max_similarity = ((*similarity)(i,j) > max_similarity) ? (*similarity)(i,j) : max_similarity;
183  }//: for
184 
185  collector_ptr->addDataToContainer("Similarity max", max_similarity);
// 1+2+...+8 = 36 pairs with j < i for i in [0, 9) -- the loop count above.
186  mean_similarity /= (1+2+3+4+5+6+7+8);
187  collector_ptr->addDataToContainer("Similarity mean", mean_similarity);
188 
189 
// Extraction gap: the getLayer<>() right-hand side is missing here as well.
190  std::shared_ptr<mic::mlnn::convolution::Convolution<float> > conv2 =
192  w_conv20->setBatchUnsynchronized(conv2->getInputActivations());
193  w_conv21->setBatchUnsynchronized(conv2->getInputGradientActivations());
194  w_conv22->setBatchUnsynchronized(conv2->getWeightActivations());
195  w_conv23->setBatchUnsynchronized(conv2->getWeightGradientActivations());
196  w_conv24->setBatchUnsynchronized(conv2->getOutputActivations());
197  w_conv25->setBatchUnsynchronized(conv2->getOutputGradientActivations());
198 
// Extraction gap: the getLayer<>() right-hand side is missing here as well.
199  std::shared_ptr<mic::mlnn::fully_connected::Linear<float> > lin1 =
201  w_conv30->setBatchUnsynchronized(lin1->getInputActivations());
202  w_conv31->setBatchUnsynchronized(lin1->getInputGradientActivations());
203  w_conv32->setBatchUnsynchronized(lin1->getWeightActivations());
204  w_conv33->setBatchUnsynchronized(lin1->getWeightGradientActivations());
205 
// Layer 7 -- presumably the softmax output layer (windows are titled "SM").
206  std::shared_ptr<Layer<float> > sm1 = neural_net.getLayer(7);
207  w_conv34->setBatchUnsynchronized(sm1->getOutputActivations());
208  w_conv35->setBatchUnsynchronized(sm1->getOutputGradientActivations());
209 
210  // Add data to chart window.
211  collector_ptr->addDataToContainer("Loss", loss);
212  //float reconstruction_error = neural_net.getLayer<mic::mlnn::fully_connected::Linear<float> >(1)->calculateMeanReconstructionError();
213  //collector_ptr->addDataToContainer("Reconstruction Error", reconstruction_error);
214 
215  // Export to file.
216  collector_ptr->exportDataToCsv(convnet_log);
217  }//: if
218 
219  iteration++;
220  //float reconstruction_error = neural_net.getLayer<mic::mlnn::fully_connected::Linear<float> >(1)->calculateMeanReconstructionError();
221 
222  LOG(LINFO) << "Iteration: " << iteration << " loss =" << loss;// << " reconstruction error =" << reconstruction_error;
223  }//: end of critical section
224 
225  }//: if
226 
227  // Sleep.
228  APP_SLEEP();
229  }//: while
230 
231 }//: image_encoder_and_visualization_test
232 
233 
234 
242 int main(int argc, char* argv[]) {
243  // Set console output to logger.
244  LOGGER->addOutput(new ConsoleOutput());
245  LOG(LINFO) << "Logger initialized. Starting application";
246 
247  // Parse parameters.
248  PARAM_SERVER->parseApplicationParameters(argc, argv);
249 
250  // Initilize application state ("touch it") ;)
251  APP_STATE;
252 
253  // Load dataset.
254  importer = new mic::importers::MNISTMatrixImporter<float>();
255  importer->setBatchSize(batch_size);
256 
257  // Initialize the encoders.
258  mnist_encoder = new mic::encoders::MatrixXfMatrixXfEncoder(28, 28);
259  label_encoder = new mic::encoders::UIntMatrixXfEncoder(10);
260 
261  // Set parameters of all property-tree derived objects - USER independent part.
262  PARAM_SERVER->loadPropertiesFromConfiguration();
263 
264  // Initialize property-dependent variables of all registered property-tree objects - USER dependent part.
265  PARAM_SERVER->initializePropertyDependentVariables();
266 
267  // Initialize GLUT! :]
268  VGL_MANAGER->initializeGLUT(argc, argv);
269 
270  // Create batch visualization window.
271  w_conv10 = new WindowGrayscaleBatch<float>("Conv1 x", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 50, 50, 256, 256);
272  w_conv11 = new WindowGrayscaleBatch<float>("Conv1 dx", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 316, 50, 256, 256);
273  w_conv12 = new WindowGrayscaleBatch<float>("Conv1 W", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 562, 50, 256, 256);
274  w_conv13 = new WindowGrayscaleBatch<float>("Conv1 dW", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 818, 50, 256, 256);
275  w_conv14 = new WindowGrayscaleBatch<float>("Conv1 y", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 1074, 50, 256, 256);
276  w_conv15 = new WindowGrayscaleBatch<float>("Conv1 dy", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 1330, 50, 256, 256);
277  w_conv16 = new WindowGrayscaleBatch<float>("Conv1 similarity", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 1586, 50, 256, 256);
278 
279  w_conv20 = new WindowGrayscaleBatch<float>("Conv2 x", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 50, 336, 256, 256);
280  w_conv21 = new WindowGrayscaleBatch<float>("Conv2 dx", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 316, 336, 256, 256);
281  w_conv22 = new WindowGrayscaleBatch<float>("Conv2 W", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 562, 336, 256, 256);
282  w_conv23 = new WindowGrayscaleBatch<float>("Conv2 dW", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 818, 336, 256, 256);
283  w_conv24 = new WindowGrayscaleBatch<float>("Conv2 y", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 1074, 336, 256, 256);
284  w_conv25 = new WindowGrayscaleBatch<float>("Conv2 dy", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 1330, 336, 256, 256);
285 
286  w_conv30 = new WindowGrayscaleBatch<float>("L1 x", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 50, 622, 256, 256);
287  w_conv31 = new WindowGrayscaleBatch<float>("L1 dx", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 316, 622, 256, 256);
288  w_conv32 = new WindowGrayscaleBatch<float>("L1 W", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 562, 622, 256, 256);
289  w_conv33 = new WindowGrayscaleBatch<float>("L1 dW", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 818, 622, 256, 256);
290  w_conv34 = new WindowGrayscaleBatch<float>("SM y", WindowGrayscaleBatch<float>::Norm_None, WindowGrayscaleBatch<float>::Grid_Both, 1074, 622, 256, 256);
291  w_conv35 = new WindowGrayscaleBatch<float>("SM dy", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 1330, 622, 256, 256);
292 
293  // Chart.
294  w_chart = new WindowCollectorChart<float>("Statistics", 60, 878, 512, 256);
295  collector_ptr= std::make_shared < mic::utils::DataCollector<std::string, float> >( );
296  w_chart->setDataCollectorPtr(collector_ptr);
297 
298  // Create data containers.
299  collector_ptr->createContainer("Loss", mic::types::color_rgba(0, 100, 0, 180));
300  //collector_ptr->createContainer("Reconstruction Error", mic::types::color_rgba(255, 255, 255, 180));
301  collector_ptr->createContainer("Similarity max", mic::types::color_rgba(255, 0, 0, 180));
302  collector_ptr->createContainer("Similarity mean", mic::types::color_rgba(0, 0, 255, 180));
303  for (size_t i=0; i<9; i++)
304  for (size_t j=0; j<i; j++) {
305  std::string label = "Similarity " + std::to_string(i) + "-" +std::to_string(j);
306  collector_ptr->createContainer(label, mic::types::color_rgba(255*(9*i+j)/81, 255*(9*i+j)/81, 255*(9*i+j)/81, 180));
307  }
308 
309  boost::thread batch_thread(boost::bind(&batch_function));
310 
311  // Start visualization thread.
312  VGL_MANAGER->startVisualizationLoop();
313 
314  LOG(LINFO) << "Waiting for threads to join...";
315  // End test thread.
316  batch_thread.join();
317  LOG(LINFO) << "Threads joined - ending application";
318 }//: main
WindowCollectorChart< float > * w_chart
Window for displaying chart with statistics.
mic::utils::DataCollectorPtr< std::string, float > collector_ptr
Data collector.
Class implementing the layer with Exponential Linear Unit (ELU). http://arxiv.org/pdf/1511.07289v5.pdf.
Definition: ELU.hpp:39
WindowGrayscaleBatch< float > * w_conv15
mic::encoders::MatrixXfMatrixXfEncoder * mnist_encoder
MNIST matrix encoder.
Layer performing max pooling.
Definition: MaxPooling.hpp:39
WindowGrayscaleBatch< float > * w_conv12
mic::encoders::UIntMatrixXfEncoder * label_encoder
Label-to-matrix encoder (one-hot).
Class implementing cropping operation - crops the size of image (matrix) by a margin of n pixels on each side.
Definition: Cropping.hpp:38
WindowGrayscaleBatch< float > * w_conv34
WindowGrayscaleBatch< float > * w_conv32
WindowGrayscaleBatch< float > * w_conv35
std::shared_ptr< LayerType > getLayer(size_t index_)
WindowGrayscaleBatch< float > * w_conv11
WindowGrayscaleBatch< float > * w_conv14
mic::importers::MNISTMatrixImporter< float > * importer
MNIST importer.
WindowGrayscaleBatch< float > * w_conv20
WindowGrayscaleBatch< float > * w_conv25
WindowGrayscaleBatch< float > * w_conv22
int main(int argc, char *argv[])
Main program function. Runs two threads: main (for GLUT) and another one (for data processing)...
WindowGrayscaleBatch< float > * w_conv33
WindowGrayscaleBatch< float > * w_conv16
WindowGrayscaleBatch< float > * w_conv24
void batch_function(void)
Function for batch sampling.
eT train(mic::types::MatrixPtr< eT > encoded_batch_, mic::types::MatrixPtr< eT > encoded_targets_, eT learning_rate_, eT decay_=0.0f)
WindowGrayscaleBatch< float > * w_conv10
Windows for displaying activations.
BackpropagationNeuralNetwork< float > neural_net
Multi-layer neural network.
WindowGrayscaleBatch< float > * w_conv30
Adam - adaptive moment estimation.
Definition: Adam.hpp:39
WindowGrayscaleBatch< float > * w_conv21
WindowGrayscaleBatch< float > * w_conv31
WindowGrayscaleBatch< float > * w_conv13
WindowGrayscaleBatch< float > * w_conv23