MachineIntelligenceCore:NeuralNets
mnist_mlnn_features_visualization_test.cpp
#include <boost/thread/thread.hpp>
#include <boost/bind.hpp>

#include <importers/MNISTMatrixImporter.hpp>

#include <logger/Log.hpp>
#include <logger/ConsoleOutput.hpp>
using namespace mic::logger;

#include <application/ApplicationState.hpp>

#include <configuration/ParameterServer.hpp>

#include <opengl/visualization/WindowManager.hpp>
#include <opengl/visualization/WindowGrayscaleBatch.hpp>
#include <opengl/visualization/WindowCollectorChart.hpp>
using namespace mic::opengl::visualization;
// Neural net.
// Header assumed for BackpropagationNeuralNetwork (path follows the other MIC includes).
#include <mlnn/BackpropagationNeuralNetwork.hpp>
using namespace mic::mlnn;

// Encoders.
#include <encoders/MatrixXfMatrixXfEncoder.hpp>
#include <encoders/UIntMatrixXfEncoder.hpp>
// Windows for displaying activations.
WindowGrayscaleBatch<float> *w_conv10, *w_conv11, *w_conv12, *w_conv13, *w_conv14, *w_conv15;
WindowGrayscaleBatch<float> *w_conv20, *w_conv21, *w_conv22, *w_conv23, *w_conv24, *w_conv25;
WindowGrayscaleBatch<float> *w_conv30, *w_conv31, *w_conv32, *w_conv33, *w_conv34, *w_conv35;

// Window for displaying a chart with statistics.
WindowCollectorChart<float>* w_chart;
// Data collector.
mic::utils::DataCollectorPtr<std::string, float> collector_ptr;

// MNIST importer.
mic::importers::MNISTMatrixImporter<float>* importer;

// Multi-layer neural network.
BackpropagationNeuralNetwork<float> neural_net;

// MNIST matrix encoder.
mic::encoders::MatrixXfMatrixXfEncoder* mnist_encoder;
// Label to 1-hot matrix encoder.
mic::encoders::UIntMatrixXfEncoder* label_encoder;

const size_t batch_size = 9;
const char* fileName = "nn_autoencoder_weights_visualization.txt";

/*!
 * \brief Function for batch sampling.
 */
void batch_function (void) {

    /* if (neural_net.load(fileName)) {
        LOG(LINFO) << "Loaded neural network from a file";
    } else {*/
    {
        /*neural_net.pushLayer(new mic::mlnn::convolution::Cropping<float>(28, 28, 1, 2));
        neural_net.pushLayer(new Linear<float>(24, 24, 1, 10, 1, 1));
        neural_net.pushLayer(new Softmax<float>(10));*/

        // Fully-connected layer trained as an autoencoder (24x24x1 -> 24x24x1).
        neural_net.pushLayer(new Linear<float>(24, 24, 1, 24, 24, 1));

        if (!neural_net.verify())
            exit(-1);

        LOG(LINFO) << "Generated new neural network";
    }//: else

    // Import data from datasets.
    if (!importer->importData())
        exit(-1);

    size_t iteration = 0;

    // Retrieve the next minibatch.
    //mic::types::MNISTBatch bt = importer->getNextBatch();
    //importer->setNextSampleIndex(5);

    // Main application loop.
    while (!APP_STATE->Quit()) {

        // If not paused.
        if (!APP_STATE->isPaused()) {

            // If single step mode - pause after the step.
            if (APP_STATE->isSingleStepModeOn())
                APP_STATE->pressPause();

            { // Enter critical section - with the use of scoped lock from AppState!
                APP_DATA_SYNCHRONIZATION_SCOPED_LOCK();

                // Retrieve the next minibatch.
                mic::types::MNISTBatch<float> bt = importer->getRandomBatch();

                // Encode data (the encoded labels are not used by the autoencoder training below).
                mic::types::MatrixXfPtr encoded_batch = mnist_encoder->encodeBatch(bt.data());
                mic::types::MatrixXfPtr encoded_labels = label_encoder->encodeBatch(bt.labels());

                // Train the autoencoder: train(inputs, targets, learning_rate, decay) -
                // the encoded batch is passed as both input and reconstruction target.
                float loss = neural_net.train (encoded_batch, encoded_batch, 0.001, 0.0001);

                if (iteration % 10 == 0) {

                    // Get the Linear (autoencoder) layer - getLayer() returns a std::shared_ptr to the
                    // layer at the given index (the same index is used below for the reconstruction error).
                    std::shared_ptr<mic::mlnn::fully_connected::Linear<float> > lin1 =
                        neural_net.getLayer<mic::mlnn::fully_connected::Linear<float> >(1);

                    // Visualize the layer: inputs, weights, outputs and their gradients.
                    w_conv10->setBatchUnsynchronized(lin1->getInputActivations());
                    w_conv11->setBatchUnsynchronized(lin1->getInputGradientActivations());
                    w_conv12->setBatchUnsynchronized(lin1->getWeightActivations());
                    w_conv13->setBatchUnsynchronized(lin1->getWeightGradientActivations());
                    w_conv14->setBatchUnsynchronized(lin1->getOutputActivations());
                    w_conv15->setBatchUnsynchronized(lin1->getOutputGradientActivations());

                    // Inverse weight and output activations.
                    w_conv20->setBatchUnsynchronized(lin1->getInverseWeightActivations());
                    w_conv21->setBatchUnsynchronized(lin1->getInverseOutputActivations());

                    /*std::shared_ptr<Layer<float> > sm1 = neural_net.getLayer(2);
                    w_conv34->setBatchUnsynchronized(sm1->getOutputActivations());
                    w_conv35->setBatchUnsynchronized(sm1->getOutputGradientActivations());*/

                    // Add data to chart window.
                    collector_ptr->addDataToContainer("Loss", loss);
                    float reconstruction_error = neural_net.getLayer<mic::mlnn::fully_connected::Linear<float> >(1)->calculateMeanReconstructionError();
                    collector_ptr->addDataToContainer("Reconstruction Error", reconstruction_error);
                }//: if

                iteration++;
                //float reconstruction_error = neural_net.getLayer<mic::mlnn::fully_connected::Linear<float> >(1)->calculateMeanReconstructionError();

                LOG(LINFO) << "Iteration: " << iteration << " loss = " << loss;// << " reconstruction error = " << reconstruction_error;
            }//: end of critical section

        }//: if

        // Sleep.
        APP_SLEEP();
    }//: while

}//: batch_function
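
The chart in batch_function plots the loss returned by train() together with the layer's mean reconstruction error. As a point of reference only, the sketch below shows one common way such a mean reconstruction error can be computed over a batch stored column-wise, using plain Eigen; the helper name and the exact formula are illustrative assumptions and need not match the library's Linear<float>::calculateMeanReconstructionError().

#include <Eigen/Dense>

// Hypothetical helper (illustration only): mean squared reconstruction error between an
// input batch x and its reconstruction y, where each column holds one flattened sample.
float meanReconstructionError(const Eigen::MatrixXf& x, const Eigen::MatrixXf& y) {
    // Sum of squared entry-wise differences, averaged over the number of samples (columns).
    return (x - y).squaredNorm() / static_cast<float>(x.cols());
}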
/*!
 * \brief Main program function. Runs two threads: main (for GLUT) and another one (for data processing).
 */
int main(int argc, char* argv[]) {
    // Set console output to logger.
    LOGGER->addOutput(new ConsoleOutput());
    LOG(LINFO) << "Logger initialized. Starting application";

    // Parse parameters.
    PARAM_SERVER->parseApplicationParameters(argc, argv);

    // Initialize application state ("touch it") ;)
    APP_STATE;

    // Load dataset.
    importer = new mic::importers::MNISTMatrixImporter<float>();
    importer->setBatchSize(batch_size);

    // Initialize the encoders.
    mnist_encoder = new mic::encoders::MatrixXfMatrixXfEncoder(28, 28);
    label_encoder = new mic::encoders::UIntMatrixXfEncoder(10);

    // Set parameters of all property-tree derived objects - USER independent part.
    PARAM_SERVER->loadPropertiesFromConfiguration();

    // Initialize property-dependent variables of all registered property-tree objects - USER dependent part.
    PARAM_SERVER->initializePropertyDependentVariables();

    // Initialize GLUT! :]
    VGL_MANAGER->initializeGLUT(argc, argv);

    // Create batch visualization windows.
    w_conv10 = new WindowGrayscaleBatch<float>("Lin1 x", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 50, 50, 256, 256);
    w_conv11 = new WindowGrayscaleBatch<float>("Lin1 dx", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 316, 50, 256, 256);
    w_conv12 = new WindowGrayscaleBatch<float>("Lin1 W", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 562, 50, 256, 256);
    w_conv13 = new WindowGrayscaleBatch<float>("Lin1 dW", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 818, 50, 256, 256);
    w_conv14 = new WindowGrayscaleBatch<float>("Lin1 y", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 1074, 50, 256, 256);
    w_conv15 = new WindowGrayscaleBatch<float>("Lin1 dy", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 1330, 50, 256, 256);

    w_conv20 = new WindowGrayscaleBatch<float>("Lin1 inverse neuron activation", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 50, 336, 256, 256);
    w_conv21 = new WindowGrayscaleBatch<float>("Lin1 inverse output activation", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 316, 336, 256, 256);
    /*w_conv22 = new WindowGrayscaleBatch<float>("Conv2 W", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 562, 336, 256, 256);
    w_conv23 = new WindowGrayscaleBatch<float>("Conv2 dW", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 818, 336, 256, 256);
    w_conv24 = new WindowGrayscaleBatch<float>("Conv2 y", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 1074, 336, 256, 256);
    w_conv25 = new WindowGrayscaleBatch<float>("Conv2 dy", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 1330, 336, 256, 256);

    w_conv30 = new WindowGrayscaleBatch<float>("L1 x", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 50, 622, 256, 256);
    w_conv31 = new WindowGrayscaleBatch<float>("L1 dx", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 316, 622, 256, 256);
    w_conv32 = new WindowGrayscaleBatch<float>("L1 W", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 562, 622, 256, 256);
    w_conv33 = new WindowGrayscaleBatch<float>("L1 dW", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 818, 622, 256, 256);
    w_conv34 = new WindowGrayscaleBatch<float>("SM y", WindowGrayscaleBatch<float>::Norm_None, WindowGrayscaleBatch<float>::Grid_Both, 1074, 622, 256, 256);
    w_conv35 = new WindowGrayscaleBatch<float>("SM dy", WindowGrayscaleBatch<float>::Norm_HotCold, WindowGrayscaleBatch<float>::Grid_Both, 1330, 622, 256, 256);*/

    // Chart.
    w_chart = new WindowCollectorChart<float>("Statistics", 60, 878, 512, 256);
    collector_ptr = std::make_shared<mic::utils::DataCollector<std::string, float> >();
    w_chart->setDataCollectorPtr(collector_ptr);

    // Create data containers.
    collector_ptr->createContainer("Loss", mic::types::color_rgba(255, 0, 0, 180));
    collector_ptr->createContainer("Reconstruction Error", mic::types::color_rgba(255, 255, 255, 180));

    // Start the data-processing (batch sampling) thread.
    boost::thread batch_thread(boost::bind(&batch_function));

    // Start visualization thread.
    VGL_MANAGER->startVisualizationLoop();

    LOG(LINFO) << "Waiting for threads to join...";
    // End test thread.
    batch_thread.join();
    LOG(LINFO) << "Threads joined - ending application";
}//: main