MachineIntelligenceCore:NeuralNets
MNISTPatchSoftmaxApplication.cpp
namespace mic {
namespace application {

void RegisterApplication (void) {
	// Register the application in the application factory (standard MIC registration macro).
	REGISTER_APPLICATION(mic::applications::MNISTPatchSoftmaxApplication);
}

} /* namespace application */

namespace applications {

MNISTPatchSoftmaxApplication::MNISTPatchSoftmaxApplication(std::string node_name_) : OpenGLContinuousLearningApplication(node_name_),
	autoencoder_filename("autoencoder_filename", "autoencoder.txt"),
	autoencoder_layers_to_be_removed("autoencoder_layers_to_be_removed", 0),
	softmax_filename("softmax_filename", "softmax.txt"),
	softmax_save("softmax_save", false),
	softmax_load("softmax_load", false)
{
	// Register properties - so their values can be overridden (read from the configuration file).
	registerProperty(autoencoder_filename);
	registerProperty(autoencoder_layers_to_be_removed);
	registerProperty(softmax_filename);
	registerProperty(softmax_save);
	registerProperty(softmax_load);

	// Create importers.
	training_dataset_importer = new mic::importers::MNISTPatchImporter("mnist_training_dataset_importer");
	test_dataset_importer = new mic::importers::MNISTPatchImporter("mnist_test_dataset_importer");

	LOG(LINFO) << "Properties registered";
}

MNISTPatchSoftmaxApplication::~MNISTPatchSoftmaxApplication() {
	// Delete importers.
	delete(training_dataset_importer);
	delete(test_dataset_importer);
	// Delete visualization windows.
	delete(w2d_input);
	delete(w_prob);
	delete(w_chart);
	// Delete encoders.
	delete(mnist_encoder);
	delete(label_encoder);
}

void MNISTPatchSoftmaxApplication::initialize(int argc, char* argv[]) {

	// Initialize GLUT! :]
	VGL_MANAGER->initializeGLUT(argc, argv);

	// Create visualization windows.
	w2d_input = new WindowMatrix2D("Input matrix", 256, 256, 0, 0);

	w_prob = new WindowProbability("Probability", 128, 256, 320, 0);

	collector_ptr = std::make_shared<mic::utils::DataCollector<std::string, float> >( );
	// Add containers to collector.
	collector_ptr->createContainer("training_loss", mic::types::color_rgba(0, 0, 255, 180));
	collector_ptr->createContainer("test_loss", mic::types::color_rgba(0, 255, 0, 180));

	// Create the chart window - must be created in the same, main thread :]
	w_chart = new WindowCollectorChart<float>("MNISTPatchReconstruction", 0, 310, 512, 256);
	w_chart->setDataCollectorPtr(collector_ptr);

}

void MNISTPatchSoftmaxApplication::initializePropertyDependentVariables() {
	LOG(LTRACE) << "MNISTPatchSoftmaxApplication::initializePropertyDependentVariables";

	// Get patch size.
	patch_size = training_dataset_importer->getPatchSize();

	// Allocate memory for images.
	input_image = std::make_shared<mic::types::MatrixXf>(patch_size, patch_size);
	input_target = std::make_shared<mic::types::MatrixXf>(10, 1);
	decoded_prediction = std::make_shared<mic::types::MatrixXf>(10, 1);

	// Set displayed matrix pointers.
	w2d_input->setMatrixPointerSynchronized(input_image);
	w_prob->setMatrixPointer1(input_target);
	w_prob->setMatrixPointer2(decoded_prediction);

	// Load datasets.
	if (!training_dataset_importer->importData())
		return;

	if (!test_dataset_importer->importData())
		return;

	// Initialize the encoders.
	mnist_encoder = new mic::encoders::MatrixXfMatrixXfEncoder(patch_size, patch_size);
	label_encoder = new mic::encoders::UIntMatrixXfEncoder(10);

	// Try to load autoencoder from file.
	if (neural_net.load(autoencoder_filename)) {
		LOG(LINFO) << "Loaded the autoencoder network";
		LOG(LINFO) << neural_net;

		// Remove n last layers and add softmax on top.
		neural_net.popLayer(autoencoder_layers_to_be_removed);
		neural_net.pushLayer(new Linear<float>(20, 10));
		neural_net.pushLayer(new Softmax<float>(10));

		LOG(LINFO) << "The resulting softmax network";
		LOG(LINFO) << neural_net;
	} else if ((softmax_load) && (neural_net.load(softmax_filename))) {
		// Softmax network loaded - there is nothing more to do here...
		LOG(LINFO) << "Loaded the softmax network";
		LOG(LINFO) << neural_net;
	} else {
		// We do not have the network!
		exit(1);
	}//: else

}


bool MNISTPatchSoftmaxApplication::performLearningStep() {

	// Randomly select a sample from the training dataset.
	mic::types::MNISTSample<float> sample = training_dataset_importer->getRandomSample();

	// Copy sample data to input matrix - for visualization.
	(*input_image) = (*sample.data());
	//std::cout << " input: \n" << *(input_image) << std::endl;

	// Encode data.
	mic::types::MatrixXfPtr encoded_patch = mnist_encoder->encodeSample(sample.data());
	mic::types::MatrixXfPtr encoded_label = label_encoder->encodeSample(sample.label());

	// Train the softmax network.
	float loss = neural_net.train(encoded_patch, encoded_label, 0.005);

	// Copy the encoded label and the prediction - for visualization.
	(*input_target) = (*encoded_label);
	(*decoded_prediction) = (*neural_net.getPredictions());

	// Collect statistics.
	collector_ptr->addDataToContainer("training_loss", loss);

	return true;
}


void MNISTPatchSoftmaxApplication::collectTestStatistics() {
	// Randomly select a sample from the test dataset.
	mic::types::MNISTSample<float> sample = test_dataset_importer->getRandomSample();

	// Copy sample data to input matrix - for visualization.
	(*input_image) = (*sample.data());

	// Encode data.
	mic::types::MatrixXfPtr encoded_patch = mnist_encoder->encodeSample(sample.data());
	mic::types::MatrixXfPtr encoded_label = label_encoder->encodeSample(sample.label());

	// Test the softmax network (compute the loss without updating the weights).
	float loss = neural_net.test(encoded_patch, encoded_label);

	// Collect statistics.
	collector_ptr->addDataToContainer("test_loss", loss);
}


void MNISTPatchSoftmaxApplication::populateTestStatistics() {
	// Average the sums.
	/*classification_cost_sum /= (float)number_of_averaged_test_measures;
	correct_classification_factor_sum /= (float)number_of_averaged_test_measures;

	LOG(LINFO) << "Iteration = " << iteration << " classification_cost_sum = " << classification_cost_sum;

	// Add data to chart window.

	// Reset partial sums.
	classification_cost_sum = 0;
	correct_classification_factor_sum = 0;
	*/

	//collector_ptr->addDataToContainer("reconstruction", classification_cost_sum);
	//classification_cost_sum += .001;

	LOG(LINFO) << "Iteration = " << iteration;

	// Save nn to file.
	if (softmax_save)
		neural_net.save(softmax_filename);
}

} /* namespace applications */
} /* namespace mic */
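
The label encoder listed in the member reference below produces a 1-hot target vector, and the classification loss used here is cross-entropy over the network's softmax output. As a minimal, self-contained illustration of those two ideas (plain C++ and the STL only - this is not the MIC encoder or network API), the following sketch encodes a digit label for 10 classes and evaluates the cross-entropy loss for a hypothetical set of network outputs:

#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// 1-hot encoding of a class label into a vector of length num_classes.
std::vector<float> encodeOneHot(unsigned int label, std::size_t num_classes) {
	std::vector<float> target(num_classes, 0.0f);
	target[label] = 1.0f;
	return target;
}

// Softmax: turns raw scores (logits) into a probability distribution.
std::vector<float> softmax(const std::vector<float>& logits) {
	float max_logit = logits[0];
	for (float v : logits) max_logit = std::max(max_logit, v);
	std::vector<float> probs(logits.size());
	float sum = 0.0f;
	for (std::size_t i = 0; i < logits.size(); ++i) {
		probs[i] = std::exp(logits[i] - max_logit);  // subtract the max for numerical stability
		sum += probs[i];
	}
	for (float& p : probs) p /= sum;
	return probs;
}

// Cross-entropy loss between a 1-hot target and a predicted distribution.
float crossEntropy(const std::vector<float>& target, const std::vector<float>& probs) {
	float loss = 0.0f;
	for (std::size_t i = 0; i < target.size(); ++i)
		loss -= target[i] * std::log(probs[i] + 1e-12f);
	return loss;
}

int main() {
	// A sample labelled "3" out of the 10 MNIST digit classes.
	std::vector<float> target = encodeOneHot(3, 10);

	// Hypothetical raw network outputs for the 10 classes.
	std::vector<float> logits = {0.1f, -1.2f, 0.3f, 2.5f, 0.0f, -0.7f, 0.4f, 0.2f, -0.3f, 0.1f};
	std::vector<float> probs = softmax(logits);

	std::cout << "loss = " << crossEntropy(target, probs) << std::endl;
	return 0;
}

With a 1-hot target the loss reduces to the negative log-probability that the softmax assigns to the correct class, which is the kind of per-sample quantity collected above in the training_loss and test_loss containers.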
Members referenced above (documentation from the class reference):

- mic::utils::DataCollectorPtr< std::string, float > collector_ptr - Data collector.
- Loss function: class representing a cross-entropy loss function (classification).
- mic::configuration::Property< std::string > autoencoder_filename - Property: name of the file from which the original auto-encoder will be loaded.
- MNISTPatchSoftmaxApplication(std::string node_name_="mnist_patch_autoencoder_reconstruction") - Class implementing a simple MNIST patch softmax classification with multi-layer neural net - imported...
- BackpropagationNeuralNetwork< float > neural_net - Multi-layer neural network.
- mic::types::MatrixXfPtr input_image - Input image/matrix.
- WindowProbability * w_prob - Window displaying the probabilities.
- void popLayer(size_t number_of_layers_=1)
- mic::configuration::Property< bool > softmax_load - Property: flag denoting whether the nn should be loaded from a file (at the initialization of the task).
- mic::configuration::Property< bool > softmax_save - Property: flag denoting whether the nn should be saved to a file (after every episode end)...
- mic::types::MatrixPtr< eT > getPredictions()
- mic::configuration::Property< std::string > softmax_filename - Property: name of the file with the softmax nn.
- void RegisterApplication(void) - Registers the application.
- eT test(mic::types::MatrixPtr< eT > encoded_batch_, mic::types::MatrixPtr< eT > encoded_targets_)
- WindowCollectorChart< float > * w_chart - Window for displaying the chart with statistics.
- WindowMatrix2D * w2d_input - Window for displaying the input image.
- mic::importers::MNISTPatchImporter * training_dataset_importer - Importer responsible for loading the training dataset.
- eT train(mic::types::MatrixPtr< eT > encoded_batch_, mic::types::MatrixPtr< eT > encoded_targets_, eT learning_rate_, eT decay_=0.0f)
- size_t patch_size - Size of the patch - copied from the importers.
- mic::configuration::Property< size_t > autoencoder_layers_to_be_removed - Property: number of layers to be removed (starting from the last one).
- mic::encoders::UIntMatrixXfEncoder * label_encoder - Label-to-matrix encoder (1-hot).
- mic::encoders::MatrixXfMatrixXfEncoder * mnist_encoder - MNIST matrix encoder.
- mic::importers::MNISTPatchImporter * test_dataset_importer - Importer responsible for loading the testing dataset.
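
The three step methods above are driven by the continuous-learning skeleton of the base class: roughly, a learning step on every iteration and, periodically, a testing pass followed by statistics reporting and an optional save. The outline below is a conceptual sketch of that control flow in plain C++ under assumed names (run_loop, learning_step, testing_step, report_and_save); it is not the OpenGLContinuousLearningApplication implementation.

#include <cstddef>
#include <iostream>

// Conceptual stand-ins for the application's steps (names are illustrative only).
bool learning_step() {
	// ...pick a random training sample, encode it, train the network, log the loss...
	return true;  // returning true keeps the loop running
}

void testing_step() {
	// ...pick a random test sample, encode it, evaluate the network, collect the loss...
}

void report_and_save(std::size_t iteration) {
	// ...log the iteration number and, if saving is enabled, write the network to a file...
	std::cout << "Iteration = " << iteration << std::endl;
}

// Minimal continuous-learning loop: train every iteration, test and report every test_interval steps.
void run_loop(std::size_t max_iterations, std::size_t test_interval) {
	for (std::size_t iteration = 0; iteration < max_iterations; ++iteration) {
		if (!learning_step())
			break;
		if ((iteration + 1) % test_interval == 0) {
			testing_step();
			report_and_save(iteration);
		}
	}
}

int main() {
	run_loop(10000, 100);  // e.g. 10k training steps, statistics every 100 steps
	return 0;
}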