MachineIntelligenceCore:NeuralNets
mnist_conv_hebbian.cpp
/*!
 * \file mnist_conv_hebbian.cpp
 * \brief Application training a single convolutional Hebbian layer on MNIST digits, with OpenGL visualization.
 * \author Alexis Asseman <alexis.asseman@ibm.com>, Tomasz Kornuta <tkornut@us.ibm.com>
 * \date May 30, 2017
 */
#include <boost/thread/thread.hpp>
#include <boost/bind.hpp>

#include <importers/MNISTMatrixImporter.hpp>

#include <logger/Log.hpp>
#include <logger/ConsoleOutput.hpp>
using namespace mic::logger;

#include <application/ApplicationState.hpp>

#include <configuration/ParameterServer.hpp>

#include <opengl/visualization/WindowManager.hpp>
#include <opengl/visualization/WindowGrayscaleBatch.hpp>
#include <opengl/visualization/WindowCollectorChart.hpp>

using namespace mic::opengl::visualization;
// Hebbian neural net.
#include <mlnn/HebbianNeuralNetwork.hpp>
using namespace mic::mlnn;

// Encoders.
#include <encoders/ColMatrixEncoder.hpp>
#include <encoders/UIntMatrixEncoder.hpp>

// Convolutional hebbian layer (experimental).
#include <mlnn/experimental/ConvHebbian.hpp>
using namespace mic::mlnn::experimental;
/// Window for displaying the MNIST batch.
WindowGrayscaleBatch<double>* w_input;
/// Window for displaying the weights.
WindowGrayscaleBatch<double>* w_weights1;

WindowGrayscaleBatch<double>* w_output;
WindowGrayscaleBatch<double>* w_reconstruction;
WindowGrayscaleBatch<double>* w_similarity;

/// Data collector.
WindowCollectorChart<double>* w_chart;
mic::utils::DataCollectorPtr<std::string, double> collector_ptr;

/// MNIST importer.
mic::importers::MNISTMatrixImporter<double>* importer;

/// Multi-layer neural network.
HebbianNeuralNetwork<double> neural_net;

/// MNIST matrix encoder.
mic::encoders::ColMatrixEncoder<double>* mnist_encoder;
/// Label 2 matrix encoder (1 hot).
//mic::encoders::UIntMatrixXfEncoder* label_encoder;

const size_t patch_size = 28;
const size_t batch_size = 1;
const size_t input_channels = 1;
const size_t filter_size[] = {5};
const size_t filters[] = {16};
const size_t stride[] = {1};
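// With these settings each 28x28 input patch is convolved with 16 filters of size 5x5 at
// stride 1; assuming no padding, each feature map is (28-5)/1+1 = 24 values on a side,
// i.e. 16 maps of 24x24.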


/*!
 * \brief Function for batch sampling.
 */
void batch_function (void) {

/*  if (neural_net.load(fileName)) {
        LOG(LINFO) << "Loaded neural network from a file";
    } else {*/
    {
        // Create a simple hebbian network.
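        // NOTE (assumption): the exact layer-construction call is not shown in this listing;
        // a minimal sketch, assuming the network exposes pushLayer() and that ConvHebbian is
        // built from the patch size, input channels, filter count, filter size and stride:
        neural_net.pushLayer(new ConvHebbian<double>(patch_size, patch_size, input_channels,
            filters[0], filter_size[0], stride[0]));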

        LOG(LINFO) << "Generated new neural network";
    }//: else

    // Get the pointer to the first (convolutional hebbian) layer.
    // NOTE: a templated getLayer<>() accessor is assumed here.
    std::shared_ptr<mic::mlnn::experimental::ConvHebbian<double> > layer1 =
        neural_net.getLayer<mic::mlnn::experimental::ConvHebbian<double> >(0);

    size_t iteration = 0;
    // Set training parameters.
    const double learning_rate = 5e-3;

    // Main application loop.
    while (!APP_STATE->Quit()) {

        // If not paused.
        if (!APP_STATE->isPaused()) {

            // If single step mode - pause after the step.
            if (APP_STATE->isSingleStepModeOn())
                APP_STATE->pressPause();

            { // Enter critical section - with the use of scoped lock from AppState!
                APP_DATA_SYNCHRONIZATION_SCOPED_LOCK();

                // Retrieve the next minibatch.
                mic::types::MNISTBatch<double> bt = importer->getRandomBatch();

                // Encode data.
                mic::types::MatrixPtr<double> encoded_batch = mnist_encoder->encodeBatch(bt.data());

                mic::types::MNISTBatch<double> next_batch = importer->getNextBatch();
                encoded_batch = mnist_encoder->encodeBatch(next_batch.data());

                // Train the network on the encoded batch.
                neural_net.train(encoded_batch, learning_rate);

                if (iteration % 10 == 0) {
                    // Visualize the weights.
                    // Set batches to be displayed.
                    w_input->setBatchUnsynchronized(layer1->getInputActivations());
                    w_weights1->setBatchUnsynchronized(layer1->getWeightActivations());
                    w_similarity->setBatchUnsynchronized(layer1->getWeightSimilarity(true));
                    w_output->setBatchUnsynchronized(layer1->getOutputActivations());
                    w_reconstruction->setBatchUnsynchronized(layer1->getOutputReconstruction());
                    collector_ptr->addDataToContainer("Reconstruction error", layer1->getOutputReconstructionError());
                    LOG(LINFO) << "Iteration: " << iteration;
                }//: if

                iteration++;
            }//: end of critical section

        }//: if

        // Sleep.
        APP_SLEEP();
    }//: while

}//: batch_function


/*!
 * \brief Main program function. Runs two threads: main (for GLUT) and another one (for data processing).
 */
int main(int argc, char* argv[]) {
    // Set console output to logger.
    LOGGER->addOutput(new ConsoleOutput());
    LOG(LINFO) << "Logger initialized. Starting application";

    // Parse parameters.
    PARAM_SERVER->parseApplicationParameters(argc, argv);

    // Initialize application state ("touch it") ;)
    APP_STATE;

    // Load dataset.
    importer = new mic::importers::MNISTMatrixImporter<double>();
    importer->setDataFilename("../data/mnist/train-images.idx3-ubyte");
    importer->setLabelsFilename("../data/mnist/train-labels.idx1-ubyte");
    importer->setBatchSize(batch_size);

    // Initialize the encoders.
    mnist_encoder = new mic::encoders::ColMatrixEncoder<double>(patch_size, patch_size);
    //label_encoder = new mic::encoders::UIntMatrixXfEncoder(batch_size);

    // Set parameters of all property-tree derived objects - USER independent part.
    PARAM_SERVER->loadPropertiesFromConfiguration();

    // Initialize property-dependent variables of all registered property-tree objects - USER dependent part.
    PARAM_SERVER->initializePropertyDependentVariables();

    // Import data from datasets.
    if (!importer->importData())
        return -1;

    // Initialize GLUT! :]
    VGL_MANAGER->initializeGLUT(argc, argv);

    // Create batch visualization window.
    w_input = new WindowGrayscaleBatch<double>("Input batch", Grayscale::Norm_HotCold, Grayscale::Grid_Both, 70, 0, 250, 250);
    w_weights1 = new WindowGrayscaleBatch<double>("Permanences", Grayscale::Norm_HotCold, Grayscale::Grid_Both, 70+250, 0, 250, 250);
    w_similarity = new WindowGrayscaleBatch<double>("Cosine similarity matrix", Grayscale::Norm_HotCold, Grayscale::Grid_Both, 70+(2*250), 0, 250, 250);
    w_output = new WindowGrayscaleBatch<double>("Output", Grayscale::Norm_HotCold, Grayscale::Grid_Both, 70+(3*250), 0, 250, 250);
    w_reconstruction = new WindowGrayscaleBatch<double>("Reconstruction", Grayscale::Norm_HotCold, Grayscale::Grid_Both, 70+(4*250), 0, 250, 250);

    // Chart.
    w_chart = new WindowCollectorChart<double>("Statistics", 60, 878, 512, 256);
    collector_ptr = std::make_shared<mic::utils::DataCollector<std::string, double> >();
    w_chart->setDataCollectorPtr(collector_ptr);

    // Create data containers.
    collector_ptr->createContainer("Reconstruction error", mic::types::color_rgba(255, 255, 255, 180));

    // Run batch sampling in a separate thread.
    boost::thread batch_thread(boost::bind(&batch_function));

    // Start visualization thread.
    VGL_MANAGER->startVisualizationLoop();

    LOG(LINFO) << "Waiting for threads to join...";
    // End test thread.
    batch_thread.join();
    LOG(LINFO) << "Threads joined - ending application";
}//: main