MachineIntelligenceCore:NeuralNets
mlnn_batch_training_test.cpp
#include <logger/Log.hpp>
#include <logger/ConsoleOutput.hpp>
using namespace mic::logger;

#include <iostream>

#include <mlnn/BackpropagationNeuralNetwork.hpp> // assumed header path; the include line was not preserved in the listing

#include <encoders/MatrixXfMatrixXfEncoder.hpp>
#include <encoders/UIntMatrixXfEncoder.hpp>

#include <types/Batch.hpp>

// Using multi-layer neural networks.
using namespace mic::mlnn;
using namespace mic::types;

int main() {
	// Set console output.
	LOGGER->addOutput(new ConsoleOutput());

	// Generate a dataset.
	size_t dataset_size = 15;
	size_t classes = 5;
	Batch<MatrixXf, MatrixXf> dataset;
	for (size_t i = 0; i < dataset_size; i++) {
		// Generate "pose".
		MatrixXfPtr pose(new MatrixXf(dataset_size, 1));
		pose->setZero();
		(*pose)(i, 0) = 1;
		dataset.data().push_back(pose);

		// Generate desired target.
		MatrixXfPtr target(new MatrixXf(classes, 1));
		target->setZero();
		(*target)(i % classes, 0) = 1;
		dataset.labels().push_back(target);

		// Add index.
		dataset.indices().push_back(i);
	}//: for
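	// Note: the resulting toy dataset holds 15 one-hot "pose" vectors; pose i is
	// assigned class (i % 5), so each of the 5 classes gets exactly 3 samples.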

	// Neural net.
	BackpropagationNeuralNetwork<float> nn("simple_linear_network");
	nn.pushLayer(new Linear<float>(dataset_size, 15, "Linear1"));
	nn.pushLayer(new ReLU<float>(15, "ReLU1"));
	nn.pushLayer(new Linear<float>(15, classes, "Linear2"));
	nn.pushLayer(new Softmax<float>(classes, "Softmax"));
	// Change optimization function from default GradientDescent to Adam.
	nn.setOptimization<mic::neural_nets::optimization::Adam<float> >(); // assumed call and namespace

	// Initialize the encoders.
	mic::encoders::MatrixXfMatrixXfEncoder data_encoder(dataset_size, 1);
	mic::encoders::MatrixXfMatrixXfEncoder label_encoder(classes, 1);
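	// Both encoders pack a std::vector of column vectors into a single matrix
	// (presumably one column per sample), the format nn.train() expects below.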

	// Training.
	size_t iteration = 0;
	while (iteration < 10000) {
		// Draw a random batch from the dataset.
		Batch<MatrixXf, MatrixXf> batch = dataset.getRandomBatch();

		// Encode data and targets into single matrices.
		MatrixXfPtr encoded_batch, encoded_targets;
		encoded_batch = data_encoder.encodeBatch(batch.data());
		encoded_targets = label_encoder.encodeBatch(batch.labels());

		// Train network with batch, using a learning rate of 0.1.
		float loss = nn.train(encoded_batch, encoded_targets, 0.1);

		if (iteration % 1000 == 0) {
			std::cout << "[" << iteration << "]: Loss : " << loss << std::endl;
		}

		iteration++;
	}//: while
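	// On this trivially learnable toy problem the printed loss is expected to
	// fall steadily towards zero over the 10000 iterations.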

	// Test network.
	iteration = 0;
	dataset.setNextSampleIndex(0);
	while (iteration < dataset_size) {
		Sample<MatrixXf, MatrixXf> sample = dataset.getNextSample();
		std::cout << "[" << iteration++ << "]: sample (" << sample.index() << "): "
				<< sample.data()->transpose() << " -> " << sample.label()->transpose() << std::endl;

		float loss = nn.test(sample.data(), sample.label());
		// Compare results.
		MatrixXf predictions = (*nn.getPredictions());
		std::cout << "Loss : " << loss << std::endl;
		std::cout << "Targets : " << sample.label()->transpose() << std::endl;
		std::cout << "Predictions : " << predictions.transpose() << std::endl << std::endl;
	}//: while

}
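Since the softmax output is a distribution over the five classes, a natural extension of the test loop is to reduce each prediction to an arg-max and count hits. The sketch below is an assumed addition, not part of the original test: it relies only on Eigen's maxCoeff(&index) and the getPredictions() call already used above, and `correct` is a hypothetical counter declared before the loop.

	// Hypothetical accuracy computation (inside the test loop, after `predictions`):
	Eigen::Index predicted_class, true_class;
	predictions.col(0).maxCoeff(&predicted_class); // arg-max of the softmax output
	sample.label()->col(0).maxCoeff(&true_class);  // index of the one-hot target
	if (predicted_class == true_class)
		correct++; // size_t correct = 0; declared before the loop

	// After the loop:
	std::cout << "Accuracy : " << (100.0 * correct / dataset_size) << "%" << std::endl;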