MachineIntelligenceCore:NeuralNets
mlnn_sample_training_test.cpp
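A minimal example that builds a small multi-layer neural network (Linear, ReLU, Linear, Softmax), trains it with backpropagation on a synthetic dataset of one-hot vectors, and then tests it on every sample, printing the loss, targets, and predictions.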
#include <logger/Log.hpp>
#include <logger/ConsoleOutput.hpp>
using namespace mic::logger;

#include <iostream>

// Multi-layer neural network classes (assumed header name, following the
// project's <module/Class.hpp> include convention).
#include <mlnn/BackpropagationNeuralNetwork.hpp>

#include <types/Batch.hpp>

// Using multi-layer neural networks.
using namespace mic::mlnn;
using namespace mic::types;
int main() {
    // Set console output.
    LOGGER->addOutput(new ConsoleOutput());

    // Generate a dataset.
    size_t dataset_size = 15;
    size_t classes = 5;
    Batch<MatrixXf, MatrixXf> dataset;
    for (size_t i = 0; i < dataset_size; i++) {
        // Generate "pose".
        MatrixXfPtr pose(new MatrixXf(dataset_size, 1));
        pose->setZero();
        (*pose)(i, 0) = 1;
        dataset.data().push_back(pose);

        // Generate desired target.
        MatrixXfPtr target(new MatrixXf(classes, 1));
        target->setZero();
        (*target)(i % classes, 0) = 1;
        dataset.labels().push_back(target);

        // Add index.
        dataset.indices().push_back(i);
    }//: for
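    // The dataset thus maps each of the 15 one-hot "pose" vectors to one of
    // the 5 one-hot class targets: sample i belongs to class i % classes.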

    // Neural net.
    BackpropagationNeuralNetwork<float> nn("simple_linear_network");
    nn.pushLayer(new Linear<float>(dataset_size, 15, "Linear1"));
    nn.pushLayer(new ReLU<float>(15, "ReLU1"));
    nn.pushLayer(new Linear<float>(15, classes, "Linear2"));
    nn.pushLayer(new Softmax<float>(classes, "Softmax"));
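    // The resulting architecture is 15 -> 15 -> 5: two linear layers with a
    // ReLU nonlinearity in between, and a final Softmax that turns the five
    // outputs into a probability distribution over the classes.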
    // Change optimization function from default GradientDescent to Adam
    // (the templated setOptimization call is assumed from the library's API).
    nn.setOptimization<mic::neural_nets::optimization::Adam<float> >();

    // Training.
    size_t iteration = 0;
    while (iteration < 10000) {
        Sample<MatrixXf, MatrixXf> sample = dataset.getRandomSample();
        //std::cout << "[" << iteration << "]: sample (" << sample.index() << "): " << sample.data()->transpose() << "->" << sample.label()->transpose() << std::endl;

        float loss = nn.train(sample.data(), sample.label(), 0.1);

        if (iteration % 1000 == 0)
            std::cout << "[" << iteration << "]: Loss : " << loss << std::endl;

        // Compare results.
/*      MatrixXf predictions = (*nn.getPredictions());
        std::cout << "Targets     : " << sample.label()->transpose() << std::endl;
        std::cout << "Predictions : " << predictions.transpose() << std::endl << std::endl;*/
        iteration++;
    }//: while
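    // Note: each train() call performs one forward/backward pass on a single
    // randomly drawn sample and applies an optimizer update with learning
    // rate 0.1 (the weight decay argument defaults to 0.0f).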

    // Test the network on all samples, in order.
    iteration = 0;
    dataset.setNextSampleIndex(0);
    while (iteration < dataset_size) {
        Sample<MatrixXf, MatrixXf> sample = dataset.getNextSample();
        std::cout << "[" << iteration++ << "]: sample (" << sample.index() << "): " << sample.data()->transpose() << "->" << sample.label()->transpose() << std::endl;

        float loss = nn.test(sample.data(), sample.label());
        // Compare results.
        MatrixXf predictions = (*nn.getPredictions());
        std::cout << "Loss        : " << loss << std::endl;
        std::cout << "Targets     : " << sample.label()->transpose() << std::endl;
        std::cout << "Predictions : " << predictions.transpose() << std::endl << std::endl;
    }//: while
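    // After training, each printed prediction vector should be close to the
    // corresponding one-hot target, with most of the probability mass at
    // index i % classes.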

}
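Assuming the repository's standard CMake build compiles this sample into a test application (presumably named after the source file, e.g. mlnn_sample_training_test), running it prints the training loss every 1000 iterations, followed by the per-sample test results.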