21 #include <logger/Log.hpp>
22 #include <logger/ConsoleOutput.hpp>
23 using namespace mic::logger;
27 #include <importers/MNISTMatrixImporter.hpp>
28 #include <encoders/MatrixXfMatrixXfEncoder.hpp>
29 #include <encoders/UIntMatrixXfEncoder.hpp>
34 using namespace mic::mlnn;
35 using namespace mic::types;
43 LOGGER->addOutput(
new ConsoleOutput());
64 mic::importers::MNISTMatrixImporter<float> training;
66 training.setDataFilename(
"../data/mnist/train-images.idx3-ubyte");
67 training.setLabelsFilename(
"../data/mnist/train-labels.idx1-ubyte");
68 training.setBatchSize(batch_size);
70 if (!training.importData())
74 mic::importers::MNISTMatrixImporter<float> test;
76 test.setDataFilename(
"../data/mnist/t10k-images.idx3-ubyte");
77 test.setLabelsFilename(
"../data/mnist/t10k-labels.idx1-ubyte");
78 test.setBatchSize(batch_size);
80 if (!test.importData())
88 LOG(LSTATUS) <<
"Starting the training of neural network...";
89 float learning_rate = 0.001;
90 MatrixXfPtr encoded_batch, encoded_targets;
93 for (
size_t ii = 0; ii < iterations; ii++) {
94 LOG(LINFO) <<
"Batch " << std::setw(4) << ii <<
"/" << std::setw(4) << iterations;
97 MNISTBatch<float> rand_batch = training.getRandomBatch();
98 encoded_batch = mnist_encoder.encodeBatch(rand_batch.data());
99 encoded_targets = label_encoder.encodeBatch(rand_batch.labels());
102 float loss = nn.
train (encoded_batch, encoded_targets, learning_rate);
103 LOG(LINFO) <<
"Training: loss = " << std::setprecision(8) << loss;
105 LOG(LSTATUS) <<
"Training finished";
108 LOG(LSTATUS) <<
"Calculating performance for test dataset...";
111 test.setNextSampleIndex(0);
112 while(!test.isLastBatch()) {
115 MNISTBatch<float> next_batch = test.getNextBatch();
116 encoded_batch = mnist_encoder.encodeBatch(next_batch.data());
117 encoded_targets = label_encoder.encodeBatch(next_batch.labels());
121 nn.
forward(encoded_batch,
true);
123 mic::types::MatrixXfPtr encoded_predictions = nn.
getPredictions();
129 double test_acc = (double)correct / (
double)(test.size());
130 LOG(LINFO) <<
"Test : loss = " << std::setprecision(3) << loss <<
" correct = " << std::setprecision(3) << 100.0 * test_acc <<
" %";
133 LOG(LSTATUS) <<
"Calculating performance for the training dataset...";
136 training.setNextSampleIndex(0);
137 while(!training.isLastBatch()) {
140 MNISTBatch<float> next_batch = training.getNextBatch();
141 encoded_batch = mnist_encoder.encodeBatch(next_batch.data());
142 encoded_targets = label_encoder.encodeBatch(next_batch.labels());
146 nn.
forward(encoded_batch,
true);
148 mic::types::MatrixXfPtr encoded_predictions = nn.
getPredictions();
153 double train_acc = (double)correct / (
double)(training.size());
154 LOG(LINFO) <<
"Train : loss = " << std::setprecision(3) << loss <<
" correct = " << std::setprecision(3) << 100.0 * train_acc <<
" %";
mic::encoders::UIntMatrixXfEncoder * label_encoder
Label-to-matrix encoder (one-hot encoding of class labels).
size_t countCorrectPredictions(mic::types::MatrixPtr< eT > targets_, mic::types::MatrixPtr< eT > predictions_)
mic::types::MatrixPtr< eT > getPredictions()
eT train(mic::types::MatrixPtr< eT > encoded_batch_, mic::types::MatrixPtr< eT > encoded_targets_, eT learning_rate_, eT decay_=0.0f)
void forward(mic::types::MatrixPtr< eT > input_data, bool skip_dropout=false)
void pushLayer(LayerType *layer_ptr_)
mic::encoders::ColMatrixEncoder< double > * mnist_encoder
MNIST image-to-column-matrix encoder.
eT calculateMeanLoss(mic::types::MatrixPtr< eT > encoded_targets_, mic::types::MatrixPtr< eT > encoded_predictions_)