21 #include <logger/Log.hpp>
22 #include <logger/ConsoleOutput.hpp>
23 using namespace mic::logger;
27 #include <importers/MNISTMatrixImporter.hpp>
28 #include <encoders/MatrixXfMatrixXfEncoder.hpp>
29 #include <encoders/UIntMatrixXfEncoder.hpp>
33 using namespace mic::types;
35 using namespace mic::mlnn;
36 using namespace mic::mlnn::convolution;
// Create a console output sink and register it with the global LOGGER
// singleton so that the LOG(...) statements below print to the terminal.
// NOTE(review): raw `new` with no visible matching delete — presumably
// addOutput() takes ownership of the sink; confirm its ownership contract.
44 ConsoleOutput* co =
new ConsoleOutput();
45 LOGGER->addOutput(co);
// Configure the MNIST training-set importer: image file, label file,
// and the mini-batch size used when sampling batches during training.
49 mic::importers::MNISTMatrixImporter<float> training;
51 training.setDataFilename(
"../data/mnist/train-images.idx3-ubyte");
52 training.setLabelsFilename(
"../data/mnist/train-labels.idx1-ubyte");
53 training.setBatchSize(batch_size);
// Load the dataset; the failure branch's body lies outside this chunk
// (presumably logs an error and exits — TODO confirm against full file).
55 if (!training.importData())
// Configure the MNIST test-set importer (t10k files), mirroring the
// training-set setup above; same batch size for batched evaluation.
59 mic::importers::MNISTMatrixImporter<float> test;
61 test.setDataFilename(
"../data/mnist/t10k-images.idx3-ubyte");
62 test.setLabelsFilename(
"../data/mnist/t10k-labels.idx1-ubyte");
63 test.setBatchSize(batch_size);
// Load the test data; the failure branch's body lies outside this chunk.
65 if (!test.importData())
// Optimisation hyper-parameters, passed straight to nn.train() below.
109 double learning_rate = 1e-4;
110 double weight_decay = 1e-5;
// Mini-batches per epoch. Integer division: a trailing partial batch
// (training.size() % batch_size samples) is never drawn per epoch.
111 size_t iterations = training.size() /
batch_size;
// Reused buffers for the encoded input images and encoded (one-hot) targets.
113 MatrixXfPtr encoded_batch, encoded_targets;
// Main training loop: `epochs` full passes, each made of `iterations`
// randomly drawn mini-batches. (The closing braces of both loops are in
// a part of the file outside this chunk.)
115 for (
size_t e = 0; e < epochs; e++) {
116 LOG(LSTATUS) <<
"Epoch " << e + 1 <<
": starting the training of neural network...";
118 for (
size_t ii = 0; ii < iterations; ii++) {
// Progress indicator "[  ii/iterations] " on stdout.
119 std::cout<<
"[" << std::setw(4) << ii <<
"/" << std::setw(4) << iterations <<
"] ";
// Draw a random training batch and encode images and labels into the
// matrix representation the network consumes.
122 MNISTBatch<float> rand_batch = training.getRandomBatch();
123 encoded_batch = mnist_encoder.encodeBatch(rand_batch.data());
124 encoded_targets = label_encoder.encodeBatch(rand_batch.labels());
// One optimisation step on this batch; returns the batch loss.
127 float loss = nn.
train (encoded_batch, encoded_targets, learning_rate, weight_decay);
128 std::cout <<
" loss = " << loss << std::endl;
// After training completes, persist the network under the name "mnist_conv".
132 nn.
save(
"mnist_conv");
134 LOG(LSTATUS) <<
"Training finished";
// Evaluate on the test set: rewind the importer to sample 0 and walk
// through every batch sequentially.
137 LOG(LSTATUS) <<
"Calculating performance for test dataset...";
139 test.setNextSampleIndex(0);
140 while(!test.isLastBatch()) {
143 MNISTBatch<float> next_batch = test.getNextBatch();
144 encoded_batch = mnist_encoder.encodeBatch(next_batch.data());
145 encoded_targets = label_encoder.encodeBatch(next_batch.labels());
// Accumulate the count returned by nn.test() for this batch.
// NOTE(review): `correct` is declared/reset outside the visible chunk.
148 correct += nn.
test(encoded_batch, encoded_targets);
// Fraction of correctly classified samples over the whole test set,
// reported below as a percentage.
151 double test_acc = (double)correct / (
double)(test.size());
152 LOG(LINFO) <<
"Test accuracy : " << std::setprecision(3) << 100.0 * test_acc <<
" %";
// Evaluate on the training set the same way: rewind and iterate over
// all batches in order.
155 LOG(LSTATUS) <<
"Calculating performance for the training dataset...";
157 training.setNextSampleIndex(0);
158 while(!training.isLastBatch()) {
161 MNISTBatch<float> next_batch = training.getNextBatch();
162 encoded_batch = mnist_encoder.encodeBatch(next_batch.data());
163 encoded_targets = label_encoder.encodeBatch(next_batch.labels());
// NOTE(review): `correct` must be re-zeroed between the test-set and
// training-set passes; the reset is outside this chunk — confirm.
166 correct += nn.
test(encoded_batch, encoded_targets);
// Training-set classification accuracy as a fraction in [0, 1].
169 double train_acc = (double)correct / (
double)(training.size());
170 LOG(LINFO) <<
"Trainin accuracy : " << std::setprecision(3) << 100.0 * train_acc <<
" %";
Class implementing the layer with Exponential Linear Unit (ELU). http://arxiv.org/pdf/1511.07289v5.pdf.
bool save(std::string filename_)
Layer performing max pooling.
mic::encoders::UIntMatrixXfEncoder * label_encoder
Label-to-matrix encoder (one-hot).
Class implementing cropping operation - crops the size of image (matrix) by a margin of n pixels on e...
Dropout layer - a layer used for the regularization of neural network by randomly dropping neurons dur...
eT test(mic::types::MatrixPtr< eT > encoded_batch_, mic::types::MatrixPtr< eT > encoded_targets_)
void resizeBatch(size_t batch_size_)
eT train(mic::types::MatrixPtr< eT > encoded_batch_, mic::types::MatrixPtr< eT > encoded_targets_, eT learning_rate_, eT decay_=0.0f)
Adam - adaptive moment estimation.
void pushLayer(LayerType *layer_ptr_)
mic::encoders::ColMatrixEncoder< double > * mnist_encoder
MNIST matrix encoder.