MachineIntelligenceCore:NeuralNets
 All Classes Namespaces Files Functions Variables Enumerations Enumerator Friends Macros
Layer.hpp
Go to the documentation of this file.
1 
23 #ifndef SRC_MLNN_LAYER_HPP_
24 #define SRC_MLNN_LAYER_HPP_
25 
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include <memory>

#include <types/MatrixTypes.hpp>
#include <types/MatrixArray.hpp>

// Optimization functions - required for the Layer::opt member array.
#include <optimization/OptimizationArray.hpp>

#include <boost/serialization/serialization.hpp>
// include this header to serialize vectors
#include <boost/serialization/vector.hpp>
// include this header to serialize arrays
#include <boost/serialization/array.hpp>
#include <boost/serialization/version.hpp>

#include <logger/Log.hpp>
42 
// Forward declaration of class boost::serialization::access,
// so that it can be declared a friend of Layer without pulling in more headers.
namespace boost {
namespace serialization {
class access;
}//: namespace serialization
}//: namespace boost
49 
50 
51 namespace mic {
52 namespace mlnn {
53 
/*!
 * \brief Enumeration of possible layer types.
 * NOTE: values are serialized (boost::serialization) - do not reorder existing entries.
 */
enum class LayerTypes : short
{
	// activation
	ELU = 0,
	ReLU,
	Sigmoid,
	// convolution
	Convolution,
	Cropping,
	Padding,
	MaxPooling,
	// cost_function
	Softmax,
	// fully_connected
	Linear,
	SparseLinear,
	HebbianLinear,
	// regularization
	Dropout,
	// Experimental
	BinaryCorrelator
};
81 
82 
83 
// Forward declaration of MultiLayerNeuralNetwork - required for "lazy connection"
// (it is declared a friend of Layer below).
template <typename eT>
class MultiLayerNeuralNetwork;
93 template <typename eT=float>
94 class Layer {
95 public:
107  Layer(size_t input_height_, size_t input_width_, size_t input_depth_,
108  size_t output_height_, size_t output_width_, size_t output_depth_,
109  LayerTypes layer_type_, std::string name_ = "layer") :
110  // Set "reduced" input dimensions.
111  input_height(input_height_),
112  input_width(input_width_),
113  input_depth(input_depth_),
114  // Set "reduced" output dimensions.
115  output_height(output_height_),
116  output_width(output_width_),
117  output_depth(output_depth_),
118  // Set batch size.
119  batch_size(1),
120  // Set layer type and name.
121  layer_type(layer_type_),
122  layer_name(name_),
123  // Initialize matrice arrays.
124  s("state"),
125  g("gradients"),
126  p("parameters"),
127  m("memory")
128  {
129  // State.
130  s.add ( "x", input_depth*input_height*input_width, batch_size ); // inputs
131  s.add ( "y", output_depth*output_height*output_width, batch_size); // outputs
132 
133  // Gradients.
134  g.add ( "x", input_depth*input_height*input_width, batch_size ); // inputs
135  g.add ( "y", output_depth*output_height*output_width, batch_size); // outputs
136 
137  // Allocate (temporary) memory for "input sample" - column vector.
138  m.add ("xs", input_depth*input_height*input_width, 1);
139  // Allocate (temporary) memory for "input channel" - column vector.
140  m.add ("xc", input_height*input_width, 1);
141 
142  // Allocate (temporary) memory for "output sample" - a column vector of all channels of a given sample.
143  m.add ("ys", output_depth*output_height*output_width, 1);
144 
145  // Allocate (temporary) memory for "output sample" - a column vector.
146  m.add ("yc", output_height * output_width, 1);
147 
148  };
149 
150 
154  virtual ~Layer() {};
155 
160  virtual void forward(bool test = false) = 0;
161 
165  mic::types::MatrixPtr<eT> forward(mic::types::MatrixPtr<eT> x_, bool test = false) {
166  // Copy "input" sample/batch.
167  (*s["x"]) = (*x_);
168 
169  // Call the (abstract, implemented by a given layer) forward pass.
170  forward(test);
171 
172  // Return "output".
173  return s["y"];
174  }
175 
179  virtual void backward() = 0;
180 
184  mic::types::MatrixPtr<eT> backward(mic::types::MatrixPtr<eT> dy_) {
185  // Copy "output" sample/batch gradient.
186  (*g["y"]) = (*dy_);
187 
188  // Call the (abstract, implemented by a given layer) backward pass.
189  backward();
190 
191  // Return "input" gradient.
192  return g["x"];
193  }
194 
199  virtual void resizeBatch(size_t batch_size_) {
200  // Change the "value". (depricated)
201  batch_size = batch_size_;
202  // Reshape the inputs...
203  s["x"]->resize(s["x"]->rows(), batch_size_);
204  g["x"]->resize(g["x"]->rows(), batch_size_);
205  // ... and outputs.
206  s["y"]->resize(s["y"]->rows(), batch_size_);
207  g["y"]->resize(g["y"]->rows(), batch_size_);
208  }
209 
218  template<typename loss>
219  mic::types::MatrixPtr<eT> calculateNumericalGradient(mic::types::MatrixPtr<eT> x_, mic::types::MatrixPtr<eT> target_y_, mic::types::MatrixPtr<eT> param_, loss loss_, eT delta_) {
220  // Allocate memory.
221  mic::types::MatrixPtr<eT> nGrad = MAKE_MATRIX_PTR(eT, param_->rows(), param_->cols());
222  for (size_t i=0; i<(size_t)param_->size(); i++) {
223  // Add delta.
224  (*param_)[i] += delta_;
225  // Calculate loss.
226  eT p = loss_.calculateLoss(target_y_, forward(x_));
227  // Substract delta.
228  (*param_)[i] -= 2*delta_;
229  // Calculate loss.
230  eT m = loss_.calculateLoss(target_y_, forward(x_));
231 
232  // Store numerical gradient.
233  (*nGrad)[i] = (p-m)/(2*delta_);
234  // Set original value.
235  (*param_)[i] += delta_;
236 
237  }//: for
238  return nGrad;
239  }
240 
241 
245  virtual void resetGrads() {};
246 
252  virtual void update(eT alpha_, eT decay_ = 0.0f) = 0;
253 
255  inline size_t inputSize() {
257  }
258 
260  inline size_t outputSize() {
262  }
263 
265  inline size_t batchSize() {
266  return batch_size;
267  }
268 
270  inline const std::string name() const {
271  return layer_name;
272  }
273 
277  mic::types::MatrixPtr<eT> getParam(std::string name_) {
278  return p[name_];
279  }
280 
284  mic::types::MatrixPtr<eT> getState(std::string name_) {
285  return s[name_];
286  }
287 
291  mic::types::MatrixPtr<eT> getGradient(std::string name_) {
292  return g[name_];
293  }
294 
298  void setState(std::string name_, mic::types::MatrixPtr<eT> mat_ptr_) {
299  (*s[name_]) = (*mat_ptr_);
300  }
301 
306  template<typename omT>
307  void setOptimization () {
308  // Remove all previous optimization functions.
309  opt.clear();
310 
311  // Iterate through parameters and add a separate optimization function for each parameter.
312  for (auto& i: p.keys()) {
313  opt.add(
314  i.first,
315  std::make_shared< omT > (omT ( (p[i.second])->rows(), (p[i.second])->cols() ))
316  );
317  }//: for keys
318  }
319 
323  const std::string type() const {
324  switch(layer_type) {
325  // activation
326  case(LayerTypes::ELU):
327  return "ELU";
328  case(LayerTypes::ReLU):
329  return "ReLU";
330  case(LayerTypes::Sigmoid):
331  return "Sigmoid";
332  // convolution
334  return "Convolution";
335  case(LayerTypes::Padding):
336  return "Padding";
338  return "MaxPooling";
339  // cost_function
340  case(LayerTypes::Softmax):
341  return "Softmax";
342  // fully_connected
343  case(LayerTypes::Linear):
344  return "Linear";
346  return "SparseLinear";
348  return "HebbianLinear";
350  return "BinaryCorrelator";
351  // regularization
352  case(LayerTypes::Dropout):
353  return "Dropout";
354  default:
355  return "Undefined";
356  }//: switch
357  }
358 
363  virtual std::string streamLayerParameters() {
364  std::ostringstream os_;
365  // Display id/type.
366  os_ << " [" << type() << "]: " << layer_name << ": " << inputSize() << "x" << batch_size << " -> " << outputSize() << "x" << batch_size << "\n";
367 
368  // Display dimensions.
369  os_<<" * input_height = " << input_height <<std::endl;
370  os_<<" * input_width = " << input_width <<std::endl;
371  os_<<" * input_channels = " << input_depth <<std::endl;
372  os_<<" * output_height = " << output_height <<std::endl;
373  os_<<" * output_width = " << output_width <<std::endl;
374  os_<<" * output_channels = " << output_depth;
375 
376  return os_.str();
377  }
378 
379 
385  friend std::ostream& operator<<(std::ostream& os_, Layer& obj_) {
386  // Display dimensions.
387  os_ << obj_.streamLayerParameters();
388 
389  // Display inputs.
390  os_ << " [" << obj_.s.name() << "]:\n";
391  for (auto& i: obj_.s.keys()) {
392  // Display elements.
393  os_ << " [" << i.first << "]: ";
394  os_ << (obj_.s[i.second])->cols() << "x" << (obj_.s[i.second])->rows() << std::endl;
395  }//: for keys
396 
397  // Display gradients.
398  os_ << " [" << obj_.g.name() << "]:\n";
399  for (auto& i: obj_.g.keys()) {
400  // Display elements.
401  os_ << " [" << i.first << "]: ";
402  os_ << (obj_.g[i.second])->cols() << "x" << (obj_.g[i.second])->rows() << std::endl;
403  }//: for keys
404 
405  // Display parameters.
406  os_ << " [" << obj_.p.name() << "]:\n";
407  for (auto& i: obj_.p.keys()) {
408  // Display elements.
409  os_ << " [" << i.first << "]: ";
410  os_ << (obj_.p[i.second])->cols() << "x" << (obj_.p[i.second])->rows() << std::endl;
411  }//: for keys
412 
413  // Display gradients.
414  os_ << " [" << obj_.m.name() << "]:\n";
415  for (auto& i: obj_.m.keys()) {
416  // Display elements.
417  os_ << " [" << i.first << "]: ";
418  os_ << (obj_.m[i.second])->cols() << "x" << (obj_.m[i.second])->rows() << std::endl;
419  }//: for keys
420 
421  return os_;
422  }
423 
433  mic::types::MatrixPtr<eT> lazyReturnSampleFromBatch (mic::types::MatrixPtr<eT> batch_ptr_, mic::types::MatrixArray<eT> & array_, std::string id_, size_t sample_number_, size_t sample_size_){
434  // Generate "unique id" for a given sample.
435  std::string sample_id = id_ + std::to_string(sample_number_);
436  mic::types::MatrixPtr<eT> sample;
437 
438  #pragma omp critical
439  {
440  if (!array_.keyExists(sample_id)) {
441  // Allocate memory.
442  array_.add(sample_id, sample_size_, 1);
443  }//: if
444 
445  // Get array.
446  sample = m[sample_id];
447  // Copy data.
448  (*sample) = batch_ptr_->col(sample_number_);
449  }//: end OMP critical section
450 
451  // Return it.
452  return sample;
453  }
454 
460  inline mic::types::MatrixPtr<eT> lazyReturnInputSample (mic::types::MatrixPtr<eT> batch_ptr_, size_t sample_number_){
461  return lazyReturnSampleFromBatch(batch_ptr_, m, "xs", sample_number_, Layer<eT>::inputSize());
462  }
463 
464 
470  inline mic::types::MatrixPtr<eT> lazyReturnOutputSample (mic::types::MatrixPtr<eT> batch_ptr_, size_t sample_number_){
471  return lazyReturnSampleFromBatch(batch_ptr_, m, "ys", sample_number_, Layer<eT>::outputSize());
472  }
473 
474 
487  mic::types::MatrixPtr<eT> lazyReturnChannelFromSample (mic::types::MatrixPtr<eT> sample_ptr_, mic::types::MatrixArray<eT> & array_, std::string id_, size_t sample_number_, size_t channel_number_, size_t height_, size_t width_){
488  // Generate "unique id" for a given sample.
489  std::string channel_id = id_ + std::to_string(channel_number_);
490  mic::types::MatrixPtr<eT> channel;
491 
492  #pragma omp critical
493  {
494  if (!array_.keyExists(channel_id)) {
495  // Allocate memory.
496  array_.add(channel_id, height_*width_, 1);
497  }//: if
498 
499  // Get array.
500  channel = m[channel_id];
501  // Just in case - resize.
502  sample_ptr_->resize(sample_ptr_->size(), 1);
503  // Copy data.
504  (*channel) = sample_ptr_->block(channel_number_*height_*width_, 0, height_*width_, 1);
505  // Resize channel.
506  channel-> resize(height_, width_);
507  }//: end OMP critical section
508 
509  // Return it.
510  return channel;
511  }
512 
519  inline mic::types::MatrixPtr<eT> lazyReturnInputChannel (mic::types::MatrixPtr<eT> sample_ptr_, size_t sample_number_, size_t channel_number_){
520  return lazyReturnChannelFromSample(sample_ptr_, m, "xc", sample_number_, channel_number_, input_height, input_width);
521  }
522 
523 
530  inline mic::types::MatrixPtr<eT> lazyReturnOutputChannel (mic::types::MatrixPtr<eT> sample_ptr_, size_t sample_number_, size_t channel_number_){
531  return lazyReturnChannelFromSample(sample_ptr_, m, "yc", sample_number_, channel_number_, output_height, output_width);
532  }
533 
534 
535 
543  void lazyAllocateMatrixVector(std::vector< std::shared_ptr <mic::types::Matrix<eT> > > & vector_, size_t vector_size_, size_t matrix_height_, size_t matrix_width_) {
544  // Check if memory for the activations was allocated.
545  if (vector_.size() != vector_size_) {
546  // Free memory.
547  vector_.clear();
548  // Allocate.
549  for (size_t i=0; i < vector_size_; i++) {
550  // Allocate memory for activation of every neuron.
551  mic::types::MatrixPtr<eT> m = MAKE_MATRIX_PTR(eT, matrix_height_, matrix_width_);
552  vector_.push_back(m);
553  }//: for
554  }//: if
555  }
556 
557 
563  /*void normalizeMatrixForVisualization(mic::types::MatrixPtr<eT> matrix_) {
564  // Epsilon added for numerical stability.
565  //eT eps = 1e-5;
566  //eT l2 = matrix_->norm() + eps;
567 
568  // Calculate the norm.
569  eT max = matrix_->maxCoeff();
570  eT min = matrix_->minCoeff();
571  eT ultimate_max= (max > -min) ? max : -min;
572 
573  //std::cout << "before: min:" << (*matrix_).minCoeff() <<" max: " << (*matrix_).maxCoeff() << std::endl;
574  // Normalize the inputs to range <0.0, 1.0>.
575  // Check if we can normalize.
576  if (ultimate_max != 0.0) {
577  (*matrix_) = matrix_->unaryExpr ( [&] ( eT x ) { return ( (x)/ultimate_max ); } ).template cast<eT>();
578  //std::cout << "after: min:" << (*matrix_).minCoeff() <<" max: " << (*matrix_).maxCoeff() << std::endl;
579  }//: else: do nothing, all values are ~0 already.
580  }*/
581 
582 
586  virtual std::vector< std::shared_ptr <mic::types::Matrix<eT> > > & getInputActivations() {
587 
588  // Allocate memory.
590 
591  // Get y batch.
592  mic::types::MatrixPtr<eT> batch_x = s['x'];
593 
594  // Iterate through filters and generate "activation image" for each one.
595  for (size_t ib=0; ib< batch_size; ib++) {
596 
597  // Get input sample from batch!
598  mic::types::MatrixPtr<eT> sample_x = m["xs"];
599  (*sample_x) = batch_x->col(ib);
600 
601  // Iterate through input channels.
602  for (size_t ic=0; ic< input_depth; ic++) {
603  // Get activation "row".
604  mic::types::MatrixPtr<eT> row = x_activations[ib*input_depth + ic];
605 
606  // Copy "channel block" from given dx sample.
607  (*row) = sample_x->block(ic*input_height*input_width, 0, input_height*input_width, 1);
608  row->resize(input_height, input_width);
609 
610  }//: for channel
611  }//: for batch
612 
613  // Return output activations.
614  return x_activations;
615  }
616 
617 
621  virtual std::vector< std::shared_ptr <mic::types::Matrix<eT> > > & getInputGradientActivations() {
622 
623  // Allocate memory.
625 
626  // Get dx batch.
627  mic::types::MatrixPtr<eT> batch_dx = g['x'];
628 
629  // Iterate through filters and generate "activation image" for each one.
630  for (size_t ib=0; ib< batch_size; ib++) {
631 
632  // Get input sample from batch!
633  mic::types::MatrixPtr<eT> sample_dx = m["xs"];
634  (*sample_dx) = batch_dx->col(ib);
635 
636  // Iterate through input channels.
637  for (size_t ic=0; ic< input_depth; ic++) {
638  // Get dx "row".
639  mic::types::MatrixPtr<eT> row = dx_activations[ib*input_depth + ic];
640 
641  // Copy "channel block" from given dx sample.
642  (*row) = sample_dx->block(ic*input_height*input_width, 0, input_height*input_width, 1);
643  row->resize(input_height, input_width);
644 
645  }//: for channel
646  }//: for batch
647 
648  // Return dx activations.
649  return dx_activations;
650  }
651 
652 
656  virtual std::vector< std::shared_ptr <mic::types::Matrix<eT> > > & getOutputActivations() {
657 
658  // Allocate memory.
660 
661  // Get y batch.
662  mic::types::MatrixPtr<eT> batch_y = s['y'];
663 
664  // Iterate through filters and generate "activation image" for each one.
665  for (size_t ib=0; ib< batch_size; ib++) {
666 
667  // Get input sample from batch!
668  mic::types::MatrixPtr<eT> sample_y = m["ys"];
669  (*sample_y) = batch_y->col(ib);
670 
671  // Iterate through output channels.
672  for (size_t oc=0; oc< output_depth; oc++) {
673  // Get y "row".
674  mic::types::MatrixPtr<eT> row = y_activations[ib*output_depth + oc];
675 
676  // Copy "channel block" from given dx sample.
677  (*row) = sample_y->block(oc*output_height*output_width, 0, output_height*output_width, 1);
678  row->resize(output_height, output_width);
679 
680  }//: for channel
681  }//: for batch
682 
683  // Return output activations.
684  return y_activations;
685  }
686 
687 
691  virtual std::vector< std::shared_ptr <mic::types::Matrix<eT> > > & getOutputGradientActivations() {
692 
693  // Allocate memory.
695 
696  // Get dy batch.
697  mic::types::MatrixPtr<eT> batch_dy = g['y'];
698 
699  // Iterate through filters and generate "activation image" for each one.
700  for (size_t ib=0; ib< batch_size; ib++) {
701 
702  // Get input sample from batch!
703  mic::types::MatrixPtr<eT> sample_dy = m["ys"];
704  (*sample_dy) = batch_dy->col(ib);
705 
706  // Iterate through output channels.
707  for (size_t oc=0; oc< output_depth; oc++) {
708  // Get y "row".
709  mic::types::MatrixPtr<eT> row = dy_activations[ib*output_depth + oc];
710 
711  // Copy "channel block" from given dx sample.
712  (*row) = sample_dy->block(oc*output_height*output_width, 0, output_height*output_width, 1);
713  row->resize(output_height, output_width);
714 
715  }//: for channel
716  }//: for batch
717 
718  // Return output activations.
719  return dy_activations;
720  }
721 
722 
723 protected:
724 
726  size_t input_height;
727 
729  size_t input_width;
730 
732  size_t input_depth;
733 
736 
738  size_t output_width;
739 
741  size_t output_depth;
742 
744  size_t batch_size;
745 
748 
750  std::string layer_name;
751 
753  mic::types::MatrixArray<eT> s;
754 
756  mic::types::MatrixArray<eT> g;
757 
759  mic::types::MatrixArray<eT> p;
760 
762  mic::types::MatrixArray<eT> m;
763 
766 
768  std::vector< std::shared_ptr <mic::types::Matrix<eT> > > x_activations;
769 
771  std::vector< std::shared_ptr <mic::types::Matrix<eT> > > dx_activations;
772 
774  std::vector< std::shared_ptr <mic::types::Matrix<eT> > > y_activations;
775 
777  std::vector< std::shared_ptr <mic::types::Matrix<eT> > > dy_activations;
778 
779 
783  Layer () { }
784 
785 private:
786  // Friend class - required for using boost serialization.
787  template<typename tmp> friend class MultiLayerNeuralNetwork;
788  template<typename tmp> friend class BackpropagationNeuralNetwork;
789  template<typename tmp> friend class HebbianNeuralNetwork;
790 
791  // Friend class - required for using boost serialization.
792  friend class boost::serialization::access;
793 
799  template<class Archive>
800  void serialize(Archive & ar, const unsigned int version) {
801  // Archive parameters.
802  ar & input_height;
803  ar & input_width;
804  ar & input_depth;
805  ar & output_height;
806  ar & output_width;
807  ar & output_depth;
808  ar & batch_size;
809  ar & layer_type;
810  ar & layer_name;
811  // Archive four matrix arrays.
812  ar & s;
813  ar & g;
814  ar & p;
815  ar & m;
816  // TODO: serialize optimization function!
817  }
818 
819 
820 };
821 
822 
823 } /* namespace mlnn */
824 } /* namespace mic */
825 
826 
827 // Just in the case that something important will change in the Layer class - set version.
828 BOOST_CLASS_VERSION(mic::mlnn::Layer<float>, 2)
829 BOOST_CLASS_VERSION(mic::mlnn::Layer<double>, 2)
830 
831 #endif /* SRC_MLNN_LAYER_HPP_ */
virtual std::vector< std::shared_ptr< mic::types::Matrix< eT > > > & getInputGradientActivations()
Definition: Layer.hpp:621
Class implementing the layer with Exponential Linear Unit (ELU). http://arxiv.org/pdf/1511.07289v5.pdf.
Definition: ELU.hpp:39
size_t inputSize()
Returns size (length) of inputs.
Definition: Layer.hpp:255
mic::types::MatrixPtr< eT > lazyReturnInputChannel(mic::types::MatrixPtr< eT > sample_ptr_, size_t sample_number_, size_t channel_number_)
Definition: Layer.hpp:519
std::string layer_name
Name (identifier of the type) of the layer.
Definition: Layer.hpp:750
size_t input_depth
Number of channels of the input (e.g. 3 for RGB images).
Definition: Layer.hpp:732
std::vector< std::shared_ptr< mic::types::Matrix< eT > > > y_activations
Vector containing activations of output neurons - used in visualization.
Definition: Layer.hpp:774
size_t batch_size
Size (length) of (mini)batch.
Definition: Layer.hpp:744
void add(std::string name_, size_t rows_, size_t cols_)
size_t batchSize()
Returns size (length) of (mini)batch.
Definition: Layer.hpp:265
size_t outputSize()
Returns size (length) of outputs.
Definition: Layer.hpp:260
const std::string type() const
Definition: Layer.hpp:323
void serialize(Archive &ar, const unsigned int version)
Definition: Layer.hpp:800
Class representing a multi-layer neural network based on backpropagation/gradient descent...
mic::types::MatrixPtr< eT > calculateNumericalGradient(mic::types::MatrixPtr< eT > x_, mic::types::MatrixPtr< eT > target_y_, mic::types::MatrixPtr< eT > param_, loss loss_, eT delta_)
Definition: Layer.hpp:219
virtual void backward()=0
mic::types::MatrixPtr< eT > lazyReturnOutputChannel(mic::types::MatrixPtr< eT > sample_ptr_, size_t sample_number_, size_t channel_number_)
Definition: Layer.hpp:530
mic::types::MatrixPtr< eT > backward(mic::types::MatrixPtr< eT > dy_)
Definition: Layer.hpp:184
const std::string name() const
Returns name of the layer.
Definition: Layer.hpp:270
LayerTypes layer_type
Type of the layer.
Definition: Layer.hpp:747
mic::types::MatrixPtr< eT > lazyReturnSampleFromBatch(mic::types::MatrixPtr< eT > batch_ptr_, mic::types::MatrixArray< eT > &array_, std::string id_, size_t sample_number_, size_t sample_size_)
Definition: Layer.hpp:433
virtual void resizeBatch(size_t batch_size_)
Definition: Layer.hpp:199
mic::types::MatrixPtr< eT > getGradient(std::string name_)
Definition: Layer.hpp:291
virtual ~Layer()
Definition: Layer.hpp:154
size_t input_height
Height of the input (e.g. 28 for MNIST).
Definition: Layer.hpp:726
void lazyAllocateMatrixVector(std::vector< std::shared_ptr< mic::types::Matrix< eT > > > &vector_, size_t vector_size_, size_t matrix_height_, size_t matrix_width_)
Definition: Layer.hpp:543
Class representing a multi-layer neural network.
Definition: Layer.hpp:86
size_t output_depth
Number of filters = number of output channels.
Definition: Layer.hpp:741
mic::types::MatrixPtr< eT > lazyReturnOutputSample(mic::types::MatrixPtr< eT > batch_ptr_, size_t sample_number_)
Definition: Layer.hpp:470
std::vector< std::shared_ptr< mic::types::Matrix< eT > > > x_activations
Vector containing activations of input neurons - used in visualization.
Definition: Layer.hpp:768
virtual void update(eT alpha_, eT decay_=0.0f)=0
std::vector< std::shared_ptr< mic::types::Matrix< eT > > > dx_activations
Vector containing activations of gradients of inputs (dx) - used in visualization.
Definition: Layer.hpp:771
mic::neural_nets::optimization::OptimizationArray< eT > opt
Array of optimization functions.
Definition: Layer.hpp:765
LayerTypes
Enumeration of possible layer types.
Definition: Layer.hpp:58
virtual void resetGrads()
Definition: Layer.hpp:245
virtual void forward(bool test=false)=0
void setOptimization()
Definition: Layer.hpp:307
mic::types::MatrixPtr< eT > getState(std::string name_)
Definition: Layer.hpp:284
mic::types::MatrixPtr< eT > forward(mic::types::MatrixPtr< eT > x_, bool test=false)
Definition: Layer.hpp:165
void setState(std::string name_, mic::types::MatrixPtr< eT > mat_ptr_)
Definition: Layer.hpp:298
mic::types::MatrixArray< eT > s
States - contains input [x] and output [y] matrices.
Definition: Layer.hpp:753
mic::types::MatrixArray< eT > g
Gradients - contains input [x] and output [y] matrices.
Definition: Layer.hpp:756
mic::types::MatrixPtr< eT > getParam(std::string name_)
Definition: Layer.hpp:277
mic::types::MatrixPtr< eT > lazyReturnInputSample(mic::types::MatrixPtr< eT > batch_ptr_, size_t sample_number_)
Definition: Layer.hpp:460
size_t input_width
Width of the input (e.g. 28 for MNIST).
Definition: Layer.hpp:729
virtual std::vector< std::shared_ptr< mic::types::Matrix< eT > > > & getInputActivations()
Definition: Layer.hpp:586
size_t output_height
Number of receptive fields in a single channel - vertical direction.
Definition: Layer.hpp:735
std::vector< std::shared_ptr< mic::types::Matrix< eT > > > dy_activations
Vector containing activations of gradients of outputs (dy) - used in visualization.
Definition: Layer.hpp:777
Class representing a multi-layer neural network based on hebbian learning.
Layer(size_t input_height_, size_t input_width_, size_t input_depth_, size_t output_height_, size_t output_width_, size_t output_depth_, LayerTypes layer_type_, std::string name_="layer")
Definition: Layer.hpp:107
size_t output_width
Number of receptive fields in a single channel - horizontal direction.
Definition: Layer.hpp:738
friend std::ostream & operator<<(std::ostream &os_, Layer &obj_)
Definition: Layer.hpp:385
virtual std::vector< std::shared_ptr< mic::types::Matrix< eT > > > & getOutputActivations()
Definition: Layer.hpp:656
virtual std::vector< std::shared_ptr< mic::types::Matrix< eT > > > & getOutputGradientActivations()
Definition: Layer.hpp:691
virtual std::string streamLayerParameters()
Definition: Layer.hpp:363
mic::types::MatrixArray< eT > m
Memory - a list of temporal parameters, to be used by the derived classes.
Definition: Layer.hpp:762
mic::types::MatrixPtr< eT > lazyReturnChannelFromSample(mic::types::MatrixPtr< eT > sample_ptr_, mic::types::MatrixArray< eT > &array_, std::string id_, size_t sample_number_, size_t channel_number_, size_t height_, size_t width_)
Definition: Layer.hpp:487
mic::types::MatrixArray< eT > p
Parameters - parameters of the layer, to be used by the derived classes.
Definition: Layer.hpp:759