MachineIntelligenceCore:NeuralNets
Padding.hpp
#ifndef SRC_MLNN_PADDING_HPP_
#define SRC_MLNN_PADDING_HPP_

#include <mlnn/layer/Layer.hpp>

#include <cassert>
#include <string>

namespace mic {
namespace mlnn {
namespace convolution {

/*!
 * \brief Class implementing the padding operation - expanding the size of the image (matrix) by a margin of n pixels.
 */
template <typename eT=float>
class Padding : public mic::mlnn::Layer<eT> {
public:

	Padding(size_t input_height_, size_t input_width_, size_t depth_,
		size_t padding_,
		std::string name_ = "Padding") :
		Layer<eT>::Layer(input_height_, input_width_, depth_,
			(input_height_ + 2*padding_), (input_width_ + 2*padding_), depth_,
			LayerTypes::Padding, name_),
		padding(padding_)
	{

	}
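	// Note: for example, a 28x28x1 input with padding_ = 2 registers a 32x32x1 output
	// (output height/width = input height/width + 2*padding_, depth unchanged).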

	virtual ~Padding() { }

	void forward(bool test = false) {
		LOG(LTRACE) << "Padding::forward\n";

		// Get pointer to the input batch.
		mic::types::MatrixPtr<eT> batch_x = s['x'];
		//std::cout << "forward batch_x=\n" << (*batch_x) << std::endl;
		//std::cout << "forward input x activation: min:" << (*batch_x).minCoeff() << " max: " << (*batch_x).maxCoeff() << std::endl;

		// Get pointer to the output batch - this is where the results will be stored.
		mic::types::MatrixPtr<eT> batch_y = s['y'];
		batch_y->setZero();

		// TODO: should work for more channels - but requires testing!
		assert(input_depth == 1);

		// Iterate through batch samples.
		#pragma omp parallel for
		for (size_t ib = 0; ib < batch_size; ib++) {

			// Iterate through input/output channels.
			for (size_t ic = 0; ic < input_depth; ic++) {

				// Iterate through columns ("blocks" of height elements) of the channel.
				for (size_t iw = 0; iw < input_width; iw++) {
					// Calculate addresses: ia - start of column iw in the input image,
					// oa - start of the corresponding column in the padded output image.
					size_t ia = ic * (input_width) * (input_height) + iw*(input_height);
					size_t oa = ic * (input_width + 2*padding) * (input_height + 2*padding) + (iw+padding)*(input_height + 2*padding) + padding;
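					// Example (following the formulas above): for a single-channel 3x3 input
					// with padding = 1 the padded image is 5x5, so input column iw = 0 (ia = 0)
					// lands at oa = (0+1)*5 + 1 = 6, i.e. zero-based row 1, column 1 of the
					// padded image (each sample is stored column-major in one batch column).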
					//std::cout << " iw = " << iw << " ia = " << ia << " oa = " << oa << std::endl;

					#pragma omp critical
					{
						// Copy "height" block from input to output.
						batch_y->block(oa, ib, input_height, 1) =
							batch_x->block(ia, ib, input_height, 1);
					}//: omp critical

				}//: for width
			}//: for channels
		}//: for batch
		LOG(LTRACE) << "Padding::forward end\n";
	}

	void backward() {
		LOG(LTRACE) << "Padding::backward\n";

		// Get pointer to dy batch.
		mic::types::MatrixPtr<eT> batch_dy = g['y'];

		// Get pointer to dx batch.
		mic::types::MatrixPtr<eT> batch_dx = g['x'];

		// Iterate through batch samples.
		#pragma omp parallel for
		for (size_t bi = 0; bi < batch_size; bi++) {

			// Iterate through input/output channels.
			for (size_t ic = 0; ic < input_depth; ic++) {

				// Iterate through columns ("blocks" of height elements) of the channel.
				for (size_t iw = 0; iw < input_width; iw++) {
					// Calculate addresses - the same mapping as in forward().
					size_t ia = ic * (input_width) * (input_height) + iw*(input_height);
					size_t oa = ic * (input_width + 2*padding) * (input_height + 2*padding) + (iw+padding)*(input_height + 2*padding) + padding;
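					// The backward pass simply crops: gradients of the padded output are copied
					// back into the corresponding (unpadded) region of the input gradient, while
					// gradients falling on the zero-padding margin are discarded.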

					// Copy "height" block from the output gradient (dy) to the input gradient (dx).
					batch_dx->block(ia, bi, input_height, 1) = batch_dy->block(oa, bi, input_height, 1);

				}//: for width

			}//: for channels
		}//: for batch

		LOG(LTRACE) << "Padding::backward end\n";
	}

	/*!
	 * Performs update of the parameters - empty, as the padding layer has no trainable parameters.
	 */
	void update(eT alpha_, eT decay_ = 0.0f) { }

	// Unhide the overloaded methods inherited from the template class Layer via the "using" statement.
	using Layer<eT>::forward;
	using Layer<eT>::backward;

protected:
	// Unhide the fields inherited from the template class Layer via the "using" statement.
	using Layer<eT>::g;
	using Layer<eT>::s;
	using Layer<eT>::p;
	using Layer<eT>::m;

	// Uncover "sizes" for visualization.
	using Layer<eT>::input_height;
	using Layer<eT>::input_width;
	using Layer<eT>::input_depth;
	using Layer<eT>::batch_size;

	// Size of padding.
	size_t padding;

private:
	// Friend class - required for using boost serialization.
	template<typename tmp> friend class mic::mlnn::MultiLayerNeuralNetwork;

};

} /* namespace convolution */
} /* namespace mlnn */
} /* namespace mic */

#endif /* SRC_MLNN_PADDING_HPP_ */
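As a quick sanity check of the address arithmetic used in forward(), the following minimal standalone sketch (plain C++ with std::vector; it does not use the library's mic::types matrices, and names such as H, W and P are illustrative only) pads a single-channel 3x3 image with a margin of 1 pixel using the same column-major index formulas:

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
	const size_t H = 3, W = 3, P = 1;        // Input height/width and padding margin.
	const size_t OH = H + 2*P, OW = W + 2*P; // Padded (output) height and width.

	// Single-channel input image stored column-major: element (row, col) lives at col*H + row.
	std::vector<float> x(H * W);
	for (size_t i = 0; i < x.size(); ++i)
		x[i] = static_cast<float>(i + 1);

	// Zero-initialized padded output image.
	std::vector<float> y(OH * OW, 0.0f);

	// Copy every input column into the padded image, shifted by P rows and P columns
	// (this mirrors the ia/oa formulas in Padding::forward for input_depth == 1).
	for (size_t iw = 0; iw < W; ++iw) {
		const size_t ia = iw * H;            // Start of column iw in the input.
		const size_t oa = (iw + P) * OH + P; // Start of the corresponding block in the output.
		for (size_t ih = 0; ih < H; ++ih)
			y[oa + ih] = x[ia + ih];
	}

	// Print the padded image row by row - the original 3x3 block appears surrounded by zeros.
	for (size_t r = 0; r < OH; ++r) {
		for (size_t c = 0; c < OW; ++c)
			std::cout << y[c * OH + r] << " ";
		std::cout << "\n";
	}
	return 0;
}

Running this prints a 5x5 matrix whose interior holds the original 3x3 values surrounded by a one-pixel border of zeros, matching what Padding::forward produces for a single-sample batch.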