MachineIntelligenceCore:NeuralNets
HebbianLinear.hpp
#ifndef SRC_MLNN_HEBBIANLINEAR_HPP_
#define SRC_MLNN_HEBBIANLINEAR_HPP_

#include <mlnn/layer/Layer.hpp>

namespace mic {
namespace mlnn {
namespace fully_connected {
/*!
 * Class implementing a linear, fully connected layer trained with Hebbian learning.
 */
template <typename eT = float>
class HebbianLinear : public mic::mlnn::Layer<eT> {
public:

	/*!
	 * Constructor for 1D input/output - delegates to the generic (3D) constructor.
	 */
	HebbianLinear(size_t inputs_, size_t outputs_, eT permanence_threshold_ = 0.5, eT proximal_threshold_ = 0.5, std::string name_ = "HebbianLinear") :
		HebbianLinear(inputs_, 1, 1, outputs_, 1, 1, permanence_threshold_, proximal_threshold_, name_)
	{
	}

	/*!
	 * Generic constructor, accepting 3D input/output dimensions.
	 */
	HebbianLinear(size_t input_height_, size_t input_width_, size_t input_depth_,
			size_t output_height_, size_t output_width_, size_t output_depth_,
			eT permanence_threshold_ = 0.5, eT proximal_threshold_ = 0.5,
			std::string name_ = "HebbianLinear") :
		Layer<eT>::Layer(input_height_, input_width_, input_depth_,
				output_height_, output_width_, output_depth_,
				LayerTypes::HebbianLinear, name_)
	{
		// Create the weights matrix (outputs x inputs).
		Layer<eT>::p.add("W", Layer<eT>::outputSize(), Layer<eT>::inputSize());

		// Initialize the weights of the W matrix with a uniform, Glorot-style range.
		double range = sqrt(6.0 / double(Layer<eT>::outputSize() + Layer<eT>::inputSize()));
		Layer<eT>::p['W']->rand(-range, range);

		// Set Hebbian learning as the default optimization function.
		Layer<eT>::template setOptimization<mic::neural_nets::learning::HebbianRule<eT> >();
	}

	/*!
	 * Virtual destructor - empty.
	 */
	virtual ~HebbianLinear() { }

	/*!
	 * Performs the forward pass: a linear projection followed by a binary threshold.
	 */
	void forward(bool test_ = false) {
		// Get input matrices.
		mic::types::Matrix<eT> x = (*s['x']);
		mic::types::Matrix<eT> W = (*p['W']);
		// Get the output pointer - so the results will be stored!
		mic::types::MatrixPtr<eT> y = s['y'];

		// Forward pass.
		(*y) = W * x;
		// Iterate over all elements of the output matrix.
		for (size_t i = 0; i < (size_t)y->rows() * y->cols(); i++) {
			// Sigmoid (disabled).
			//(*y)[i] = 1.0f / (1.0f + ::exp(-(*y)[i]));
			// Binary threshold. Note that the cut-off (0.8) is hard-coded and does not
			// use the proximal_threshold_ constructor argument.
			(*y)[i] = ((*y)[i] > 0.8) ? 1.0f : 0.0f;
		}//: for
	}
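
	// In matrix form the forward pass above computes:
	//
	//   y = 1[ W x > 0.8 ]
	//
	// i.e. a linear projection followed by an element-wise indicator (step) function,
	// producing a binary activity pattern.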

	/*!
	 * Backward pass - intentionally not supported for Hebbian layers.
	 */
	void backward() {
		throw std::logic_error("Backward propagation should not be used with layers using Hebbian learning!");
	}

	/*!
	 * Applies the weight update, delegating to the layer's optimization function (HebbianRule by default).
	 */
	void update(eT alpha_, eT decay_ = 0.0f) {
		opt["W"]->update(p['W'], s['x'], s['y'], alpha_);
	}
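
	// For orientation, a classic Hebbian update has the local form (a sketch only -
	// the exact normalization used by mic::neural_nets::learning::HebbianRule may differ):
	//
	//   dW = alpha_ * y * x^T,   W += dW
	//
	// so weights between co-active input/output units are strengthened without any
	// gradient information, which is why backward() is never needed.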

	/*!
	 * Returns normalized "activation images" of the neurons - one matrix of size (height_ x width_) per output neuron.
	 */
	std::vector< std::shared_ptr <mic::types::Matrix<eT> > > & getActivations(size_t height_, size_t width_) {
		// Check whether memory for the activations was already allocated.
		if (neuron_activations.size() == 0) {
			for (size_t i = 0; i < outputSize(); i++) {
				// Allocate memory for the activation of every neuron.
				mic::types::MatrixPtr<eT> row = MAKE_MATRIX_PTR(eT, inputSize(), 1);
				neuron_activations.push_back(row);
			}//: for
		}//: if

		// Epsilon added for numerical stability.
		eT eps = 1e-10;

		mic::types::MatrixPtr<eT> W = p["W"];
		// Iterate through the neurons and generate an "activation image" for each one.
		for (size_t i = 0; i < outputSize(); i++) {
			// Get the matrix storing the i-th neuron's activation.
			mic::types::MatrixPtr<eT> row = neuron_activations[i];
			// Copy the i-th row of the weight matrix.
			(*row) = W->row(i);
			// Resize it into the requested image shape.
			row->resize(height_, width_);
			// Calculate the L2 norm.
			eT l2 = row->norm() + eps;
			// Divide by the L2 norm and shift by 0.5f, mapping the values towards the <0.0, 1.0> range.
			(*row) = row->unaryExpr([&](eT x) { return (x / l2 + 0.5f); });
		}//: for

		// Return activations.
		return neuron_activations;
	}
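
	// Example (sketch): for a layer created with 784 inputs (e.g. 28x28 images),
	// getActivations(28, 28) yields one 28x28 matrix per output neuron, each holding
	// that neuron's incoming weights, L2-normalized and shifted for display.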

	// Unhide the overloaded methods inherited from the template class Layer via "using" statements.
	using Layer<eT>::forward;
	using Layer<eT>::backward;

protected:
	// Unhide the fields inherited from the template class Layer via "using" statements.
	using Layer<eT>::s;
	using Layer<eT>::p;
	using Layer<eT>::m;
	using Layer<eT>::inputSize;
	using Layer<eT>::outputSize;
	using Layer<eT>::batch_size;
	using Layer<eT>::opt;

private:
	// Friend class - required for using boost serialization.
	template<typename tmp> friend class mic::mlnn::MultiLayerNeuralNetwork;
	//template<typename tmp> friend class mic::mlnn::HebbianNeuralNetwork;

	/// Vector containing activations of neurons.
	std::vector< std::shared_ptr <mic::types::Matrix<eT> > > neuron_activations;

};

} /* namespace fully_connected */
} /* namespace mlnn */
} /* namespace mic */

#endif /* SRC_MLNN_HEBBIANLINEAR_HPP_ */
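
A minimal usage sketch follows. It is not part of the original header: the include path, the public forward(MatrixPtr) overload (suggested by the "using Layer<eT>::forward;" unhiding above) and the Eigen-style comma initializer on mic::types::Matrix are assumptions about the surrounding MachineIntelligenceCore API.

#include <mlnn/HebbianLinear.hpp> // assumed include path

int main() {
	using namespace mic::mlnn::fully_connected;

	// A layer mapping 4 inputs to 2 outputs, with default thresholds.
	HebbianLinear<float> layer(4, 2);

	// Prepare a binary input pattern (4 rows, batch size 1).
	mic::types::MatrixPtr<float> x = MAKE_MATRIX_PTR(float, 4, 1);
	(*x) << 1, 0, 1, 0; // Eigen-style comma initializer (assumed).

	// Forward pass: y = threshold(W * x). The public forward(MatrixPtr)
	// overload is assumed to copy the input into s['x'] and return s['y'].
	mic::types::MatrixPtr<float> y = layer.forward(x);

	// Local Hebbian weight update with learning rate 0.01 - no backward pass.
	layer.update(0.01f);

	// Retrieve 2x2 "activation images" of the two neurons for visualization.
	auto & activations = layer.getActivations(2, 2);

	return 0;
}

Note that update() reads both s['x'] and s['y'], so forward() must be called before each update for the weight change to reflect the current input/output pair.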