MachineIntelligenceCore:NeuralNets
HebbianNeuralNetwork.hpp
#ifndef HEBBIANNEURALNETWORK_HPP_
#define HEBBIANNEURALNETWORK_HPP_

#include <mlnn/MultiLayerNeuralNetwork.hpp>

namespace mic {
namespace mlnn {

/*!
 * \brief Class representing a multi-layer neural network based on Hebbian learning.
 */
template <typename eT>
class HebbianNeuralNetwork : public MultiLayerNeuralNetwork<eT> {
public:

    /*!
     * Constructor. Sets the network name and the default learning rule.
     * @param name_ Name of the network.
     */
    HebbianNeuralNetwork(std::string name_ = "hebbian_net") : MultiLayerNeuralNetwork<eT> (name_)
    {
        // Set the classical Hebbian rule as the default learning rule.
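        // A sketch of the classical rule in its standard form (an assumption,
        // not taken from this file): dW = eta * y * x^T, i.e. weights are
        // strengthened in proportion to correlated input/output activity.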
        MultiLayerNeuralNetwork<eT>::template setOptimization<mic::neural_nets::learning::HebbianRule<eT> > ();
    }

    //! Virtual destructor - empty.
    virtual ~HebbianNeuralNetwork() { }

    /*!
     * Performs the forward pass: propagates the activations from the first layer to the last.
     * @param input_data Input data batch.
     * @param skip_dropout Flag for skipping dropout layers [default: false].
     */
    void forward(mic::types::MatrixPtr<eT> input_data, bool skip_dropout = false) {
        // Make sure that there are some layers in the nn!
        assert(layers.size() != 0);

        // Boost::Matrix is column-major!
        LOG(LDEBUG) << "Inputs size: " << input_data->rows() << "x" << input_data->cols();
        LOG(LDEBUG) << "First layer input matrix size: " << layers[0]->s['x']->rows() << "x" << layers[0]->s['x']->cols();

        // Make sure that the dimensions are ok.
        // Check only the rows, as the number of columns determines the batch size - and we allow it to change dynamically.
        assert((layers[0]->s['x'])->rows() == input_data->rows());
        //LOG(LDEBUG) << " input_data: " << input_data.transpose();

        // Connect the layers by setting the input matrix pointers to point at the output matrices.
        // This way there is no need to copy data between layers anymore.
        if (!connected) {
            // Set pointers - pass the result to the next layer: x(next layer) = y(current layer).
            if (layers.size() > 1)
                for (size_t i = 0; i < layers.size()-1; i++) {
                    // Assert sizes.
                    assert(layers[i+1]->s['x']->rows() == layers[i]->s['y']->rows());
                    // Connect pointers.
                    layers[i+1]->s['x'] = layers[i]->s['y'];
                }//: for
            connected = true;
        }

        //assert((layers[0]->s['x'])->cols() == input_data->cols());
        // Change the batch size - if required.
        resizeBatch(input_data->cols());

        // Copy the inputs to the lowest point (first layer) of the network.
        (*(layers[0]->s['x'])) = (*input_data);

        // Compute the forward activations.
        for (size_t i = 0; i < layers.size(); i++) {
            LOG(LDEBUG) << "Layer [" << i << "] " << layers[i]->name() << ": (" <<
                layers[i]->inputSize() << "x" << layers[i]->batchSize() << ") -> (" <<
                layers[i]->outputSize() << "x" << layers[i]->batchSize() << ")";

            // Perform the forward computation: y = f(x).
            layers[i]->forward(skip_dropout);

        }
        //LOG(LDEBUG) << " predictions: " << getPredictions()->transpose();
    }

    /*!
     * Performs a single training step: a forward pass followed by the weight update.
     * Hebbian learning is unsupervised, so no target values are required and no loss is computed.
     * @param encoded_batch_ Encoded input batch.
     * @param learning_rate_ Learning rate.
     * @return Loss value - currently always 0.
     */
    eT train(mic::types::MatrixPtr<eT> encoded_batch_, eT learning_rate_) {

        // Forward propagate the activations from the first layer to the last.
        forward(encoded_batch_);

        // Apply the changes - according to the optimization function.
        update(learning_rate_);

        // Calculate the mean value of the loss function (i.e. loss divided by the batch size).
        //eT loss_value = loss->calculateMeanLoss(encoded_targets_, encoded_predictions);

        // Return loss.
        return 0;//loss_value;
    }

    /*!
     * Performs a testing pass: forward propagation with dropout layers skipped.
     * @param encoded_batch_ Encoded input batch.
     * @return Mean loss - currently always 0.
     */
    eT test(mic::types::MatrixPtr<eT> encoded_batch_) {
        // Skip dropout layers at test time.
        bool skip_dropout = true;

        forward(encoded_batch_, skip_dropout);

        // Calculate the mean loss.
        return 0;//loss->calculateMeanLoss(encoded_targets_, encoded_predictions);
    }

    // Unhide the overloaded public methods & fields inherited from the template class MultiLayerNeuralNetwork via "using" statements.
    // (The exact list is assumed from the members referenced above.)
    using MultiLayerNeuralNetwork<eT>::layers;
    using MultiLayerNeuralNetwork<eT>::update;
    using MultiLayerNeuralNetwork<eT>::resizeBatch;

protected:
    // Unhide the protected fields inherited from the template class MultiLayerNeuralNetwork via "using" statement.
    using MultiLayerNeuralNetwork<eT>::connected;

};

} /* namespace mlnn */
} /* namespace mic */

#endif /* HEBBIANNEURALNETWORK_HPP_ */
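
A minimal usage sketch follows. Only the constructor, train() and test() come from the class above; the pushLayer() call, the fully_connected::Linear layer and the MAKE_MATRIX_PTR helper are assumptions based on the wider MachineIntelligenceCore codebase, not on this header.

#include <mlnn/HebbianNeuralNetwork.hpp>

int main() {
    // Create a Hebbian network with a single linear layer (assumed API):
    // 784 inputs -> 100 outputs.
    mic::mlnn::HebbianNeuralNetwork<double> net("my_hebbian_net");
    net.pushLayer(new mic::mlnn::fully_connected::Linear<double>(784, 100));

    // Prepare a batch of 16 random input samples (assumed helper macro;
    // setRandom() comes from the underlying Eigen matrix).
    mic::types::MatrixPtr<double> batch = MAKE_MATRIX_PTR(double, 784, 16);
    batch->setRandom();

    // One unsupervised training step with learning rate 0.001;
    // returns 0, as Hebbian learning computes no loss.
    net.train(batch, 0.001);

    // Testing pass - forward propagation with dropout layers skipped.
    net.test(batch);
    return 0;
}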