MachineIntelligenceCore:NeuralNets
 All Classes Namespaces Files Functions Variables Enumerations Enumerator Friends Macros
NormalizedZerosumHebbianRule.hpp
Go to the documentation of this file.
1 
25 #ifndef NORMALIZEDZEROSUMHEBBIANRULE_HPP_
26 #define NORMALIZEDZEROSUMHEBBIANRULE_HPP_
27 
#include <algorithm>
#include <cassert>
#include <random>
#include <vector>
30 
31 namespace mic {
32 namespace neural_nets {
33 namespace learning {
34 
40 template <typename eT=float>
42 public:
48  NormalizedZerosumHebbianRule(size_t rows_, size_t cols_) {
49  delta = MAKE_MATRIX_PTR(eT, rows_, cols_);
50  delta->zeros();
51  }
52 
53  // Virtual destructor - empty.
55 
56 
64  virtual void update(mic::types::MatrixPtr<eT> p_, mic::types::MatrixPtr<eT> x_, mic::types::MatrixPtr<eT> y_, eT learning_rate_ = 0.001) {
65  assert(p_->rows() == y_->rows());
66  assert(p_->cols() == x_->rows());
67  assert(x_->cols() == y_->cols());
68 
69  // Calculate the update using hebbian "fire together, wire together".
70  mic::types::MatrixPtr<eT> delta = calculateUpdate(x_, y_, learning_rate_);
71 
72  // weight += delta;
73  (*p_) += (*delta);
74  // Eigen doesn't check for div by 0 (the doc lies...)
75  for(auto i = 0 ; i < p_->rows() ; i++){
76  if(p_->row(i).norm() != 0){
77  p_->row(i) = p_->row(i).normalized();
78  }
79  }
80  }
81 
88  virtual mic::types::MatrixPtr<eT> calculateUpdate(mic::types::MatrixPtr<eT> x_, mic::types::MatrixPtr<eT> y_, eT learning_rate_) {
89  // delta based on winner take all: Best corresponding kernel gets to learn for each slice
90  // Winner take all happens for each column of the output matrix, between the rows of the kernels matrix
91 
92  // Iterate over the output columns
93  delta->zeros();
94  typename mic::types::Matrix<eT>::Index argmax, argmin;
95 
96  //Randomize access to the indices of image patches
97  std::vector<typename mic::types::Matrix<eT>::Index> shuffled_indices;
98  for(auto i = 0 ; i < y_->cols() ; i++) shuffled_indices.push_back(i);
99  std::random_shuffle(std::begin(shuffled_indices), std::end(shuffled_indices));
100 
101  for(auto i: shuffled_indices){
102  y_->col(i).maxCoeff(&argmax);
103  y_->col(i).minCoeff(&argmin);
104  //argmax = am(y_->col(i));
105  if(argmin != argmax){ // If all filters respond equally, then do nothing about this input patch
106  // Pick the image slice and apply it to best matching filter (ie: row of p['W'])
107  delta->row(argmax) = x_->col(i);
108  // Make the vector zero-sum
109  delta->row(argmax).array() -= delta->row(argmax).sum() / delta->cols();
110  // Eigen doesn't check for div by 0 (the doc lies...)
111  if(delta->row(argmax).norm() != 0){
112  delta->row(argmax) = delta->row(argmax).normalized();
113  }
114  }
115 
116  }
117  (*delta) *= learning_rate_;
118  return delta;
119  }
120 
121 
 122 protected:
 123  // Matrix holding the most recently computed weight update (delta);
 123  // allocated/zeroed in the constructor and refilled by calculateUpdate().
 124  mic::types::MatrixPtr<eT> delta;
125 };
126 
127 } //: namespace learning
128 } /* namespace neural_nets */
129 } /* namespace mic */
130 
131 #endif /* NORMALIZEDZEROSUMHEBBIANRULE_HPP_ */
virtual mic::types::MatrixPtr< eT > calculateUpdate(mic::types::MatrixPtr< eT > x_, mic::types::MatrixPtr< eT > y_, eT learning_rate_)
Abstract class representing interface to optimization function.
virtual void update(mic::types::MatrixPtr< eT > p_, mic::types::MatrixPtr< eT > x_, mic::types::MatrixPtr< eT > y_, eT learning_rate_=0.001)
Updates according to a modified Hebbian rule (wij += ni * f(x, y)) with additional zero-summing of the winning update rows and normalization of the non-zero weight rows to unit length.