#ifndef SPARSELINEAR_H_
#define SPARSELINEAR_H_

// ...

namespace fully_connected {

// ...

template <typename eT = float>
// ... (class declaration elided in this excerpt)

    SparseLinear<eT>(size_t inputs_, size_t outputs_, std::string name_ = "SparseLinear") :
        // ... (initializer list and constructor body elided)
    // Temporary matrices kept in the layer memory array m: the measured mean
    // activation of each output unit (ro) and the resulting sparsity penalty.
    mic::types::MatrixPtr<eT> ro = m["ro"];
    // ...
    mic::types::MatrixPtr<eT> penalty = m["penalty"];
88 (*
g[
'W']) = (*
g[
'y']) * ((*
s[
'x']).transpose());
89 (*
g[
'b']) = (*
g[
'y']).rowwise().mean();
90 (*
g[
'x']) = (*
p[
'W']).transpose() * (*
g[
'y']);
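Spelled out, these three assignments are the standard backpropagation rules for a linear layer stored with one column per sample: dL/dW = dL/dy * x^T, dL/db is the per-unit mean of dL/dy over the batch, and dL/dx = W^T * dL/dy. The standalone Eigen sketch below repeats the computation with explicit shapes; the dimensions are invented for illustration and nothing here uses the mic:: types themselves.

// Standalone Eigen sketch of the same backward pass with explicit shapes
// (the sizes are made up for the example).
#include <Eigen/Dense>
#include <iostream>

int main() {
    const int inputs = 3, outputs = 2, batch = 5;

    Eigen::MatrixXf x  = Eigen::MatrixXf::Random(inputs,  batch);   // layer input, [inputs  x batch]
    Eigen::MatrixXf W  = Eigen::MatrixXf::Random(outputs, inputs);  // weights,     [outputs x inputs]
    Eigen::MatrixXf dy = Eigen::MatrixXf::Random(outputs, batch);   // dL/dy,       [outputs x batch]

    Eigen::MatrixXf dW = dy * x.transpose();   // dL/dW, [outputs x inputs], same shape as W
    Eigen::VectorXf db = dy.rowwise().mean();  // dL/db, [outputs x 1], averaged over the batch
    Eigen::MatrixXf dx = W.transpose() * dy;   // dL/dx, [inputs x batch], passed to the previous layer

    std::cout << "dW: " << dW.rows() << "x" << dW.cols() << "  "
              << "db: " << db.rows() << "x" << db.cols() << "  "
              << "dx: " << dx.rows() << "x" << dx.cols() << std::endl;
    return 0;
}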
    // Updates the layer parameters, using the optimization functions stored in opt.
    void update(eT alpha_, eT decay_ = 0.0f) {
        // ...
        // Weights: learning rate alpha_, weight decay decay_.
        opt["W"]->update(p['W'], g['W'], alpha_, decay_);
        // ...
        // Temporary sparsity penalty matrix kept in the layer memory.
        mic::types::MatrixPtr<eT> penalty = m["penalty"];
        // ...
        // Biases: learning rate alpha_, no weight decay.
        opt["b"]->update(p['b'], g['b'], alpha_, 0.0);
        // ... (remainder of the method elided)
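opt stores one optimization function per parameter, so the concrete rule applied by opt["W"]->update(p['W'], g['W'], alpha_, decay_) depends on what has been registered for "W" and "b". For reference only, the sketch below shows a plain gradient-descent step with L2 weight decay, one plausible behaviour behind such a call; the helper name sgd_update and the exact role of the decay_ argument are assumptions, not this library's API.

// Standalone sketch of a plain SGD step with L2 weight decay (an assumption
// made for illustration; the registered optimization function may differ).
#include <Eigen/Dense>
#include <iostream>

// Hypothetical helper mirroring the (param, grad, alpha, decay) call shape above.
void sgd_update(Eigen::MatrixXf& param, const Eigen::MatrixXf& grad,
                float alpha, float decay) {
    // decay = 0 (as used for the bias above) reduces this to plain SGD.
    param -= alpha * (grad + decay * param);
}

int main() {
    Eigen::MatrixXf W  = Eigen::MatrixXf::Random(2, 3);   // parameter
    Eigen::MatrixXf dW = Eigen::MatrixXf::Random(2, 3);   // its gradient

    sgd_update(W, dW, /*alpha=*/0.01f, /*decay=*/0.0005f);
    std::cout << W << std::endl;
    return 0;
}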
Members and classes referenced in this listing:

mic::types::MatrixArray<eT> p - Parameters of the layer, to be used by the derived classes.
mic::types::MatrixArray<eT> g - Gradients; contains the input [x] and output [y] gradient matrices.
mic::types::MatrixArray<eT> s - States; contains the input [x] and output [y] matrices.
mic::types::MatrixArray<eT> m - Memory; a list of temporary parameters, to be used by the derived classes.
mic::neural_nets::optimization::OptimizationArray<eT> opt - Array of optimization functions.
eT desired_ro - Desired sparsity of the layer.
eT beta - Controls the weight of the sparsity penalty term (illustrated in the sketch after this list).
size_t batch_size - Size (length) of the (mini)batch.
size_t outputSize() - Returns the size (length) of the outputs.
void update(eT alpha_, eT decay_=0.0f) - Updates the layer parameters using the optimization functions stored in opt.

Related classes: a class representing a multi-layer neural network, a class implementing a plain linear, fully connected layer, and SparseLinear itself - a linear, fully connected layer with sparsity regularization.
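The listing above fetches ro and penalty from the layer memory m, but the code that fills them is elided from this excerpt. Given desired_ro (the desired sparsity) and beta (the weight of the penalty term), the usual sparse-autoencoder construction is a KL-divergence penalty on the mean activation of each output unit; the standalone Eigen sketch below illustrates that construction only. The epsilon guard, the shapes, and the exact formula are illustrative assumptions, not this library's code.

// Standalone Eigen sketch of a KL-divergence sparsity penalty (an assumption
// made for illustration, not this library's implementation).
#include <Eigen/Dense>
#include <cmath>
#include <iostream>

int main() {
    const float desired_ro = 0.05f;   // desired mean activation of each unit
    const float beta       = 3.0f;    // weight of the sparsity penalty term
    const float eps        = 1e-8f;   // guards against log(0) and division by zero

    // Activations: one row per output unit, one column per sample, values in (0,1).
    Eigen::MatrixXf y = ((Eigen::MatrixXf::Random(4, 10).array() + 1.0f) / 2.0f).matrix();

    // ro: mean activation of each unit over the (mini)batch.
    Eigen::ArrayXf ro = y.rowwise().mean().array();
    Eigen::ArrayXf ro_safe  = ro + eps;
    Eigen::ArrayXf ro_csafe = 1.0f - ro + eps;

    // Sparsity loss term: beta * sum_j KL(desired_ro || ro_j).
    float kl = (desired_ro * (std::log(desired_ro) - ro_safe.log())
             + (1.0f - desired_ro) * (std::log(1.0f - desired_ro) - ro_csafe.log())).sum();

    // Penalty: derivative of the KL term w.r.t. each unit's mean activation,
    // i.e. the quantity added to dL/dy before backpropagation in this scheme.
    Eigen::ArrayXf penalty = beta * ((1.0f - desired_ro) * ro_csafe.inverse()
                                   - desired_ro * ro_safe.inverse());

    std::cout << "sparsity loss = " << beta * kl << "\npenalty =\n" << penalty << std::endl;
    return 0;
}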