namespace mic {
namespace neural_nets {
namespace unit_tests {
// layer.p is the layer's parameter array (MatrixArray): "W" holds the weights, "b" the biases.
// Weights should be initialized to non-zero values.
for (size_t i = 0; i < 10; i++)
    ASSERT_NE( (*layer.p["W"])[i], 0.0 ) << "Weight W is zero at position i=" << i;

// Biases should be initialized to zero.
for (size_t i = 0; i < 2; i++)
    ASSERT_EQ( (*layer.p["b"])[i], 0.0 ) << "Bias b is not zero at position i=" << i;
// Weights must be finite: no NaNs...
for (size_t i = 0; i < 10; i++)
    ASSERT_FALSE( std::isnan((*layer.p["W"])[i]) ) << "Weight W is NaN at position i=" << i;

// ...and no infinities.
for (size_t i = 0; i < 10; i++)
    ASSERT_FALSE( std::isinf((*layer.p["W"])[i]) ) << "Weight W is Inf at position i=" << i;
// All weights should be pairwise distinct, i.e. the random initialization must break symmetry.
for (size_t i = 0; i < 10; i++) {
    for (size_t j = i + 1; j < 10; j++)
        ASSERT_NE( (*layer.p["W"])[i], (*layer.p["W"])[j] )
            << "Weights at positions i=" << i << " and j=" << j << " are equal";
}
mic::types::MatrixPtr<float> input = MAKE_MATRIX_PTR(float, 1, 1);

// The fixture sets W[0] = 1.0 and b[0] = 1.0, so the layer computes y = x + 1.
(*input)[0] = 0.0; // inferred from the assertion: forward(0) = 1
ASSERT_EQ( (*layer.forward( input ))[0], 1.0 );

(*input)[0] = 1.0; // inferred from the assertion: forward(1) = 2
ASSERT_EQ( (*layer.forward( input ))[0], 2.0 );
mic::types::MatrixPtr<float> y = layer.forward(const_x);
ASSERT_EQ((*y)[0], -2 );
ASSERT_EQ((*y)[1], 0 );
ASSERT_EQ((*y)[2], 2 );
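
// Both forward tests above follow from the affine map y = W*x + b: the 1x1 fixture
// (W[0] = 1, b[0] = 1) maps x to x + 1, and the 2x3 fixture's assertions check the
// same formula on its const_x. A sketch of the computation done directly in Eigen
// (assumed types for illustration; not the layer's own implementation):
#include <Eigen/Dense>

inline Eigen::VectorXf linear_forward_sketch(const Eigen::MatrixXf &W,
                                             const Eigen::VectorXf &b,
                                             const Eigen::VectorXf &x) {
    return W * x + b; // a single affine transformation, no activation
}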
TEST(LinearStacked1x2x3Float, Forward_y) {
    // First layer (1 -> 2): y1 = W1*x + b1.
    (*l1.p["W"]) << 1.0, 2.0;
    (*l1.p["b"]) << 0.0, 1.0;

    // Second layer (2 -> 3): y = W2*y1 + b2 (matrices are filled row-wise).
    (*l2.p["W"]) << -1, -2, -3, -5, 6, 9;
    (*l2.p["b"]) << -3, -2, -1;

    mic::types::MatrixPtr<float> x = MAKE_MATRIX_PTR(float, 1, 1);
    (*x)[0] = -1.0; // input value inferred from the assertions below

    mic::types::MatrixPtr<float> y = l2.forward(l1.forward(x));
    ASSERT_EQ((*y)[0], 0 );
    ASSERT_EQ((*y)[1], 6 );
    ASSERT_EQ((*y)[2], -16 );
}
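
// The expected vector (0, 6, -16) can be verified by composing the two affine maps
// by hand. A self-contained sketch in plain Eigen (the input x = -1 is inferred from
// the assertions; Eigen types are an assumption for illustration):
#include <Eigen/Dense>

inline Eigen::Vector3f stacked_forward_sketch(float x) {
    Eigen::Vector2f y1 = Eigen::Vector2f(1.0f, 2.0f) * x + Eigen::Vector2f(0.0f, 1.0f);
    Eigen::Matrix<float, 3, 2> W2;
    W2 << -1, -2, -3, -5, 6, 9;          // filled row-wise
    Eigen::Vector3f b2(-3.0f, -2.0f, -1.0f);
    return W2 * y1 + b2;                 // for x = -1: y1 = (-1, -1), y = (0, 6, -16)
}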
TEST(Linear2x1Float, Backward_dx) {
    (*layer.p["W"]) << 1.0, 2.0;
    (*layer.p["b"]) << 1.0;

    mic::types::MatrixPtr<float> dy = MAKE_MATRIX_PTR(float, 1, 1);
    (*dy)[0] = 2.0; // gradient value inferred from the assertions below

    // Input gradient: dx = W^T * dy.
    mic::types::MatrixPtr<float> dx = layer.backward(dy);
    ASSERT_EQ((*dx)[0], 2 );
    ASSERT_EQ((*dx)[1], 4 );
}
mic::types::MatrixPtr<float> dx = layer.backward(const_dy);

ASSERT_EQ((*dx)[0], -1);
ASSERT_EQ((*dx)[1], -3);
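
// Both Backward_dx tests exercise the input gradient of a linear layer,
// dx = W^T * dy; e.g. with W = (1, 2) and dy = 2 the expected dx is (2, 4).
// A sketch in plain Eigen (assumed types, for illustration only):
#include <Eigen/Dense>

inline Eigen::VectorXf backward_dx_sketch(const Eigen::MatrixXf &W,
                                          const Eigen::VectorXf &dy) {
    return W.transpose() * dy; // propagate the gradient back to the input
}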
mic::types::MatrixPtr<float> y = layer.forward(const_x);
mic::types::MatrixPtr<float> dx = layer.backward(const_dy);

// After backward(), the parameter gradients are stored in the layer's gradient array g.
ASSERT_EQ((*layer.g["W"])(0,0), 1);
ASSERT_EQ((*layer.g["W"])(0,1), -1);
ASSERT_EQ((*layer.g["W"])(1,0), 2);
ASSERT_EQ((*layer.g["W"])(1,1), -2);
ASSERT_EQ((*layer.g["W"])(2,0), -1);
ASSERT_EQ((*layer.g["W"])(2,1), 1);

// The bias gradient should equal the incoming gradient dy.
for (size_t i = 0; i < 3; i++)
    ASSERT_EQ((*layer.g["b"])[i], (*const_dy)[i]);
mic::types::MatrixPtr<double> predicted_y = layer.forward(const_x);
mic::types::MatrixPtr<double> dy = loss.calculateGradient(target_y, predicted_y);

// Analytical gradient of W, as computed by backward(); nW holds the numerically
// computed gradient (see the sketch below).
mic::types::MatrixPtr<double> dW = MAKE_MATRIX_PTR(double, *layer.g["W"]);

for (size_t i = 0; i < (size_t)dW->size(); i++) {
    EXPECT_LE( fabs((*dW)[i] - (*nW)[i]), eps )
        << "Too big a difference between dW and numerical dW at position i=" << i;
}
mic::types::MatrixPtr<double> predicted_y = layer.forward(const_x);
mic::types::MatrixPtr<double> dy = loss.calculateGradient(target_y, predicted_y);

// Analytical bias gradient, as computed by backward().
mic::types::MatrixPtr<double> db = MAKE_MATRIX_PTR(double, *layer.g["b"]);

for (size_t i = 0; i < (size_t)db->size(); i++)
    EXPECT_LE( fabs((*db)[i] - (*nb)[i]), eps )
        << "Too big a difference between db and numerical db at position i=" << i;
mic::types::MatrixPtr<double> predicted_y = layer.forward(const_x);
mic::types::MatrixPtr<double> dy = loss.calculateGradient(target_y, predicted_y);

// Analytical input gradient, as returned by backward().
mic::types::MatrixPtr<double> dx = MAKE_MATRIX_PTR(double, *layer.backward(dy));

for (size_t i = 0; i < (size_t)dx->size(); i++)
    EXPECT_LE( fabs((*dx)[i] - (*nx)[i]), eps )
        << "Too big a difference between dx and numerical dx at position i=" << i;
mic::types::MatrixPtr<double> predicted_y = layer.forward(const_x);
mic::types::MatrixPtr<double> dy = loss.calculateGradient(target_y, predicted_y);

// The same W-gradient check, repeated for this fixture.
mic::types::MatrixPtr<double> dW = MAKE_MATRIX_PTR(double, *layer.g["W"]);

for (size_t i = 0; i < (size_t)dW->size(); i++) {
    EXPECT_LE( fabs((*dW)[i] - (*nW)[i]), eps )
        << "Too big a difference between dW and numerical dW at position i=" << i;
}
mic::types::MatrixPtr<double> predicted_y = layer.forward(const_x);
mic::types::MatrixPtr<double> dy = loss.calculateGradient(target_y, predicted_y);

// The same bias-gradient check, repeated for this fixture.
mic::types::MatrixPtr<double> db = MAKE_MATRIX_PTR(double, *layer.g["b"]);

for (size_t i = 0; i < (size_t)db->size(); i++) {
    EXPECT_LE( fabs((*db)[i] - (*nb)[i]), eps )
        << "Too big a difference between db and numerical db at position i=" << i;
}
mic::types::MatrixPtr<double> predicted_y = layer.forward(const_x);
mic::types::MatrixPtr<double> dy = loss.calculateGradient(target_y, predicted_y);

// The same input-gradient check, repeated for this fixture.
mic::types::MatrixPtr<double> dx = MAKE_MATRIX_PTR(double, *layer.backward(dy));

for (size_t i = 0; i < (size_t)dx->size(); i++)
    EXPECT_LE( fabs((*dx)[i] - (*nx)[i]), eps )
        << "Too big a difference between dx and numerical dx at position i=" << i;
int main(int argc, char **argv) {
    testing::InitGoogleTest(&argc, argv);
    return RUN_ALL_TESTS();
}
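
// Tip: a single fixture can be run in isolation via GoogleTest's standard
// filter flag, e.g. --gtest_filter=Linear2x1Float.*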