36     ASSERT_EQ(loss.calculateLoss(target_y, predicted_y), (float)2.0);
 
57     for (size_t i=0; i<(size_t)dy->size(); i++) {

58         ASSERT_EQ((*dy)[i], -1.0) << "Gradient error at position i=" << i << ": (*dy)[i] is " << (*dy)[i] << " whereas -1.0 is expected";
 
   71     EXPECT_LE(fabs(l1-0.0225), eps);
 
   74     EXPECT_LE(fabs(l2-0.0225), eps);
 
   86     EXPECT_LE(fabs(l1-0.0225), eps);
 
   89     EXPECT_LE(fabs(l2-0.0225), eps);
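The 0.0225 value for l1 and l2 above is exactly what the same squared-error convention predicts for an estimate that is off by 0.15 in two coordinates; a minimal arithmetic check:

    // Editor's check, assuming L = 0.5 * sum_i (predicted_i - target_i)^2:
    // differences of -0.15 and +0.15 in two positions (zero elsewhere) give
    constexpr float expected_loss = 0.5f * (0.15f * 0.15f + 0.15f * 0.15f);  // = 0.0225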
 
100     mic::types::MatrixPtr<float> dy1 = loss.calculateGradient(target_y, predicted_y1);

101     EXPECT_LE(fabs((*dy1)[0] + 0.15), eps) << "Gradient error at position i=0";

102     EXPECT_LE(fabs((*dy1)[1] - 0.15), eps) << "Gradient error at position i=1";

103     EXPECT_LE(fabs((*dy1)[2] - 0.0), eps) << "Gradient error at position i=2";

104     EXPECT_LE(fabs((*dy1)[3] - 0.0), eps) << "Gradient error at position i=3";
 
106     mic::types::MatrixPtr<float> dy2 = loss.calculateGradient(target_y, predicted_y2);

107     EXPECT_LE(fabs((*dy2)[0] - 0.0), eps) << "Gradient error at position i=0";

108     EXPECT_LE(fabs((*dy2)[1] - 0.0), eps) << "Gradient error at position i=1";

109     EXPECT_LE(fabs((*dy2)[2] + 0.15), eps) << "Gradient error at position i=2";

110     EXPECT_LE(fabs((*dy2)[3] - 0.15), eps) << "Gradient error at position i=3";
 
  124     EXPECT_LE(fabs(l1 - 2.0), eps);
 
  127     EXPECT_LE(fabs(l2 - 2.02193), eps);
 
  139     EXPECT_LE(fabs(l1 - 2.0), eps);
 
  142     EXPECT_LE(fabs(l2 - 2.02193), eps);
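Lines 124-142 appear, in context, to be the cross-entropy checks on the same 4x1 fixtures. The exact fixture vectors behind l1 = 2.0 and l2 = 2.02193 are not visible in this excerpt; the sketch below only spells out the conventional definition the test presumably relies on.

    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Editor's sketch of the usual cross-entropy, L = -sum_i target_i * log(predicted_i).
    // The cross-entropy class's calculateLoss() is documented as "calculates cross entropy
    // (using log) and returns cross-entropy error (CE)", but its implementation is not shown here.
    float crossEntropy(const std::vector<float>& target, const std::vector<float>& predicted) {
        float loss = 0.0f;
        for (std::size_t i = 0; i < target.size(); i++)
            loss -= target[i] * std::log(predicted[i]);
        return loss;
    }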
 
154     mic::types::MatrixPtr<float> dy1 = loss.calculateGradient(target_y, predicted_y1);

155     EXPECT_LE(fabs((*dy1)[0] + 0.15), eps) << "Gradient error at position i=0";

156     EXPECT_LE(fabs((*dy1)[1] - 0.15), eps) << "Gradient error at position i=1";

157     EXPECT_LE(fabs((*dy1)[2] + 0.0), eps) << "Gradient error at position i=2";

158     EXPECT_LE(fabs((*dy1)[3] + 0.0), eps) << "Gradient error at position i=3";
 
160     mic::types::MatrixPtr<float> dy2 = loss.calculateGradient(target_y, predicted_y2);

161     EXPECT_LE(fabs((*dy2)[0] + 0.0), eps) << "Gradient error at position i=0";

162     EXPECT_LE(fabs((*dy2)[1] + 0.0), eps) << "Gradient error at position i=1";

163     EXPECT_LE(fabs((*dy2)[2] + 0.15), eps) << "Gradient error at position i=2";

164     EXPECT_LE(fabs((*dy2)[3] - 0.15), eps) << "Gradient error at position i=3";
 
  177     EXPECT_LE(fabs(l1 - 0.145), eps);
 
  189     EXPECT_LE(fabs(l1 - 0.0725), eps);
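The 3x2 fixture apparently holds a batch of two samples (one per column), so the mean-loss assertion is simply the loss divided by the batch size, in line with the calculateMeanLoss() description quoted at the end of this listing:

    // Editor's check: mean squared-error loss for a batch of 2 samples (the two columns
    // of the 3x2 matrices) is the total loss divided by the batch size.
    constexpr float mean_loss = 0.145f / 2.0f;   // = 0.0725, as asserted in line 189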
 
203     EXPECT_LE(fabs((*dy)(0,0) - 0.1), eps) << "Gradient error at position (0,0)";

204     EXPECT_LE(fabs((*dy)(0,1) + 0.0), eps) << "Gradient error at position (0,1)";

205     EXPECT_LE(fabs((*dy)(1,0) - 0.1), eps) << "Gradient error at position (1,0)";

206     EXPECT_LE(fabs((*dy)(1,1) + 0.1), eps) << "Gradient error at position (1,1)";

207     EXPECT_LE(fabs((*dy)(2,0) + 0.1), eps) << "Gradient error at position (2,0)";

208     EXPECT_LE(fabs((*dy)(2,1) - 0.5), eps) << "Gradient error at position (2,1)";
 
  222     EXPECT_LE(fabs(l1 - 2.42782), eps);
 
  235     EXPECT_LE(fabs(l1 - 1.21391), eps);
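As with the squared-error case, the mean cross-entropy (ACE) is the batch loss divided by the two samples in the 3x2 fixture:

    // Editor's check: 2.42782 / 2 = 1.21391, matching the mean-loss assertion in line 235.
    constexpr float mean_cross_entropy = 2.42782f / 2.0f;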
 
248     EXPECT_LE(fabs((*dy)(0,0) - 0.1), eps) << "Gradient error at position (0,0)";

249     EXPECT_LE(fabs((*dy)(0,1) + 0.0), eps) << "Gradient error at position (0,1)";

250     EXPECT_LE(fabs((*dy)(1,0) - 0.1), eps) << "Gradient error at position (1,0)";

251     EXPECT_LE(fabs((*dy)(1,1) + 0.1), eps) << "Gradient error at position (1,1)";

252     EXPECT_LE(fabs((*dy)(2,0) + 0.1), eps) << "Gradient error at position (2,0)";

253     EXPECT_LE(fabs((*dy)(2,1) - 0.5), eps) << "Gradient error at position (2,1)";
 
258 int main(int argc, char **argv) {

259     testing::InitGoogleTest(&argc, argv);

260     return RUN_ALL_TESTS();
 
Documentation excerpts for the symbols referenced above:

Class representing a cross-entropy loss function (classification).

Test fixture - two vectors of size 4x1, floats.
Test fixture - two predictions of size 4x1, floats.
Test fixture - two vectors of size 3x2, floats.

mic::types::MatrixPtr< dtype > calculateGradient(mic::types::MatrixPtr< dtype > target_y_, mic::types::MatrixPtr< dtype > predicted_y_)
    Squared-difference loss (regression): function calculating the gradient.
    Cross-entropy loss: gradient calculation.

dtype calculateLoss(mic::types::MatrixPtr< dtype > target_y_, mic::types::MatrixPtr< dtype > predicted_y_)
    Squared-difference loss (regression): calculates the squared-difference loss and returns the squared error (SE)...
    Cross-entropy loss: calculates cross entropy (using log) and returns the cross-entropy error (CE).

virtual dtype calculateMeanLoss(mic::types::MatrixPtr< dtype > target_y_, mic::types::MatrixPtr< dtype > predicted_y_)
    Calculates the mean loss (i.e. divides the loss by the size of the batch) - ACE for cross-entropy or MSE for ...

int main(int argc, char **argv)

TEST_F(Vectors4x1Float, SquaredErrorLoss)