// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
#ifndef DUNE_TNNMG_FUNCTIONALS_FUNCTIONALTEST_HH
#define DUNE_TNNMG_FUNCTIONALS_FUNCTIONALTEST_HH


/** \file
 *  \brief Contains various unit tests for nonlinear convex functionals
 */

#include <algorithm>
#include <array>
#include <cmath>
#include <iostream>
#include <limits>
#include <vector>

#include <dune/common/classname.hh>
#include <dune/common/exceptions.hh>

#include <dune/istl/matrixindexset.hh>

#include <dune/solvers/common/interval.hh>

namespace Dune {

namespace TNNMG {

/** \brief Test whether functional is convex
 * \tparam Functional Type of the functional to be tested
 * \param functional The functional to be tested
 * \param testPoints Test convexity along segment between pairs of these points
 */
template <class Functional>
void testConvexity(const Functional& functional,
35
#ifdef USE_OLD_TNNMG
36
                   const std::vector<typename Functional::VectorType>& testPoints)
37
38
39
#else
                   const std::vector<typename Functional::Vector>& testPoints)
#endif
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
{
  for (size_t i=0; i<testPoints.size(); i++)
    for (size_t j=0; j<testPoints.size(); j++)
    {
      // short-hand for the two test points
      const auto& p0 = testPoints[i];
      const auto& p1 = testPoints[j];

      // pre-compute functional values at the segment ends
      double v0 = functional(p0);
      double v1 = functional(p1);

      // Test convexity at a few selected points between the two test points
      for (double t : {0.0, 0.2, 0.4, 0.6, 0.8, 1.0})
      {
        // convex combination between the two test points
56
57
58
        auto p = p0;
        p *= (1-t);
        p.axpy(t,p1);
59
60

        // Test for convexity
61
        if (functional(p) > ((1-t)*v0 + t*v1) + 1e-10)
62
63
64
65
66
          DUNE_THROW(Exception, "Functional is not convex!");
      }
    }
}

/** \brief Test whether a functional is positive 1-homogeneous
 *
 * Being positive 1-homogeneous is not a requirement for functionals to work
 * in a TNNMG algorithm.  However, various important functionals are homogeneous
 * in this sense, therefore it is convenient to have a test for it.
 *
 * \tparam Functional Type of the functional to be tested
 * \param functional The functional to be tested
 * \param testDirections Test homogeneity along these directions
 * \throws MathError if homogeneity fails for one of the directions
 */
template <class Functional>
void testHomogeneity(const Functional& functional,
                     const std::vector<typename Functional::VectorType>& testDirections)
{
  for (auto&& testDirection : testDirections)
  {
    // The reference value is loop-invariant: evaluate it once per direction
    // instead of once per scaling factor.
    const double directionValue = functional(testDirection);

    // Test homogeneity for a few selected scaling factors
    for (double t : {0.0, 0.25, 0.5, 0.75, 1.0, 1.5, 2.0, 5.0})
    {
      auto scaledDirection = testDirection;
      scaledDirection *= t;
      // Positive 1-homogeneity: f(t*d) == t*f(d) for all t >= 0
      if (std::abs(functional(scaledDirection) - t*directionValue) > 1e-6)
        DUNE_THROW(MathError, "Functional is not positive 1-homogeneous!");
    }
  }
}


/** \brief Test the addGradient method
 *
 * This method tests the Functional::addGradient method by comparing its result with a
 * Finite-Difference-Approximation.  Test points from the testPoints input argument
 * are skipped if the method Functional::regularity returns a value larger than 1e6
 * for them.  In that case the functional is not differentiable at that point.
 *
 * \tparam Functional Type of the functional to be tested
 * \param functional The functional to be tested
 * \param testPoints Test addGradient at these points
 */
template <class Functional>
107
void testGradient(Functional& functional,
108
109
                   const std::vector<typename Functional::VectorType>& testPoints)
{
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
  for (auto&& testPoint : testPoints)
  {
    // Get the gradient at the current test point as computed by 'functional'
    typename Functional::VectorType gradient(testPoint.size());
    gradient = 0;
    functional.addGradient(testPoint, gradient);

    // Compute FD approximation to the gradient
    // Best value: square root of the machine precision
    const double eps = std::sqrt(std::numeric_limits<double>::epsilon());

    for (size_t i=0; i<testPoint.size(); i++)
      for (size_t j=0; j<testPoint[i].size(); j++)
      {
        // Do not compare anything if the functional claims to be non-differentiable
        // at the test point in the given direction
        functional.setVector(testPoint);
        if (functional.regularity(i, testPoint[i][j], j) > 1e10)
          continue;

        auto forwardPoint = testPoint;
        forwardPoint[i][j] += eps;
        auto backwardPoint = testPoint;
        backwardPoint[i][j] -= eps;

        auto fdGradient = (functional(forwardPoint) - functional(backwardPoint)) / (2*eps);

        if (std::abs(fdGradient - gradient[i][j]) > 100*eps)
        {
139
          std::cout << "Bug in addGradient for functional type " << Dune::className<Functional>() << ":" << std::endl;
140
141
142
143
144
145
          std::cerr << "Gradient doesn't match FD approximation at coefficient (" << i << ", " << j << ")" << std::endl;
          std::cerr << "Gradient: " << gradient[i][j] << ",   FD: " << fdGradient << std::endl;
          DUNE_THROW(MathError,"");
        }
      }
  }
146
147
148
149
150
151
152
153
154
155
156
157
158
159
}

/** \brief Test the addHessian method
 *
 * This method tests the Functional::addHessian method by comparing its result with a
 * Finite-Difference-Approximation.  Test points from the testPoints input argument
 * are skipped if the method Functional::regularity returns a value larger than 1e6
 * for them.  In that case the functional is not differentiable at that point.
 *
 * \tparam Functional Type of the functional to be tested
 * \param functional The functional to be tested
 * \param testPoints Test addHessian at these points
 */
template <class Functional>
Oliver Sander's avatar
Oliver Sander committed
160
void testHessian(Functional& functional,
161
162
                 const std::vector<typename Functional::VectorType>& testPoints)
{
Oliver Sander's avatar
Oliver Sander committed
163
164
165
  for (auto&& testPoint : testPoints)
  {
    // Get the Hessian at the current test point as computed by 'functional'
166
167
168
169
    /** \todo This code is conservative, and adds the entire matrix to the pattern.
     * A smarter code would ask the functional for all entries it intends to write,
     * but I don't know if and how this is possible.
     */
Oliver Sander's avatar
Oliver Sander committed
170
171
172
    MatrixIndexSet diagonalPattern;
    diagonalPattern.resize(testPoint.size(), testPoint.size());
    for (size_t i=0; i<testPoint.size(); i++)
173
174
      for (size_t j=0; j<testPoint.size(); j++)
        diagonalPattern.add(i,j);
Oliver Sander's avatar
Oliver Sander committed
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218

    typename Functional::MatrixType hessian;
    diagonalPattern.exportIdx(hessian);

    hessian = 0;
    functional.addHessian(testPoint, hessian);

    // FD step size. Best value: fourth root of the machine precision
    const double eps = std::pow(std::numeric_limits<double>::epsilon(),0.25);

    for (size_t i=0; i<testPoint.size(); i++)
    {
      for (size_t j=0; j<testPoint.size(); j++)
      {
        for (size_t k=0; k<testPoint[i].size(); k++)
        {
          for (size_t l=0; l<testPoint[j].size(); l++)
          {
            // Do not compare anything if the functional claims to be non-differentiable
            // at the test point in the given direction
            functional.setVector(testPoint);
            bool nonDifferentiableIK = functional.regularity(i, testPoint[i][k], k) > 1e10;
            bool nonDifferentiableJL = functional.regularity(j, testPoint[j][l], l) > 1e10;
            if (nonDifferentiableIK || nonDifferentiableJL)
              continue;

            // Compute an approximation to the derivative by finite differences
            std::array<typename Functional::VectorType, 4> neighborPoints;
            std::fill(neighborPoints.begin(), neighborPoints.end(), testPoint);

            neighborPoints[0][i][k] += eps;
            neighborPoints[0][j][l] += eps;
            neighborPoints[1][i][k] -= eps;
            neighborPoints[1][j][l] += eps;
            neighborPoints[2][i][k] += eps;
            neighborPoints[2][j][l] -= eps;
            neighborPoints[3][i][k] -= eps;
            neighborPoints[3][j][l] -= eps;

            std::array<double,4> neighborValues;
            for (int ii = 0; ii < 4; ii++)
              neighborValues[ii] = functional(neighborPoints[ii]);

            // The current partial derivative
219
            double derivative = hessian.exists(i,j) ? hessian[i][j][k][l] : 0.0;
Oliver Sander's avatar
Oliver Sander committed
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240

            double finiteDiff = (neighborValues[0]
                  - neighborValues[1] - neighborValues[2]
                  + neighborValues[3]) / (4 * eps * eps);

            // Check whether second derivative and its FD approximation match
            if (std::abs(derivative - finiteDiff) > eps)
            {
              std::cout << "Bug in addHessian for functional type "
                        << Dune::className<Functional>() << ":" << std::endl;
              std::cout << "    Second derivative does not agree with FD approximation" << std::endl;
              std::cout << "    Expected: " << derivative << ",  FD: " << finiteDiff << std::endl;
              std::cout << std::endl;

              DUNE_THROW(MathError,"");
            }
          }
        }
      }
    }
  }
241
242
243
244
}

/** \brief Test the directionalSubDiff method
 *
 * This method tests the Functional::directionalSubDiff method by comparing its result with a
 * Finite-Difference-Approximation.
 *
 * \tparam Functional Type of the functional to be tested
 * \param functional The functional to be tested
 * \param testPoints Test directionalSubDiff at these points; they are also
 *        abused as the set of test directions
 * \throws MathError if a subdifferential does not match its FD approximation
 */
template <class Functional>
void testDirectionalSubdifferential(const Functional& functional,
                                    const std::vector<typename Functional::VectorType>& testPoints)
{
  // Step size. Best value: square root of the machine precision
  const double eps = std::sqrt(std::numeric_limits<double>::epsilon());

  // Loop over all test points
  for (auto&& testPoint : testPoints)
  {
    // Abuse test points also as test directions
    for (auto&& testDirection : testPoints)
    {
      Solvers::Interval<double> subDifferential;
      functional.directionalSubDiff(testPoint, testDirection, subDifferential);

      // One-sided difference quotients approximate the two ends of the
      // subdifferential interval
      auto forwardPoint = testPoint;
      forwardPoint.axpy(eps,testDirection);
      auto backwardPoint = testPoint;
      backwardPoint.axpy(-eps,testDirection);

      auto forwardFDGradient  = (functional(forwardPoint) - functional(testPoint)) / eps;
      auto backwardFDGradient = (functional(testPoint) - functional(backwardPoint)) / eps;

      if (std::abs(backwardFDGradient - subDifferential[0]) > 100*eps
          or std::abs(forwardFDGradient - subDifferential[1]) > 100*eps)
      {
        // All diagnostics go to std::cerr (the original sent the first
        // line to std::cout and the rest to std::cerr)
        std::cerr << "Bug in directionalSubDiff for functional type " << Dune::className<Functional>() << ":" << std::endl;
        std::cerr << "Subdifferential doesn't match FD approximation" << std::endl;
        std::cerr << "SubDifferential: " << subDifferential
                  << ",   FD: [" << backwardFDGradient << ", " << forwardFDGradient << "]" << std::endl;
        std::cerr << "Test point:" << std::endl << testPoint << std::endl;
        std::cerr << "Test direction:" << std::endl << testDirection << std::endl;
        DUNE_THROW(MathError,"");
      }
    }
  }
}


/** \brief Test the subDiff method
 *
 * This method tests the Functional::subDiff method by comparing its result with a
 * Finite Difference approximation.
 *
 * \tparam Functional Type of the functional to be tested
 * \param functional The functional to be tested
 * \param testPoints Test subDiff at these points
 * \throws MathError if a subdifferential does not match its FD approximation
 */
template <class Functional>
void testSubDiff(Functional& functional,
                 const std::vector<typename Functional::VectorType>& testPoints)
{
  // Step size. Best value: square root of the machine precision.
  // (Loop-invariant: computed once instead of once per test point.)
  const double eps = std::sqrt(std::numeric_limits<double>::epsilon());

  for (auto&& testPoint : testPoints)
  {
    for (size_t i=0; i<testPoint.size(); i++)
      for (size_t j=0; j<testPoint[i].size(); j++)
      {
        Solvers::Interval<double> subDifferential;
        // subDiff needs to be given the current point internally
        functional.setVector(testPoint);
        functional.subDiff(i, testPoint[i][j], subDifferential, j);

        // One-sided difference quotients approximate the two ends of the
        // subdifferential interval
        auto forwardPoint = testPoint;
        forwardPoint[i][j] += eps;
        auto backwardPoint = testPoint;
        backwardPoint[i][j] -= eps;

        auto forwardFDGradient  = (functional(forwardPoint) - functional(testPoint)) / eps;
        auto backwardFDGradient = (functional(testPoint) - functional(backwardPoint)) / eps;

        if (std::abs(backwardFDGradient - subDifferential[0]) > 100*eps
            or std::abs(forwardFDGradient - subDifferential[1]) > 100*eps)
        {
          // All diagnostics go to std::cerr (the original sent the first
          // line to std::cout and the rest to std::cerr)
          std::cerr << "Bug in subDiff for functional type " << Dune::className<Functional>() << ":" << std::endl;
          std::cerr << "Subdifferential doesn't match FD approximation at coefficient (" << i << ", " << j << ")" << std::endl;
          std::cerr << "SubDifferential: " << subDifferential
                    << ",   FD: [" << backwardFDGradient << ", " << forwardFDGradient << "]" << std::endl;
          DUNE_THROW(MathError,"");
        }
      }
  }
}

/** \brief Test whether the values of a directional restriction are correct, by comparing
 *  with the values of the unrestricted functional.
 * \param functional The functional to be restricted
 * \param testPoints Set of test points, will additionally be abused as a set of test directions, too
 * \param testParameters Test the one-dimensional restriction at these parameter values
 */
template <typename Functional, typename TestPoints>
void testDirectionalRestrictionValues(const Functional& functional,
                                      const TestPoints& testPoints,
                                      const std::vector<double> testParameters)
{
  // Loop over all test points
  for (auto&& origin : testPoints)
  {
    // Abuse test points also as test directions
    for (auto&& testDirection : testPoints)
    {
      auto restriction = directionalRestriction(functional, origin, testDirection);

      for (auto v : testParameters)
      {
        // Test whether restriction really is a restriction
        auto probe = origin;
        probe.axpy(v, testDirection);

        auto restrictionValue = restriction(v);
        auto realValue        = functional(probe);

        if (fabs(restrictionValue - realValue) > 1e-6)
        {
          std::cerr << "Value of the directional restriction does not match the actual "
                    << "functional value." << std::endl
                    << "Restriction value at parameter " << v << " : " << restrictionValue << std::endl
                    << "Actual value: " << realValue << std::endl;
          abort();
        }
      }
    }
  }
}

/** \brief Test whether the subdifferentials of a directional restriction are correct, by comparing
 *  with a finite-difference approximation
 * \param functional The functional to be restricted
 * \param testPoints Set of test points, will additionally be abused as a set of test directions, too
 * \param testParameters Test the one-dimensional restriction at these parameter values
 */
template <typename Functional, typename TestPoints>
void testDirectionalRestrictionSubdifferential(const Functional& functional,
                                               const TestPoints& testPoints,
                                               const std::vector<double> testParameters)
{
  // Loop over all test points
  for (auto&& origin : testPoints)
  {
    // Abuse test points also as test directions
    for (auto&& testDirection : testPoints)
    {
      auto restriction = directionalRestriction(functional, origin, testDirection);

      for (auto v : testParameters)
      {
        // Test the subdifferential of the restriction
402
403
        Solvers::Interval<double> subDifferential;
        restriction.subDiff(v, subDifferential);
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427

        // Step size. Best value: square root of the machine precision
        constexpr double eps = std::sqrt(std::numeric_limits<double>::epsilon());

        auto forwardFDGradient  = (restriction(v+eps) - restriction(v)) / eps;
        auto backwardFDGradient = (restriction(v) - restriction(v-eps)) / eps;

        if (std::abs(backwardFDGradient - subDifferential[0]) > 1e4*eps
            or std::abs(forwardFDGradient - subDifferential[1]) > 1e4*eps)
        {
          std::cout << "Bug in subdifferential for directional restriction " << Dune::className(restriction) << ":" << std::endl;
          std::cerr << "Subdifferential doesn't match FD approximation" << std::endl;
          std::cerr << "SubDifferential: " << subDifferential
                    << ",   FD: [" << backwardFDGradient << ", " << forwardFDGradient << "]" << std::endl;
          std::cerr << "Origin:" << std::endl << origin << std::endl;
          std::cerr << "Test direction:" << std::endl << testDirection << std::endl;
          std::cerr << "Parameter value: " << v << std::endl;
          DUNE_THROW(MathError,"");
        }
      }
    }
  }
}

}   // namespace TNNMG

}   // namespace Dune

#endif   // DUNE_TNNMG_FUNCTIONALS_FUNCTIONALTEST_HH