samplefunctional.hh
/* -*- mode:c++; mode:semantic -*- */

#ifndef SAMPLE_FUNCTIONAL_HH
#define SAMPLE_FUNCTIONAL_HH

#include <dune/common/fmatrix.hh>
#include <dune/common/fvector.hh>
#include <dune/common/stdstreams.hh>

#include <dune/fufem/interval.hh>

#include <dune/tnnmg/problem-classes/bisection.hh>
#include <dune/tnnmg/problem-classes/directionalconvexfunction.hh>

#include "localnonlinearity.hh"

namespace Dune {
template <int dim> class SampleFunctional {
public:
  typedef FieldVector<double, dim> SmallVector;
  typedef FieldMatrix<double, dim, dim> SmallMatrix;

  typedef LocalNonlinearity<dim> NonlinearityType;

  SampleFunctional(SmallMatrix const &A, SmallVector const &b,
                   NonlinearityType const &phi)
      : A(A), b(b), phi(phi) {}

  double operator()(SmallVector const &v) const {
    SmallVector y;
    A.mv(v, y);            // y = Av
    y /= 2;                // y = 1/2 Av
    y -= b;                // y = 1/2 Av - b
    return y * v + phi(v); // <1/2 Av - b,v> + H(|v|)
  }

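  // Compute a direction of descent of J at x and store it in ret;
  // ret is set to zero if no direction of descent is found.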
  void descentDirection(SmallVector const x, SmallVector &ret) const {
    // Check the squared norm rather than each component because
    // complementaryProjection() divides by it
    if (x.two_norm2() == 0.0) {
      // If there is a direction of descent, this is it
      SmallVector d;
      smoothGradient(x, d);

      Interval<double> D;
      phi.directionalSubDiff(x, d, D);
      double const nonlinearDecline = D[1];
      double const smoothDecline = -(d * d);
      double const combinedDecline = smoothDecline + nonlinearDecline;

      if (combinedDecline < 0) {
        ret = d;
        ret *= -1;
      } else {
        ret = 0;
      }
      return;
    }

    SmallVector pg;
    upperGradient(x, pg);
    SmallVector mg;
    lowerGradient(x, mg);
    double const pgx = pg * x;
    double const mgx = mg * x;
    if (pgx >= 0 && mgx >= 0) {
      ret = pg;
      dverb << "## Directional derivative (as per scalar product w/ "
               "semigradient): " << -(ret * mg)
            << " (coordinates of the restriction)" << std::endl;
    } else if (pgx <= 0 && mgx <= 0) {
      ret = mg;
      dverb << "## Directional derivative (as per scalar product w/ "
               "semigradient): " << -(ret * pg)
            << " (coordinates of the restriction)" << std::endl;
    } else {
      // Includes the case that pg points in direction x and mg
      // points in direction -x. The projection will then be zero.
      SmallVector d;
      smoothGradient(x, d);
      complementaryProjection(d, x, ret);
      dverb << "## Directional derivative (as per scalar product w/ "
               "semigradient): " << -(ret * ret)
            << " (coordinates of the restriction)" << std::endl;
    }
    ret *= -1;
  }

  SmallMatrix const &A;
  SmallVector const &b;
  NonlinearityType const &phi;

private:
  // Gradient of the smooth part
  void smoothGradient(SmallVector const &x, SmallVector &y) const {
    A.mv(x, y); // y = Av
    y -= b;     // y = Av - b
  }

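  // Gradient of the smooth part plus the upper gradient of phi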
  void upperGradient(SmallVector const &x, SmallVector &y) const {
    phi.upperGradient(x, y);
    SmallVector z;
    smoothGradient(x, z);
    y += z;
  }

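  // Gradient of the smooth part plus the lower gradient of phi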
  void lowerGradient(SmallVector const &x, SmallVector &y) const {
    phi.lowerGradient(x, y);
    SmallVector z;
    smoothGradient(x, z);
    y += z;
  }

  // y = (id - P)(d) where P is the projection onto the line t*x
  void complementaryProjection(SmallVector const &d, SmallVector const &x,
                               SmallVector &y) const {
    double const dx = d * x;
    double const xx = x.two_norm2();
    y = d;
    y.axpy(-dx / xx, x);
  }
};

template <class Functional>
void minimise(Functional const J, typename Functional::SmallVector &x,
              size_t steps = 1,
              Bisection const &bisection =
                  Bisection(0.0, // acceptError: Stop if the search interval has
                                 // become smaller than this number
                            1.0, // acceptFactor: ?
                            1e-12, // requiredResidual: ?
                            true,  // fastQuadratic
                            0))    // safety: acceptance factor for inexact
                                   // minimization
{
  typedef typename Functional::SmallVector SmallVector;

  for (size_t step = 0; step < steps; ++step) {
    SmallVector descDir;
    J.descentDirection(x, descDir);

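    // A zero direction signals that no descent direction was found; stop.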
    if (descDir == SmallVector(0.0))
      return;

    // {{{ Construct a restriction of J to the line x + t * descDir

    /* Restrict J to the line t -> x + t * descDir.  Writing u = x and
       v = descDir, the smooth part becomes

         1/2 <A(u+tv),u+tv> - <b,u+tv>
           = 1/2 <Av,v> t^2 - <b-Au,v> t + <1/2 Au-b,u>,

       since A is symmetric.
    */
    SmallVector tmp;

    J.A.mv(descDir, tmp);                //  Av
    double const JRestA = tmp * descDir; // <Av,v>

    tmp = J.b;                           //  b
    J.A.mmv(x, tmp);                     //  b-Au
    double const JRestb = tmp * descDir; // <b-Au,v>

    typedef typename Functional::NonlinearityType LocalNonlinearityType;
    typedef DirectionalConvexFunction<LocalNonlinearityType>
        MyDirectionalConvexFunctionType;
    // FIXME: We cannot pass J.phi directly because the constructor
    // does not allow for constant arguments
    LocalNonlinearityType phi = J.phi;
    MyDirectionalConvexFunctionType JRest(JRestA, JRestb, phi, x, descDir);
    // }}}
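    // Up to the constant <1/2 Au-b,u>, which does not affect the minimiser,
    // JRest(t) agrees with J(x + t * descDir).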

    { // Debug
      Interval<double> D;
      JRest.subDiff(0, D);

      dverb << "## Directional derivative (as per subdifferential of "
               "restriction): " << D[1] << " (coordinates of the restriction)"
            << std::endl;
      /*
        This value can differ considerably from the directional
        derivative computed in the descentDirection() method:

        If phi is nonsmooth at x, the directional derivatives jump
        there.  If, additionally, |x| is computed to be slightly too
        small or too large in one place but not in the other, the
        subdifferential computed here (along the restriction) and the
        one computed in descentDirection() no longer coincide.

        The assertion D[1] <= 0 may thus fail.
      */
    }

    int count;
    // FIXME: The value of x_old should not matter if the factor is 1.0,
    // correct?
    double const stepsize = bisection.minimize(JRest, 0.0, 1.0, count);
    dverb << "Number of iterations in the bisection method: " << count
          << std::endl;
    ;

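    // Take the step: x += stepsize * descDir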
    x.axpy(stepsize, descDir);
  }
}
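
/* Illustrative usage sketch; how the nonlinearity phi is constructed is left
   open here, since its interface lives in localnonlinearity.hh:

     typedef SampleFunctional<dim> Functional;
     Functional::SmallMatrix A = ...;   // symmetric matrix of the quadratic part
     Functional::SmallVector b = ...;   // linear part
     LocalNonlinearity<dim> phi = ...;  // nonsmooth part H(|.|)

     Functional J(A, b, phi);
     Functional::SmallVector x(0.0);    // initial iterate
     minimise(J, x, 10);                // ten descent + bisection steps
*/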
} // namespace Dune
#endif