From 7949bec1764f220d1aee22f74f410526512b79ec Mon Sep 17 00:00:00 2001
From: Elias Pipping <elias.pipping@fu-berlin.de>
Date: Thu, 8 Sep 2011 16:57:30 +0200
Subject: [PATCH] New bisection example; not yet fully functional

---
 src/Makefile.am              |   8 +-
 src/bisection-example-new.cc | 280 +++++++++++++++++++++++++++++++++++
 src/mynonlinearity.cc        |  52 +++++++
 3 files changed, 339 insertions(+), 1 deletion(-)
 create mode 100644 src/bisection-example-new.cc
 create mode 100644 src/mynonlinearity.cc

diff --git a/src/Makefile.am b/src/Makefile.am
index b1eb1ad1..a28ee65b 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -2,7 +2,13 @@
 SUBDIRS =
 
 noinst_PROGRAMS = \
-	bisection-example-flexible
+	bisection-example-flexible \
+	bisection-example-new
+
+bisection_example_new_SOURCES = \
+	bisection-example-new.cc \
+	mynonlinearity.cc \
+	properscalarincreasingconvexfunction.hh
 
 bisection_example_flexible_SOURCES = \
 	bisection-example-flexible.cc \
diff --git a/src/bisection-example-new.cc b/src/bisection-example-new.cc
new file mode 100644
index 00000000..1faa2e7d
--- /dev/null
+++ b/src/bisection-example-new.cc
@@ -0,0 +1,280 @@
+/* -*- mode:c++; mode: flymake; mode:semantic -*- */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <dune/common/exceptions.hh>
+#include <dune/common/stdstreams.hh>
+
+#include <dune/fufem/interval.hh>
+
+#include <dune/tnnmg/nonlinearities/smallfunctional.hh>
+#include <dune/tnnmg/problem-classes/bisection.hh>
+#include <dune/tnnmg/problem-classes/directionalconvexfunction.hh>
+
+#include <dune/tnnmg/problem-classes/nonlinearity.hh>
+
+#include <cassert>
+#include <cstdlib>
+#include <limits>
+
+#include "mynonlinearity.cc"
+#include "properscalarincreasingconvexfunction.hh"
+
+template <int dimension, class Function = TrivialFunction>
+class SampleFunctional { // J(v) = <1/2 Av - b, v> + H(|v|), H supplied by Function
+public:
+  typedef Dune::FieldVector<double, dimension> SmallVector;
+  typedef Dune::FieldMatrix<double, dimension, dimension> SmallMatrix;
+
+  SampleFunctional(SmallMatrix A, SmallVector b) : A_(A), b_(b), func_() {} // A assumed symmetric (see comment in minimise)
+
+  double operator()(const SmallVector v) const {
+    SmallVector y;
+    A_.mv(v, y);                        // y = Av
+    y /= 2;                             // y = 1/2 Av
+    y -= b_;                            // y = 1/2 Av - b
+    return y * v + func_(v.two_norm()); // <1/2 Av - b,v> + H(|v|)
+  }
+
+  double directionalDerivative(const SmallVector x, // one-sided derivative of J at x in direction dir/|dir|; throws on dir == 0
+                               const SmallVector dir) const {
+    double norm = dir.two_norm();
+
+    if (norm == 0)
+      DUNE_THROW(Dune::Exception, "Directional derivatives cannot be computed "
+                                  "w.r.t. the zero-direction.");
+
+    SmallVector tmp = dir;
+    tmp /= norm;
+
+    return pseudoDirectionalDerivative(x, tmp); // tmp is normalised, so pseudo == true derivative
+  }
+
+  SmallVector minimise(const SmallVector x, unsigned int iterations) const { // bisection line search; returns the correction to add to x
+    SmallVector descDir = ModifiedGradient(x);
+
+    /* We have
+
+       1/2 <A(u+xv),u+xv>-<b,u+xv> = 1/2 <Av,v> x^2 - <b-Au,v> x + <1/2 Au-b,u>
+
+       since A is symmetric.
+     */
+
+    SmallVector tmp2;
+    A_.mv(descDir, tmp2);
+    double const rest_A = tmp2 * descDir; // <A descDir, descDir>
+
+    SmallVector tmp3;
+    A_.mv(x, tmp3);
+    double const rest_b = (b_ - tmp3) * descDir; // <b - Ax, descDir>
+
+    typedef MyNonlinearity<dimension, SampleFunction> MyNonlinearityType; // NOTE(review): hard-codes SampleFunction instead of Function — confirm intended
+    MyNonlinearityType phi;
+
+    DirectionalConvexFunction <
+        Nonlinearity<
+            Dune::FieldVector<double,
+                              dimension>, // MyNonlinearityType::SmallVector,
+            Dune::FieldMatrix<double, dimension,
+                              dimension> // MyNonlinearityType::SmallMatrix> >
+            > rest(rest_A, rest_b, phi, x, descDir);
+
+    if (descDir == SmallVector(0.0)) // NOTE(review): could be checked before constructing `rest` above
+      return SmallVector(0.0);
+
+    Dune::dverb << "Starting at x with J(x) = " << operator()(x) << std::endl;
+    Dune::dverb << "Minimizing in direction w with dJ(x,w) = "
+                << directionalDerivative(x, descDir) << std::endl;
+
+    double l = 0;
+    double r = 1;
+    SmallVector tmp;
+    while (true) { // widen [l,r] until the derivative at x + r*descDir is nonnegative
+      tmp = x;
+      tmp.axpy(r, descDir);
+      if (pseudoDirectionalDerivative(tmp, descDir) >= 0)
+        break;
+
+      l = r;
+      r *= 2;
+      Dune::dverb << "Widened interval!" << std::endl;
+    }
+    Dune::dverb << "Interval now [" << l << "," << r << "]" << std::endl;
+
+#ifndef NDEBUG
+    {
+      SmallVector tmpl = x;
+      tmpl.axpy(l, descDir);
+      SmallVector tmpr = x;
+      tmpr.axpy(r, descDir);
+      assert(directionalDerivative(tmpl, descDir) < 0);
+      assert(directionalDerivative(tmpr, descDir) > 0);
+    }
+#endif
+
+    double m = l / 2 + r / 2;
+    SmallVector middle = SmallVector(0.0);
+    for (unsigned int count = 0; count < iterations; ++count) { // plain bisection on the sign of the directional derivative
+      Dune::dverb << "now at m = " << m << std::endl;
+      Dune::dverb << "Value of J here: " << operator()(x + middle) << std::endl;
+
+      middle = descDir;
+      middle *= m;
+
+      double pseudoDerivative =
+          pseudoDirectionalDerivative(x + middle, descDir);
+
+      if (pseudoDerivative < 0) {
+        l = m;
+        m = (m + r) / 2;
+      } else if (pseudoDerivative > 0) {
+        r = m;
+        m = (l + m) / 2;
+      } else
+        break;
+    }
+    return middle; // last iterate m*descDir (not the midpoint of the final interval)
+  }
+
+private:
+  SmallMatrix A_;
+  SmallVector b_;
+
+  Function func_;
+
+  // Gradient of the smooth part
+  SmallVector SmoothGrad(const SmallVector x) const {
+    SmallVector y;
+    A_.mv(x, y); // y = Av
+    y -= b_;     // y = Av - b
+    return y;
+  }
+
+  SmallVector PlusGrad(const SmallVector x) const { // gradient using H's right differential; divides by |x|, so requires x != 0
+    SmallVector y = SmoothGrad(x);
+    y.axpy(func_.rightDifferential(x.two_norm()) / x.two_norm(), x);
+    return y;
+  }
+
+  SmallVector MinusGrad(const SmallVector x) const { // gradient using H's left differential; divides by |x|, so requires x != 0
+    SmallVector y = SmoothGrad(x);
+    y.axpy(func_.leftDifferential(x.two_norm()) / x.two_norm(), x);
+    return y;
+  }
+
+  // |dir|-times the directional derivative wrt dir/|dir|.  If only
+  // the sign of the directionalDerivative matters, this saves the
+  // cost of normalising.
+  double pseudoDirectionalDerivative(const SmallVector x,
+                                     const SmallVector dir) const {
+    if (x == SmallVector(0.0))
+      return func_.rightDifferential(0) * dir.two_norm();
+
+    if (x * dir > 0)
+      return PlusGrad(x) * dir; // |x| grows along dir: right differential applies
+    else
+      return MinusGrad(x) * dir; // |x| shrinks along dir: left differential applies
+  }
+
+  SmallVector ModifiedGradient(const SmallVector x) const { // descent direction of the nonsmooth J, or 0 if x is stationary
+    if (x == SmallVector(0.0)) {
+      SmallVector d = SmoothGrad(x);
+      // Decline of the smooth part in the negative gradient direction
+      double smoothDecline = -(d * d);
+      double nonlinearDecline =
+          func_.rightDifferential(0.0) * d.two_norm(); // TODO: is this correct?
+      double combinedDecline = smoothDecline + nonlinearDecline;
+
+      return (combinedDecline < 0) ? d : SmallVector(0.0); // NOTE(review): returns raw gradient d, not -d — sign looks inconsistent with the `ret *= -1` below; verify
+    }
+
+    SmallVector const pg = PlusGrad(x);
+    SmallVector const mg = MinusGrad(x);
+    SmallVector ret;
+    // TODO: collinearity checks suck
+    if (pg * x == pg.two_norm() * x.two_norm() && // NOTE(review): exact FP equality — numerically fragile
+        -(mg * x) == mg.two_norm() * x.two_norm()) {
+      return SmallVector(0);
+    } else if (pg * x >= 0 && mg * x >= 0) {
+      ret = pg;
+    } else if (pg * x <= 0 && mg * x <= 0) {
+      ret = mg;
+    } else {
+      ret = project(SmoothGrad(x), x);
+    }
+    ret *= -1;
+    return ret;
+  }
+
+  // No normalising is done!
+  SmallVector project(const SmallVector z, const SmallVector x) const { // component of z orthogonal to x
+    SmallVector y = z;
+    y.axpy(-(z * x) / x.two_norm2(), x);
+    return y;
+  }
+};
+
+void testSampleFunction() { // smoke test with H = SampleFunction, A = 3*I, b = (1,2)
+  int const dim = 2;
+  typedef SampleFunctional<dim, SampleFunction> SampleFunctional;
+
+  SampleFunctional::SmallMatrix A;
+  A[0][0] = 3;
+  A[0][1] = 0;
+  A[1][0] = 0;
+  A[1][1] = 3;
+  SampleFunctional::SmallVector b;
+  b[0] = 1;
+  b[1] = 2;
+
+  SampleFunctional J(A, b);
+
+  std::cout << J.directionalDerivative(b, b) << std::endl;
+  assert(J.directionalDerivative(b, b) == 2 * sqrt(5) + 2); // NOTE(review): exact FP comparison against sqrt — brittle; prefer a tolerance
+
+  SampleFunctional::SmallVector start = b;
+  start *= 17;
+  SampleFunctional::SmallVector correction = J.minimise(start, 20);
+  assert(J(start + correction) <= J(start));
+  assert(std::abs(J(start + correction) + 0.254644) < 1e-8); // NOTE(review): 1e-8 tolerance vs 6-digit reference value — confirm the constant's precision
+  std::cout << J(start + correction) << std::endl;
+}
+
+void testTrivialFunction() { // same setup with the default H = TrivialFunction
+  int const dim = 2;
+  typedef SampleFunctional<dim> SampleFunctional;
+
+  SampleFunctional::SmallMatrix A;
+  A[0][0] = 3;
+  A[0][1] = 0;
+  A[1][0] = 0;
+  A[1][1] = 3;
+  SampleFunctional::SmallVector b;
+  b[0] = 1;
+  b[1] = 2;
+
+  SampleFunctional J(A, b);
+
+  std::cout << J.directionalDerivative(b, b) << std::endl;
+  assert(J.directionalDerivative(b, b) == 2 * sqrt(5)); // NOTE(review): exact FP comparison against sqrt — brittle; prefer a tolerance
+
+  SampleFunctional::SmallVector start = b;
+  start *= 17;
+  SampleFunctional::SmallVector correction = J.minimise(start, 20);
+  assert(J(start + correction) <= J(start));
+  assert(std::abs(J(start + correction) + 0.83333333) < 1e-8); // expected minimum -5/6 to within the stated tolerance
+  std::cout << J(start + correction) << std::endl;
+}
+
+int main() { // runs both smoke tests; asserts inside them do the actual checking
+  try {
+    testSampleFunction();
+    testTrivialFunction();
+    return 0;
+  }
+  catch (Dune::Exception &e) {
+    Dune::derr << "Dune reported error: " << e << std::endl;
+  } // NOTE(review): falls through to implicit return 0 even on error — consider returning nonzero here
+}
diff --git a/src/mynonlinearity.cc b/src/mynonlinearity.cc
new file mode 100644
index 00000000..243baa4e
--- /dev/null
+++ b/src/mynonlinearity.cc
@@ -0,0 +1,52 @@
+/* -*- mode:c++; mode: flymake; mode:semantic -*- */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <dune/common/fvector.hh>
+#include <dune/common/fmatrix.hh>
+
+#include <dune/istl/bcrsmatrix.hh>
+#include <dune/istl/bvector.hh>
+
+#include <dune/fufem/interval.hh>
+
+#include <dune/tnnmg/problem-classes/nonlinearity.hh>
+
+#include "properscalarincreasingconvexfunction.hh"
+
+template <int dimension, class OuterFunction = TrivialFunction>
+class MyNonlinearity // phi(v) = H(|v|): adapts a scalar convex H to the TNNMG Nonlinearity interface
+    : public Nonlinearity<Dune::FieldVector<double, dimension>,
+                          Dune::FieldMatrix<double, dimension, dimension>> {
+public:
+  typedef Dune::FieldVector<double, dimension> SmallVector;
+  typedef Dune::FieldMatrix<double, dimension, dimension> SmallMatrix;
+
+  void directionalSubDiff(SmallVector u, SmallVector v, Interval<double> &D) { // subdifferential of t -> phi(u + t v) at t = 0, written to D = [D[0], D[1]]
+    if (u == SmallVector(0.0)) {
+      D[0] = D[1] = func_.rightDifferential(0); // NOTE(review): at u = 0 the interval should presumably scale with |v| and D[0] use the left differential — verify
+    } else if (u * v > 0) {
+      D[1] = (v * u) * func_.rightDifferential(u.two_norm()) / u.two_norm();
+      D[0] = (v * u) * func_.leftDifferential(u.two_norm()) / u.two_norm();
+    } else {
+      D[1] = (v * u) * func_.leftDifferential(u.two_norm()) / u.two_norm(); // v*u <= 0 flips which one-sided differential gives each bound
+      D[0] = (v * u) * func_.rightDifferential(u.two_norm()) / u.two_norm();
+    }
+  }
+
+  double operator()(const Dune::BlockVector<SmallVector> &) const { return 3; } // dummy stub required by the base interface
+  void addGradient(const Dune::BlockVector<SmallVector> &,
+                   Dune::BlockVector<SmallVector> &) const {} // stub
+  void addHessian(const Dune::BlockVector<SmallVector> &,
+                  Dune::BCRSMatrix<SmallMatrix> &) const {} // stub
+  void addHessianIndices(Dune::MatrixIndexSet &) const {} // stub
+  void setVector(const Dune::BlockVector<SmallVector> &) {} // stub
+  void updateEntry(int, double, int) {} // stub
+  void subDiff(int, double, Interval<double> &, int) const {} // stub
+  double regularity(int, double, int) const { return 3; } // stub: dummy value
+  void domain(int, Interval<double> &, int) const {} // stub
+
+  OuterFunction func_; // the scalar convex function H
+};
-- 
GitLab