diff --git a/src/Makefile.am b/src/Makefile.am
index db22653d1e0e04f72c72b7c95e54cbb88ae9b2a0..70038f30a3c22194b95b7d1e0e32d0b73645b1ea 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -7,7 +7,8 @@ noinst_PROGRAMS = \
 bisection_example_new_SOURCES = \
 	bisection-example-new.cc \
 	mynonlinearity.cc \
-	properscalarincreasingconvexfunction.hh
+	properscalarincreasingconvexfunction.hh \
+	samplefunctional.hh
 
 check-am:
 	./bisection-example-new
diff --git a/src/bisection-example-new.cc b/src/bisection-example-new.cc
index 34010a48121cf063e5ddb43aa141bf60708a70be..3c2fba4b8bdddc5d7616760bf2919508487addc5 100644
--- a/src/bisection-example-new.cc
+++ b/src/bisection-example-new.cc
@@ -5,142 +5,8 @@
 #endif
 
 #include <dune/common/exceptions.hh>
-#include <dune/common/stdstreams.hh>
 
-#include <dune/tnnmg/problem-classes/bisection.hh>
-#include <dune/tnnmg/problem-classes/directionalconvexfunction.hh>
-
-#include "mynonlinearity.cc"
-#include "properscalarincreasingconvexfunction.hh"
-
-template <int dimension, class Function = TrivialFunction>
-class SampleFunctional {
-public:
-  typedef Dune::FieldVector<double, dimension> SmallVector;
-  typedef Dune::FieldMatrix<double, dimension, dimension> SmallMatrix;
-  typedef Function FunctionType;
-
-  SampleFunctional(SmallMatrix _A, SmallVector _b) : A(_A), b(_b), func_() {}
-
-  double operator()(const SmallVector v) const {
-    SmallVector y;
-    A.mv(v, y);                         // y = Av
-    y /= 2;                             // y = 1/2 Av
-    y -= b;                             // y = 1/2 Av - b
-    return y * v + func_(v.two_norm()); // <1/2 Av - b,v> + H(|v|)
-  }
-
-  SmallVector descentDirection(const SmallVector x) const {
-    if (x == SmallVector(0.0)) {
-      SmallVector d = SmoothGrad(x);
-      // Decline of the smooth part in the negative gradient direction
-      double smoothDecline = -(d * d);
-      double nonlinearDecline =
-          func_.rightDifferential(0.0) * d.two_norm(); // TODO: is this correct?
-      double combinedDecline = smoothDecline + nonlinearDecline;
-
-      return (combinedDecline < 0) ? d : SmallVector(0.0);
-    }
-
-    SmallVector const pg = PlusGrad(x);
-    SmallVector const mg = MinusGrad(x);
-    SmallVector ret;
-    // TODO: collinearity checks suck
-    if (pg * x == pg.two_norm() * x.two_norm() &&
-        -(mg * x) == mg.two_norm() * x.two_norm()) {
-      return SmallVector(0);
-    } else if (pg * x >= 0 && mg * x >= 0) {
-      ret = pg;
-    } else if (pg * x <= 0 && mg * x <= 0) {
-      ret = mg;
-    } else {
-      ret = project(SmoothGrad(x), x);
-    }
-    ret *= -1;
-    return ret;
-  }
-
-  SmallMatrix A;
-  SmallVector b;
-
-private:
-  Function func_;
-
-  // Gradient of the smooth part
-  SmallVector SmoothGrad(const SmallVector x) const {
-    SmallVector y;
-    A.mv(x, y); // y = Av
-    y -= b;     // y = Av - b
-    return y;
-  }
-
-  SmallVector PlusGrad(const SmallVector x) const {
-    SmallVector y = SmoothGrad(x);
-    y.axpy(func_.rightDifferential(x.two_norm()) / x.two_norm(), x);
-    return y;
-  }
-
-  SmallVector MinusGrad(const SmallVector x) const {
-    SmallVector y = SmoothGrad(x);
-    y.axpy(func_.leftDifferential(x.two_norm()) / x.two_norm(), x);
-    return y;
-  }
-
-  // No normalising is done!
-  SmallVector project(const SmallVector z, const SmallVector x) const {
-    SmallVector y = z;
-    y.axpy(-(z * x) / x.two_norm2(), x);
-    return y;
-  }
-};
-
-template <class Functional>
-void minimise(const Functional J, const typename Functional::SmallVector x,
-              typename Functional::SmallVector &corr) {
-  typedef typename Functional::SmallVector SmallVector;
-  SmallVector descDir = J.descentDirection(x);
-
-  if (descDir == SmallVector(0.0)) {
-    corr = SmallVector(0.0);
-    return;
-  }
-
-  // {{{ Construct a restriction of J to the line x + t * descDir
-
-  /* We have
-
-     1/2 <A(u+xv),u+xv>-<b,u+xv> = 1/2 <Av,v> x^2 - <b-Au,v> x + <1/2 Au-b,u>
-
-     since A is symmetric.
-  */
-  SmallVector tmp;
-
-  J.A.mv(descDir, tmp);                // Av
-  double const JRestA = tmp * descDir; // <Av,v>
-
-  J.A.mv(x, tmp);                              // Au
-  double const JRestb = (J.b - tmp) * descDir; // <b-Au,v>
-
-  typedef MyNonlinearity<SmallVector::dimension,
-                         typename Functional::FunctionType> MyNonlinearityType;
-  MyNonlinearityType phi;
-  typedef DirectionalConvexFunction<MyNonlinearityType>
-  MyDirectionalConvexFunctionType;
-  MyDirectionalConvexFunctionType JRest(JRestA, JRestb, phi, x, descDir);
-  // }}}
-
-  // FIXME: default values are used
-  Bisection bisection;
-  int count;
-  // FIXME: does x_old = 1 make any sense?!
-  double const stepsize = bisection.minimize(JRest, 0.0, 1.0, count);
-  Dune::dverb << "Number of iterations in the bisection method: " << count
-              << std::endl;
-  ;
-
-  corr = descDir;
-  corr *= stepsize;
-}
+#include "samplefunctional.hh"
 
 template <int dim, class Function>
 void functionTester(
diff --git a/src/samplefunctional.hh b/src/samplefunctional.hh
new file mode 100644
index 0000000000000000000000000000000000000000..0d16b7015efdd26020b8f7bc991c02ef2b03230d
--- /dev/null
+++ b/src/samplefunctional.hh
@@ -0,0 +1,143 @@
+/* -*- mode:c++; mode:semantic -*- */
+
+#ifndef SAMPLE_FUNCTIONAL_HH
+#define SAMPLE_FUNCTIONAL_HH
+
+#include <dune/common/stdstreams.hh>
+
+#include <dune/tnnmg/problem-classes/bisection.hh>
+#include <dune/tnnmg/problem-classes/directionalconvexfunction.hh>
+
+#include "mynonlinearity.cc"
+#include "properscalarincreasingconvexfunction.hh"
+
+template <int dimension, class Function = TrivialFunction>
+class SampleFunctional { // J(v) = <1/2 Av - b, v> + H(|v|)
+public:
+  typedef Dune::FieldVector<double, dimension> SmallVector;
+  typedef Dune::FieldMatrix<double, dimension, dimension> SmallMatrix;
+  typedef Function FunctionType;
+
+  SampleFunctional(SmallMatrix _A, SmallVector _b) : A(_A), b(_b), func_() {} // H (func_) is default-constructed
+
+  double operator()(const SmallVector v) const {
+    SmallVector y;
+    A.mv(v, y);                         // y = Av
+    y /= 2;                             // y = 1/2 Av
+    y -= b;                             // y = 1/2 Av - b
+    return y * v + func_(v.two_norm()); // <1/2 Av - b,v> + H(|v|)
+  }
+
+  SmallVector descentDirection(const SmallVector x) const { // a descent direction at x, or 0 if none is found
+    if (x == SmallVector(0.0)) {
+      SmallVector d = SmoothGrad(x);
+      d *= -1; // descend along the negative gradient; matches the ret *= -1 below
+      // Decline of the smooth part in the direction d: <grad,d> = -(d*d)
+      double smoothDecline = -(d * d);
+      double nonlinearDecline =
+          func_.rightDifferential(0.0) * d.two_norm(); // TODO: is this correct?
+      double combinedDecline = smoothDecline + nonlinearDecline;
+      return (combinedDecline < 0) ? d : SmallVector(0.0);
+    }
+
+    SmallVector const pg = PlusGrad(x);
+    SmallVector const mg = MinusGrad(x);
+    SmallVector ret;
+    // TODO: collinearity checks suck
+    if (pg * x == pg.two_norm() * x.two_norm() && // NOTE(review): exact float equality is fragile; consider a tolerance
+        -(mg * x) == mg.two_norm() * x.two_norm()) {
+      return SmallVector(0);
+    } else if (pg * x >= 0 && mg * x >= 0) {
+      ret = pg;
+    } else if (pg * x <= 0 && mg * x <= 0) {
+      ret = mg;
+    } else {
+      ret = project(SmoothGrad(x), x);
+    }
+    ret *= -1; // pg/mg/projection are gradient-like; negate to descend
+    return ret;
+  }
+
+  SmallMatrix A; // NOTE: minimise() assumes A is symmetric
+  SmallVector b;
+
+private:
+  Function func_;
+
+  // Gradient of the smooth part
+  SmallVector SmoothGrad(const SmallVector x) const {
+    SmallVector y;
+    A.mv(x, y); // y = Av
+    y -= b;     // y = Av - b
+    return y;
+  }
+
+  SmallVector PlusGrad(const SmallVector x) const { // smooth gradient + right derivative of H; assumes x != 0 (divides by |x|)
+    SmallVector y = SmoothGrad(x);
+    y.axpy(func_.rightDifferential(x.two_norm()) / x.two_norm(), x);
+    return y;
+  }
+
+  SmallVector MinusGrad(const SmallVector x) const { // smooth gradient + left derivative of H; assumes x != 0 (divides by |x|)
+    SmallVector y = SmoothGrad(x);
+    y.axpy(func_.leftDifferential(x.two_norm()) / x.two_norm(), x);
+    return y;
+  }
+
+  // No normalising is done!
+  SmallVector project(const SmallVector z, const SmallVector x) const { // component of z orthogonal to x
+    SmallVector y = z;
+    y.axpy(-(z * x) / x.two_norm2(), x);
+    return y;
+  }
+};
+
+template <class Functional>
+void minimise(const Functional &J, const typename Functional::SmallVector x,
+              typename Functional::SmallVector &corr) {
+  typedef typename Functional::SmallVector SmallVector;
+  SmallVector descDir = J.descentDirection(x); // zero signals: no descent direction
+
+  if (descDir == SmallVector(0.0)) {
+    corr = SmallVector(0.0);
+    return;
+  }
+
+  // {{{ Construct a restriction of J to the line x + t * descDir
+
+  /* We have
+
+     1/2 <A(u+xv),u+xv>-<b,u+xv> = 1/2 <Av,v> x^2 - <b-Au,v> x + <1/2 Au-b,u>
+
+     since A is symmetric.
+  */
+  SmallVector tmp;
+
+  J.A.mv(descDir, tmp);                // Av
+  double const JRestA = tmp * descDir; // <Av,v>
+
+  J.A.mv(x, tmp);                              // Au
+  double const JRestb = (J.b - tmp) * descDir; // <b-Au,v>
+
+  typedef MyNonlinearity<SmallVector::dimension,
+                         typename Functional::FunctionType> MyNonlinearityType;
+  MyNonlinearityType phi;
+  typedef DirectionalConvexFunction<MyNonlinearityType>
+  MyDirectionalConvexFunctionType;
+  MyDirectionalConvexFunctionType JRest(JRestA, JRestb, phi, x, descDir);
+  // }}}
+
+  // FIXME: default values are used
+  Bisection bisection;
+  int count;
+  // FIXME: does x_old = 1 make any sense?!
+  double const stepsize = bisection.minimize(JRest, 0.0, 1.0, count);
+  Dune::dverb << "Number of iterations in the bisection method: " << count
+              << std::endl;
+
+  // The correction is the descent direction scaled by the optimal step size.
+  corr = descDir;
+  corr *= stepsize;
+}
+
+#endif