diff --git a/src/mynonlinearity.hh b/src/mynonlinearity.hh
index cd701fdc66f3ad7f3b1c16b6ca5f313bcae809b1..e8d4fe8d5173f6c198cb2a8853a97b3e9ff507b7 100644
--- a/src/mynonlinearity.hh
+++ b/src/mynonlinearity.hh
@@ -22,10 +22,12 @@ class MyNonlinearity {
   typedef SmallVector VectorType;
   typedef SmallMatrix MatrixType;
 
+  double operator()(VectorType const x) const { return func_(x.two_norm()); }
+
   // directional subdifferential: at u on the line u + t*v
   // u and v are assumed to be non-zero
   void directionalSubDiff(VectorType const u, VectorType const v,
-                          Interval<double>& D) const {
+                          Interval<double> &D) const {
     if (u == SmallVector(0.0)) {
       D[0] = D[1] = func_.rightDifferential(0);
       return;
@@ -42,8 +44,18 @@ class MyNonlinearity {
     }
   }
 
-  void directionalDomain(const VectorType&, const VectorType&,
-                         Interval<double>& dom) const {
+  void upperGradient(const SmallVector x, SmallVector &ret) const {
+    ret = x;
+    ret *= func_.rightDifferential(x.two_norm()) / x.two_norm();
+  }
+
+  void lowerGradient(const SmallVector x, SmallVector &ret) const {
+    ret = x;
+    ret *= func_.leftDifferential(x.two_norm()) / x.two_norm();
+  }
+
+  void directionalDomain(const VectorType &, const VectorType &,
+                         Interval<double> &dom) const {
     dom[0] = std::numeric_limits<double>::min();
     dom[1] = std::numeric_limits<double>::max();
   }
diff --git a/src/samplefunctional.hh b/src/samplefunctional.hh
index 5b479c8001d2da3199b4f6bf692262b3f7d9a356..a44e2e8ac487d47346c6ebb6074815d541d03971 100644
--- a/src/samplefunctional.hh
+++ b/src/samplefunctional.hh
@@ -21,25 +21,28 @@ class SampleFunctional {
   typedef Dune::FieldMatrix<double, dimension, dimension> SmallMatrix;
   typedef Function FunctionType;
 
-  SampleFunctional(SmallMatrix _A, SmallVector _b) : A(_A), b(_b), func_() {}
+  typedef MyNonlinearity<dimension, Function> NonlinearityType;
+
+  SampleFunctional(SmallMatrix _A, SmallVector _b) : A(_A), b(_b) {}
 
   double operator()(const SmallVector v) const {
     SmallVector y;
-    A.mv(v, y);                          // y = Av
-    y /= 2;                              // y = 1/2 Av
-    y -= b;                              // y = 1/2 Av - b
-    return y * v + func_(v.two_norm()); // <1/2 Av - b,v> + H(|v|)
+    A.mv(v, y);             // y = Av
+    y /= 2;                 // y = 1/2 Av
+    y -= b;                 // y = 1/2 Av - b
+    return y * v + phi_(v); // <1/2 Av - b,v> + H(|v|)
   }
 
   void descentDirection(const SmallVector x, SmallVector &ret) const {
     if (x == SmallVector(0.0)) {
+      // If there is a direction of descent, this is it
       SmallVector const d = smoothGradient(x);
 
-      // Decline of the smooth part in the negative gradient direction
-      double smoothDecline = -(d * d);
-      double nonlinearDecline =
-          func_.rightDifferential(0.0) * d.two_norm(); // TODO: is this correct?
-      double combinedDecline = smoothDecline + nonlinearDecline;
+      Interval<double> D;
+      phi_.directionalSubDiff(x, d, D);
+      double const nonlinearDecline = D[1];
+      double const smoothDecline = -(d * d);
+      double const combinedDecline = smoothDecline + nonlinearDecline;
 
       if (combinedDecline < 0) {
         ret = d;
@@ -82,7 +85,7 @@ class SampleFunctional {
   SmallVector b;
 
 private:
-  Function func_;
+  NonlinearityType phi_;
 
   // Gradient of the smooth part
   SmallVector smoothGradient(const SmallVector x) const {
@@ -93,14 +96,16 @@
   }
 
   SmallVector upperGradient(const SmallVector x) const {
-    SmallVector y = smoothGradient(x);
-    y.axpy(func_.rightDifferential(x.two_norm()) / x.two_norm(), x);
+    SmallVector y;
+    phi_.upperGradient(x, y);
+    y += smoothGradient(x);
     return y;
   }
 
   SmallVector lowerGradient(const SmallVector x) const {
-    SmallVector y = smoothGradient(x);
-    y.axpy(func_.leftDifferential(x.two_norm()) / x.two_norm(), x);
+    SmallVector y;
+    phi_.lowerGradient(x, y);
+    y += smoothGradient(x);
     return y;
   }
 
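For orientation, a minimal sketch (not part of the patch) of how the refactored MyNonlinearity could be exercised on its own. The Function concept (operator()(double), leftDifferential, rightDifferential), the default constructor of MyNonlinearity, and the include path for Interval<double> are assumptions inferred from the calls visible in the diff, not confirmed project API.

// Sketch only: drives the interface that samplefunctional.hh now relies on.
#include <dune/common/fvector.hh>

#include "interval.hh"        // assumed location of Interval<T>
#include "mynonlinearity.hh"

// Hypothetical scalar function H(t) = a*t; both one-sided derivatives are a.
struct LinearFunction {
  explicit LinearFunction(double a = 2.0) : a_(a) {}
  double operator()(double s) const { return a_ * s; }
  double leftDifferential(double) const { return a_; }
  double rightDifferential(double) const { return a_; }

private:
  double a_;
};

int main() {
  typedef Dune::FieldVector<double, 2> SmallVector;
  MyNonlinearity<2, LinearFunction> phi; // assumes a default constructor

  SmallVector u(0.0), v(0.0);
  u[0] = 3.0;
  u[1] = 4.0; // |u| = 5
  v[0] = 1.0;

  double const value = phi(u); // H(|u|) = a * |u|

  Interval<double> D;
  phi.directionalSubDiff(u, v, D); // one-sided derivatives along u + t*v at t = 0

  SmallVector g(0.0);
  phi.upperGradient(u, g); // g = rightDifferential(|u|) * u / |u|

  return (value > 0.0 && D[0] <= D[1] && g.two_norm() > 0.0) ? 0 : 1;
}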