Commit df9d8b9d authored by Carsten Gräser's avatar Carsten Gräser
Browse files

Merge branch 'master' into new_interface

parents fdf83307 80a485bd
# Default cmake build directory
build-cmake
# /
/Makefile
......@@ -77,6 +79,10 @@
/dune/tnnmg/solvers/Makefile.in
/dune/tnnmg/solvers/Makefile
# /dune/tnnmg/test/
/dune/tnnmg/test/Makefile.in
/dune/tnnmg/test/Makefile
# /m4/
/m4/Makefile.in
/m4/Makefile
......
---
# Install external dependencies
before_script:
- pushd /duneci/modules
- git clone https://git.imp.fu-berlin.de/agnumpde/dune-solvers.git
- dunecontrol --opts=/duneci/opts.gcc --only=dune-solvers all
- popd
dune:git--clang:
image: duneci/dune:git
script:
- dunecontrol --opts=/duneci/opts.clang --current all
- dunecontrol --current make build_tests
- dunecontrol --current make test
dune:git--gcc:
image: duneci/dune:git
script:
- dunecontrol --opts=/duneci/opts.gcc --current all
- dunecontrol --current make build_tests
- dunecontrol --current make test
add_subdirectory("test")
install(FILES
c1functional.hh
c1directionalrestriction.hh
......
......@@ -2,6 +2,7 @@
#define DUNE_TNNMG_FUNCTIONALS_NORMFUNCTIONAL_HH
#include <dune/common/bitsetvector.hh>
#include <dune/tnnmg/problem-classes/nonlinearity.hh>
#include <dune/tnnmg/functionals/nonsmoothconvexfunctional.hh>
namespace Dune {
......@@ -105,7 +106,7 @@ namespace Dune {
* \param u base point
* \param v direction
*/
virtual void directionalSubDiff(const VectorType& u, const VectorType& v, Dune::Solvers::Interval<double>& subdifferential)
virtual void directionalSubDiff(const VectorType& u, const VectorType& v, Dune::Solvers::Interval<double>& subdifferential) const
{
subdifferential[0] = 0.0;
subdifferential[1] = 0.0;
......@@ -117,10 +118,8 @@ namespace Dune {
subdifferential[0] += coefficients_[row] * (u[row]*v[row]) / norm;
subdifferential[1] += coefficients_[row] * (u[row]*v[row]) / norm;
} else {
for (int j=0; j<blockSize; j++) {
subdifferential[0] -= coefficients_[row] * v[row][j];
subdifferential[1] += coefficients_[row] * v[row][j];
}
subdifferential[0] -= coefficients_[row] * v[row].two_norm();
subdifferential[1] += coefficients_[row] * v[row].two_norm();
}
}
}
......@@ -197,7 +196,7 @@ namespace Dune {
virtual void setVector(const VectorType& v)
{
u_ = v;
};
}
/** \brief Update the (i,j)-th entry of the internal position vector u_pos to x.
*
......@@ -209,7 +208,7 @@ namespace Dune {
virtual void updateEntry(int i, double x, int j)
{
u_[i][j] = x;
};
}
private:
......
......@@ -3,6 +3,7 @@
#include <dune/common/fvector.hh>
#include <dune/common/fmatrix.hh>
#include <dune/common/deprecated.hh>
#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/matrix.hh>
......@@ -147,6 +148,17 @@ namespace Dune {
return energy;
}
/** \brief Evaluates the functional at a given vector u
*
* \param[in] u vector to evaluate the energy at
* \returns computed energy
* \deprecated Use operator() instead
*/
double computeEnergy(const VectorType& u) const DUNE_DEPRECATED_MSG("Use operator() instead")
{
return operator()(u);
}
public:
//! a scalar factor in front of the quadratic part
double a;
......
#ifndef DUNE_TNNMG_FUNCTIONALS_SUMFUNCTIONAL_HH
#define DUNE_TNNMG_FUNCTIONALS_SUMFUNCTIONAL_HH
#include <memory>
#include <dune/common/shared_ptr.hh>
#include "dune/tnnmg/functionals/nonsmoothconvexfunctional.hh"
namespace Dune {
......@@ -25,7 +28,7 @@ namespace Dune {
psi_ = Dune::stackobject_to_shared_ptr(psi);
}
SumFunctional(Dune::shared_ptr<NonlinearityType>& phi, Dune::shared_ptr<NonlinearityType>& psi) :
SumFunctional(std::shared_ptr<NonlinearityType>& phi, std::shared_ptr<NonlinearityType>& psi) :
phi_(phi),
psi_(psi)
{}
......@@ -115,8 +118,8 @@ namespace Dune {
}
private:
Dune::shared_ptr<NonlinearityType> phi_;
Dune::shared_ptr<NonlinearityType> psi_;
std::shared_ptr<NonlinearityType> phi_;
std::shared_ptr<NonlinearityType> psi_;
};
} // namespace TNNMG
......
set(TESTS
normfunctionaltest
)
foreach(_test ${TESTS})
dune_add_test(SOURCES ${_test}.cc)
endforeach()
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
#ifndef DUNE_TNNMG_FUNCTIONALS_FUNCTIONALTEST_HH
#define DUNE_TNNMG_FUNCTIONALS_FUNCTIONALTEST_HH
/** \file
* \brief Contains various unit tests for nonlinear convex functionals
*/
#include <vector>
#include <cmath>
#include <limits>
#include <array>
#include <iostream>
#include <dune/common/exceptions.hh>
#include <dune/common/classname.hh>
#include <dune/istl/matrixindexset.hh>
#include <dune/solvers/common/interval.hh>
namespace Dune {
namespace TNNMG {
/** \brief Test whether functional is convex
 *
 * For every ordered pair of test points the functional is evaluated at a few
 * convex combinations of the pair and compared against the corresponding
 * interpolated value (up to a tolerance of 1e-10).
 *
 * \tparam Functional Type of the functional to be tested
 * \param functional The functional to be tested
 * \param testPoints Test convexity along segment between pairs of these points
 * \throws Dune::Exception if a convexity violation is detected
 */
template <class Functional>
void testConvexity(const Functional& functional,
                   const std::vector<typename Functional::VectorType>& testPoints)
{
  for (const auto& segmentStart : testPoints)
  {
    for (const auto& segmentEnd : testPoints)
    {
      // Pre-compute functional values at the segment ends
      const double valueAtStart = functional(segmentStart);
      const double valueAtEnd   = functional(segmentEnd);

      // Test convexity at a few selected points between the two test points
      for (double lambda : {0.0, 0.2, 0.4, 0.6, 0.8, 1.0})
      {
        // Build the convex combination (1-lambda)*start + lambda*end blockwise
        typename Functional::VectorType combination(segmentStart.size());
        for (size_t block = 0; block < segmentStart.size(); ++block)
          for (size_t comp = 0; comp < segmentStart[block].size(); ++comp)
            combination[block][comp]
                = (1-lambda)*segmentStart[block][comp] + lambda*segmentEnd[block][comp];

        // A convex functional must lie below the chord (up to tolerance)
        if (functional(combination) > ((1-lambda)*valueAtStart + lambda*valueAtEnd) + 1e-10)
          DUNE_THROW(Exception, "Functional is not convex!");
      }
    }
  }
}
/** \brief Test whether a functional is positive 1-homogeneous
 *
 * Being positive 1-homogeneous is not a requirement for functionals to work
 * in a TNNMG algorithm. However, various important functionals are homogeneous
 * in this sense, therefore it is convenient to have a test for it.
 *
 * \tparam Functional Type of the functional to be tested
 * \param functional The functional to be tested
 * \param testDirections Test homogeneity at these points
 * \throws Dune::MathError if f(t*d) deviates from t*f(d) by more than 1e-6
 */
template <class Functional>
void testHomogeneity(const Functional& functional,
                     const std::vector<typename Functional::VectorType>& testDirections)
{
  // Nonnegative scaling factors, including 0 and values above 1
  const std::array<double,8> scalings = {0.0, 0.25, 0.5, 0.75, 1.0, 1.5, 2.0, 5.0};

  for (auto&& direction : testDirections)
  {
    for (double scaling : scalings)
    {
      // Scale a copy of the direction in place
      auto scaledDirection = direction;
      scaledDirection *= scaling;

      // Positive 1-homogeneity: f(t*d) == t*f(d) for t >= 0
      const double expected = scaling * functional(direction);
      if (std::abs(functional(scaledDirection) - expected) > 1e-6)
        DUNE_THROW(MathError, "Functional is not positive 1-homogeneous!");
    }
  }
}
/** \brief Test the addGradient method
 *
 * This method tests the Functional::addGradient method by comparing its result with a
 * Finite-Difference-Approximation. Test points from the testPoints input argument
 * are skipped if the method Functional::regularity returns a value larger than 1e10
 * for them. In that case the functional is not differentiable at that point.
 *
 * \tparam Functional Type of the functional to be tested
 * \param functional The functional to be tested (non-const: setVector mutates its state)
 * \param testPoints Test addGradient at these points
 * \throws Dune::MathError if the gradient disagrees with the FD approximation
 */
template <class Functional>
void testGradient(Functional& functional,
                  const std::vector<typename Functional::VectorType>& testPoints)
{
  // FD step size; loop-invariant, so computed once.
  // Best value: square root of the machine precision
  const double eps = std::sqrt(std::numeric_limits<double>::epsilon());

  for (auto&& testPoint : testPoints)
  {
    // Get the gradient at the current test point as computed by 'functional'
    typename Functional::VectorType gradient(testPoint.size());
    gradient = 0;
    functional.addGradient(testPoint, gradient);

    // Compare each gradient entry with a central finite-difference quotient
    for (size_t i=0; i<testPoint.size(); i++)
      for (size_t j=0; j<testPoint[i].size(); j++)
      {
        // Do not compare anything if the functional claims to be non-differentiable
        // at the test point in the given direction
        functional.setVector(testPoint);
        if (functional.regularity(i, testPoint[i][j], j) > 1e10)
          continue;

        auto forwardPoint = testPoint;
        forwardPoint[i][j] += eps;
        auto backwardPoint = testPoint;
        backwardPoint[i][j] -= eps;

        // Central difference quotient in the (i,j) coordinate direction
        auto fdGradient = (functional(forwardPoint) - functional(backwardPoint)) / (2*eps);

        if (std::abs(fdGradient - gradient[i][j]) > 100*eps)
        {
          // Report the failure consistently on std::cerr before throwing
          std::cerr << "Bug in addGradient for functional type " << Dune::className<Functional>() << ":" << std::endl;
          std::cerr << "Gradient doesn't match FD approximation at coefficient (" << i << ", " << j << ")" << std::endl;
          std::cerr << "Gradient: " << gradient[i][j] << ", FD: " << fdGradient << std::endl;
          DUNE_THROW(MathError,"");
        }
      }
  }
}
/** \brief Test the addHessian method
 *
 * This method tests the Functional::addHessian method by comparing its result with a
 * Finite-Difference-Approximation. Test points from the testPoints input argument
 * are skipped if the method Functional::regularity returns a value larger than 1e10
 * for them. In that case the functional is not differentiable at that point.
 *
 * \tparam Functional Type of the functional to be tested
 * \param functional The functional to be tested (non-const: setVector mutates its state)
 * \param testPoints Test addHessian at these points
 */
template <class Functional>
void testHessian(Functional& functional,
                 const std::vector<typename Functional::VectorType>& testPoints)
{
  for (auto&& testPoint : testPoints)
  {
    // Get the Hessian at the current test point as computed by 'functional'
    /** \todo This code is conservative, and adds the entire matrix to the pattern.
     * A smarter code would ask the functional for all entries it intends to write,
     * but I don't know if and how this is possible.
     */
    MatrixIndexSet diagonalPattern;
    diagonalPattern.resize(testPoint.size(), testPoint.size());
    for (size_t i=0; i<testPoint.size(); i++)
      for (size_t j=0; j<testPoint.size(); j++)
        diagonalPattern.add(i,j);

    typename Functional::MatrixType hessian;
    diagonalPattern.exportIdx(hessian);

    // Zero-initialize: addHessian only *adds* to existing entries
    hessian = 0;
    functional.addHessian(testPoint, hessian);

    // FD step size. Best value: fourth root of the machine precision
    const double eps = std::pow(std::numeric_limits<double>::epsilon(),0.25);

    // Loop over all pairs of scalar degrees of freedom:
    // (i,k) and (j,l) are (block index, in-block component) pairs
    for (size_t i=0; i<testPoint.size(); i++)
    {
      for (size_t j=0; j<testPoint.size(); j++)
      {
        for (size_t k=0; k<testPoint[i].size(); k++)
        {
          for (size_t l=0; l<testPoint[j].size(); l++)
          {
            // Do not compare anything if the functional claims to be non-differentiable
            // at the test point in the given direction
            functional.setVector(testPoint);
            bool nonDifferentiableIK = functional.regularity(i, testPoint[i][k], k) > 1e10;
            bool nonDifferentiableJL = functional.regularity(j, testPoint[j][l], l) > 1e10;
            if (nonDifferentiableIK || nonDifferentiableJL)
              continue;

            // Compute an approximation to the derivative by finite differences:
            // the four stencil points (+,+), (-,+), (+,-), (-,-)
            std::array<typename Functional::VectorType, 4> neighborPoints;
            std::fill(neighborPoints.begin(), neighborPoints.end(), testPoint);
            neighborPoints[0][i][k] += eps;
            neighborPoints[0][j][l] += eps;
            neighborPoints[1][i][k] -= eps;
            neighborPoints[1][j][l] += eps;
            neighborPoints[2][i][k] += eps;
            neighborPoints[2][j][l] -= eps;
            neighborPoints[3][i][k] -= eps;
            neighborPoints[3][j][l] -= eps;

            std::array<double,4> neighborValues;
            for (int ii = 0; ii < 4; ii++)
              neighborValues[ii] = functional(neighborPoints[ii]);

            // The current partial derivative; pattern entries that were never
            // written count as zero
            double derivative = hessian.exists(i,j) ? hessian[i][j][k][l] : 0.0;

            // Second-order central difference quotient for d2f/(d(i,k) d(j,l))
            double finiteDiff = (neighborValues[0]
                - neighborValues[1] - neighborValues[2]
                + neighborValues[3]) / (4 * eps * eps);

            // Check whether second derivative and its FD approximation match
            if (std::abs(derivative - finiteDiff) > eps)
            {
              std::cout << "Bug in addHessian for functional type "
                        << Dune::className<Functional>() << ":" << std::endl;
              std::cout << " Second derivative does not agree with FD approximation" << std::endl;
              std::cout << " Expected: " << derivative << ", FD: " << finiteDiff << std::endl;
              std::cout << std::endl;
              DUNE_THROW(MathError,"");
            }
          }
        }
      }
    }
  }
}
/** \brief Test the directionalSubDiff method
 *
 * This method tests the Functional::directionalSubDiff method by comparing its result with a
 * Finite-Difference-Approximation: the one-sided difference quotients must match the
 * lower and upper bounds of the directional subdifferential interval.
 *
 * \tparam Functional Type of the functional to be tested
 * \param functional The functional to be tested
 * \param testPoints Test directionalSubDiff at these points (also used as test directions)
 * \throws Dune::MathError if the subdifferential disagrees with the FD approximation
 */
template <class Functional>
void testDirectionalSubdifferential(const Functional& functional,
                                    const std::vector<typename Functional::VectorType>& testPoints)
{
  // Step size. Best value: square root of the machine precision
  const double eps = std::sqrt(std::numeric_limits<double>::epsilon());

  // Loop over all test points
  for (auto&& testPoint : testPoints)
  {
    // Abuse test points also as test directions
    for (auto&& testDirection : testPoints)
    {
      Solvers::Interval<double> subDifferential;
      functional.directionalSubDiff(testPoint, testDirection, subDifferential);

      // One-sided difference quotients along the test direction
      auto forwardPoint = testPoint;
      forwardPoint.axpy(eps,testDirection);
      auto backwardPoint = testPoint;
      backwardPoint.axpy(-eps,testDirection);

      auto forwardFDGradient = (functional(forwardPoint) - functional(testPoint)) / eps;
      auto backwardFDGradient = (functional(testPoint) - functional(backwardPoint)) / eps;

      if (std::abs(backwardFDGradient - subDifferential[0]) > 100*eps
          or std::abs(forwardFDGradient - subDifferential[1]) > 100*eps)
      {
        // Report the failure consistently on std::cerr before throwing
        std::cerr << "Bug in directionalSubDiff for functional type " << Dune::className<Functional>() << ":" << std::endl;
        std::cerr << "Subdifferential doesn't match FD approximation" << std::endl;
        std::cerr << "SubDifferential: " << subDifferential
                  << ", FD: [" << backwardFDGradient << ", " << forwardFDGradient << "]" << std::endl;
        std::cerr << "Test point:" << std::endl << testPoint << std::endl;
        std::cerr << "Test direction:" << std::endl << testDirection << std::endl;
        DUNE_THROW(MathError,"");
      }
    }
  }
}
/** \brief Test the subDiff method
 *
 * This method tests the Functional::subDiff method by comparing its result with a
 * Finite Difference approximation: for every scalar degree of freedom the one-sided
 * difference quotients must match the bounds of the subdifferential interval.
 *
 * \tparam Functional Type of the functional to be tested
 * \param functional The functional to be tested (non-const: setVector mutates its state)
 * \param testPoints Test subDiff at these points
 * \throws Dune::MathError if the subdifferential disagrees with the FD approximation
 */
template <class Functional>
void testSubDiff(Functional& functional,
                 const std::vector<typename Functional::VectorType>& testPoints)
{
  // Step size; loop-invariant, so computed once.
  // Best value: square root of the machine precision
  const double eps = std::sqrt(std::numeric_limits<double>::epsilon());

  for (auto&& testPoint : testPoints)
  {
    for (size_t i=0; i<testPoint.size(); i++)
      for (size_t j=0; j<testPoint[i].size(); j++)
      {
        Solvers::Interval<double> subDifferential;

        // subDiff needs to be given the current point internally
        functional.setVector(testPoint);
        functional.subDiff(i, testPoint[i][j], subDifferential, j);

        // One-sided difference quotients in the (i,j) coordinate direction
        auto forwardPoint = testPoint;
        forwardPoint[i][j] += eps;
        auto backwardPoint = testPoint;
        backwardPoint[i][j] -= eps;

        auto forwardFDGradient = (functional(forwardPoint) - functional(testPoint)) / eps;
        auto backwardFDGradient = (functional(testPoint) - functional(backwardPoint)) / eps;

        if (std::abs(backwardFDGradient - subDifferential[0]) > 100*eps
            or std::abs(forwardFDGradient - subDifferential[1]) > 100*eps)
        {
          // Report the failure consistently on std::cerr before throwing
          std::cerr << "Bug in subDiff for functional type " << Dune::className<Functional>() << ":" << std::endl;
          std::cerr << "Subdifferential doesn't match FD approximation at coefficient (" << i << ", " << j << ")" << std::endl;
          std::cerr << "SubDifferential: " << subDifferential
                    << ", FD: [" << backwardFDGradient << ", " << forwardFDGradient << "]" << std::endl;
          DUNE_THROW(MathError,"");
        }
      }
  }
}
} // namespace TNNMG
} // namespace Dune
#endif // DUNE_TNNMG_FUNCTIONALS_FUNCTIONALTEST_HH
\ No newline at end of file
// -*- tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*-
// vi: set et ts=4 sw=2 sts=2:
#include <config.h>
#include <iostream>
#include <dune/tnnmg/functionals/normfunctional.hh>
#include <dune/tnnmg/functionals/test/functionaltest.hh>
using namespace Dune;
// Unit test driver for TNNMG::NormFunctional: runs all generic functional
// tests from functionaltest.hh on a weighted block-norm functional on (R^3)^2.
int main(int argc, char** argv)
{
  // Create a norm functional on (R^3)^2 for testing;
  // one weight coefficient per vector block
  std::vector<double> coefficients = {2.0, 3.0};
  typedef TNNMG::NormFunctional<3> Functional;
  Functional functional(coefficients);

  // A set of local test points, i.e., values for a single vector block
  // (includes the origin, where the norm is not differentiable)
  std::vector<Functional::LocalVectorType> localTestPoints = {{0,0,0},
                                                              {1,0,0},
                                                              {0,-2,0},
                                                              {3.14,3.14,-4}};

  // Create real test points (i.e., block vectors) from the given values for a single block:
  // all pairwise combinations of the local test points
  std::vector<Functional::VectorType> testPoints(localTestPoints.size()*localTestPoints.size());
  for (size_t i=0; i<testPoints.size(); i++)
    testPoints[i].resize(coefficients.size());

  for (size_t i=0; i<localTestPoints.size(); i++)
    for (size_t j=0; j<localTestPoints.size(); j++)
    {
      testPoints[j*localTestPoints.size()+i][0] = localTestPoints[i];
      testPoints[j*localTestPoints.size()+i][1] = localTestPoints[j];
    }

  // Test whether the functional is convex
  testConvexity(functional, testPoints);

  // Test whether the functional is positive 1-homogeneous
  // We abuse the test points as test directions.
  testHomogeneity(functional, testPoints);

  // Test the first derivative at the given test points
  testGradient(functional, testPoints);

  // Test the second derivative at the given test points
  testHessian(functional, testPoints);

  // Test the directional subdifferential at the given test points
  testDirectionalSubdifferential(functional, testPoints);

  // Test the partial subdifferential at the given test points
  testSubDiff(functional, testPoints);

  return 0;
}
......@@ -62,7 +62,7 @@ public:
const GradientMatrix& B,
const TransposedGradientMatrix& Bt,
const WeightVector& weights,
Dune::array<LocalAnisotropyPointer, variants> gamma,
std::array<LocalAnisotropyPointer, variants> gamma,
double TOL = 1e-15) :
B_(B),
Bt_(Bt),
......@@ -314,7 +314,7 @@ protected:
const GradientMatrix& B_;
const TransposedGradientMatrix& Bt_;
const WeightVector& weights_;
Dune::array<LocalAnisotropyPointer, variants> gamma_;
std::array<LocalAnisotropyPointer, variants> gamma_;
const double TOL_;
Vector u_;
......
#ifndef SUM_NONLINEARITY_HH
#define SUM_NONLINEARITY_HH
#include "dune/tnnmg/problem-classes/nonlinearity.hh"
#include <memory>
#include <dune/common/shared_ptr.hh>
#include "dune/tnnmg/problem-classes/nonlinearity.hh"
template < class LocalVectorType=Dune::FieldVector<double,1>, class LocalMatrixType=Dune::FieldMatrix<double,1,1> >
class SumNonlinearity: public Nonlinearity<LocalVectorType, LocalMatrixType>
......@@ -23,7 +25,7 @@ class SumNonlinearity: public Nonlinearity<LocalVectorType, LocalMatrixType>
psi_ = Dune::stackobject_to_shared_ptr(psi);
}
SumNonlinearity(Dune::shared_ptr<NonlinearityType>& phi, Dune::shared_ptr<NonlinearityType>& psi) :
SumNonlinearity(std::shared_ptr<NonlinearityType>& phi, std::shared_ptr<NonlinearityType>& psi) :