agnumpde / dune-tnnmg / Commits / 01f0b453

Commit 01f0b453, authored Oct 19, 2020 by graeser

Merge branch 'feature/make-old-tnnmg-compile-again' into 'master'

Feature/make old tnnmg compile again

See merge request !21

Parents: f9897a89, aded2b58
Pipeline #32288 passed with stage in 6 minutes and 12 seconds
Changes: 12 · Pipelines: 1
dune/tnnmg/functionals/sumfunctional.hh
#ifndef DUNE_TNNMG_FUNCTIONALS_SUMFUNCTIONAL_HH
#define DUNE_TNNMG_FUNCTIONALS_SUMFUNCTIONAL_HH

#ifdef USE_OLD_TNNMG
#include <memory>

#include <dune/common/shared_ptr.hh>

#include "dune/tnnmg/functionals/nonsmoothconvexfunctional.hh"
#else
#include <dune/common/hybridutilities.hh>
#include <dune/common/std/apply.hh>
#endif

#ifndef USE_OLD_TNNMG

namespace Dune {
namespace TNNMG {
...
...
@@ -149,13 +145,12 @@ auto coordinateRestriction(const ShiftedSumFunctional<F...>& f, const Index& i)
} // end namespace TNNMG
} // end namespace Dune

#else

namespace Dune {
namespace TNNMG {

template <class LocalVectorType = Dune::FieldVector<double,1>,
          class LocalMatrixType = Dune::FieldMatrix<double,1,1> >
-class SumFunctional : public NonsmoothConvexFunctional<LocalVectorType, LocalMatrixType>
+class VintageSumFunctional : public NonsmoothConvexFunctional<LocalVectorType, LocalMatrixType>
{
    public:

        typedef NonsmoothConvexFunctional<LocalVectorType, LocalMatrixType> NonlinearityType;
...
...
@@ -166,13 +161,13 @@ namespace Dune {
        typedef typename NonlinearityType::MatrixType MatrixType;
        typedef typename NonlinearityType::IndexSet IndexSet;
-        SumFunctional(NonlinearityType& phi, NonlinearityType& psi)
+        VintageSumFunctional(NonlinearityType& phi, NonlinearityType& psi)
         {
             phi_ = Dune::stackobject_to_shared_ptr(phi);
             psi_ = Dune::stackobject_to_shared_ptr(psi);
         }

-        SumFunctional(std::shared_ptr<NonlinearityType>& phi, std::shared_ptr<NonlinearityType>& psi) :
+        VintageSumFunctional(std::shared_ptr<NonlinearityType>& phi, std::shared_ptr<NonlinearityType>& psi) :
             phi_(phi),
             psi_(psi)
         {}
...
...
@@ -269,6 +264,5 @@ namespace Dune {
} // namespace TNNMG
} // namespace Dune

#endif

#endif
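Note: the word-level diff above suggests the legacy class and its constructors were renamed from SumFunctional to VintageSumFunctional, presumably to avoid clashing with the new SumFunctional. A minimal usage sketch under that assumption follows; MyNonlinearity is a placeholder for any NonsmoothConvexFunctional implementation and is not part of this commit.

// Hypothetical sketch: define USE_OLD_TNNMG before including the header to compile the legacy branch.
#define USE_OLD_TNNMG
#include <dune/tnnmg/functionals/sumfunctional.hh>

// MyNonlinearity is a placeholder for a NonsmoothConvexFunctional implementation.
MyNonlinearity phi, psi;

// The reference constructor stores shared_ptrs to the stack objects via
// Dune::stackobject_to_shared_ptr; the shared_ptr constructor stores the pointers directly.
Dune::TNNMG::VintageSumFunctional<> sum(phi, psi);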
dune/tnnmg/functionals/test/functionaltest.hh
...
...
@@ -32,11 +32,7 @@ namespace TNNMG {
 */
template <class Functional>
void testConvexity(const Functional& functional,
#ifdef USE_OLD_TNNMG
                   const std::vector<typename Functional::VectorType>& testPoints)
#else
                   const std::vector<typename Functional::Vector>& testPoints)
#endif
{
  for (size_t i=0; i<testPoints.size(); i++)
    for (size_t j=0; j<testPoints.size(); j++)
...
...
@@ -76,11 +72,7 @@ void testConvexity(const Functional& functional,
 */
template <class Functional>
void testHomogeneity(const Functional& functional,
#ifdef USE_OLD_TNNMG
                     const std::vector<typename Functional::VectorType>& testDirections)
#else
                     const std::vector<typename Functional::Vector>& testDirections)
#endif
{
  for (auto&& testDirection : testDirections)
  {
...
...
@@ -109,20 +101,12 @@ void testHomogeneity(const Functional& functional,
 */
template <class Functional>
void testGradient(Functional& functional,
#ifdef USE_OLD_TNNMG
                  const std::vector<typename Functional::VectorType>& testPoints)
#else
                  const std::vector<typename Functional::Vector>& testPoints)
#endif
{
  for (auto&& testPoint : testPoints)
  {
    // Get the gradient at the current test point as computed by 'functional'
#ifdef USE_OLD_TNNMG
    typename Functional::VectorType gradient(testPoint.size());
#else
    typename Functional::Vector gradient(testPoint.size());
#endif
    gradient = 0;
    functional.addGradient(testPoint, gradient);
...
...
@@ -170,11 +154,7 @@ void testGradient(Functional& functional,
 */
template <class Functional>
void testHessian(Functional& functional,
#ifdef USE_OLD_TNNMG
                 const std::vector<typename Functional::VectorType>& testPoints)
#else
                 const std::vector<typename Functional::Vector>& testPoints)
#endif
{
  for (auto&& testPoint : testPoints)
  {
...
...
@@ -267,11 +247,7 @@ void testHessian(Functional& functional,
 */
template <class Functional>
void testDirectionalSubdifferential(const Functional& functional,
#ifdef USE_OLD_TNNMG
                                    const std::vector<typename Functional::VectorType>& testPoints)
#else
                                    const std::vector<typename Functional::Vector>& testPoints)
#endif
{
  // Step size. Best value: square root of the machine precision
  const double eps = std::sqrt(std::numeric_limits<double>::epsilon());
...
...
@@ -320,11 +296,7 @@ void testDirectionalSubdifferential(const Functional& functional,
 */
template <class Functional>
void testSubDiff(Functional& functional,
#ifdef USE_OLD_TNNMG
                 const std::vector<typename Functional::VectorType>& testPoints)
#else
                 const std::vector<typename Functional::Vector>& testPoints)
#endif
{
  for (auto&& testPoint : testPoints)
  {
...
...
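All of the test helpers above follow the same pattern: they take the functional plus a vector of test points (or directions) whose element type is Functional::VectorType when USE_OLD_TNNMG is defined and Functional::Vector otherwise. A hypothetical driver sketch follows; MyFunctional and the factory helpers are placeholders, and the Dune::TNNMG namespace qualification is assumed from the hunk headers, none of which is part of this commit.

// Hypothetical sketch; MyFunctional, makeMyFunctional and makeTestPoints are placeholders.
#ifdef USE_OLD_TNNMG
using TestVector = MyFunctional::VectorType;
#else
using TestVector = MyFunctional::Vector;
#endif

MyFunctional functional = makeMyFunctional();           // placeholder functional
std::vector<TestVector> testPoints = makeTestPoints();  // placeholder test data

// Run the consistency checks at the given test points.
Dune::TNNMG::testConvexity(functional, testPoints);
Dune::TNNMG::testGradient(functional, testPoints);
Dune::TNNMG::testHessian(functional, testPoints);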
dune/tnnmg/iterationsteps/CMakeLists.txt
install(FILES
    acceleratednonlineargsstep.hh
    blockseparabletnnmgstep.hh
    genericnonlineargs.hh
    genericnonlinearjacobi.hh
    linearcorrection.hh
...
...
dune/tnnmg/iterationsteps/blockseparabletnnmgstep.hh (new file, 0 → 100644)
#ifndef DUNE_TNNMG_ITERATIONSTEPS_BLOCK_SEPARABLE_TNNMG_STEP_HH
#define DUNE_TNNMG_ITERATIONSTEPS_BLOCK_SEPARABLE_TNNMG_STEP_HH

#include <string>
#include <sstream>
#include <vector>
#include <iomanip>

#include <dune/common/timer.hh>

#include "dune/solvers/iterationsteps/iterationstep.hh"
#include "dune/solvers/iterationsteps/lineariterationstep.hh"

/**
 * \brief This is the old implementation of a TNNMG step.
 *
 * \deprecated
 * This is only added for a transition period.
 */
template <class TNNMGProblemTypeTEMPLATE, class NonlinearSmootherType>
class TruncatedNonsmoothNewtonMultigrid :
    public IterationStep<typename TNNMGProblemTypeTEMPLATE::VectorType>
{
public:

    typedef TNNMGProblemTypeTEMPLATE TNNMGProblemType;
    typedef typename TNNMGProblemType::MatrixType MatrixType;
    typedef typename TNNMGProblemType::VectorType VectorType;
    typedef typename TNNMGProblemType::Linearization Linearization;

    typedef typename Linearization::MatrixType CoarseMatrixType;
    typedef typename Linearization::VectorType CoarseVectorType;
    typedef typename Linearization::BitVectorType CoarseBitVectorType;

    typedef LinearIterationStep<CoarseMatrixType, CoarseVectorType, CoarseBitVectorType> CoarseLinearIterationStep;

    struct Statistics {
        int iterationCount;
        double smoothingTime;
        double linearizationTime;
        double coarseCorrectionTime;
        double postProcessingTime;
    };

    TruncatedNonsmoothNewtonMultigrid() :
        problem_(0),
        nonlinearSmoother_(0),
        linearIterationStep_(0),
        preSmoothingSteps_(1),
        postSmoothingSteps_(0),
        linearIterationSteps_(1)
    {};

    TruncatedNonsmoothNewtonMultigrid(CoarseLinearIterationStep& linearIterationStep, NonlinearSmootherType& nonlinearSmoother) :
        problem_(0),
        nonlinearSmoother_(&nonlinearSmoother),
        linearIterationStep_(&linearIterationStep),
        preSmoothingSteps_(1),
        postSmoothingSteps_(0),
        linearIterationSteps_(1)
    {};

    void setNonlinearSmoother(NonlinearSmootherType& nonlinearSmoother)
    {
        nonlinearSmoother_ = &nonlinearSmoother;
    };

    void setLinearIterationStep(CoarseLinearIterationStep& linearIterationStep)
    {
        linearIterationStep_ = &linearIterationStep;
    };

    void setProblem(VectorType& x, TNNMGProblemType& problem)
    {
        this->x_ = &x;
        this->problem_ = &problem;
    };

    void setSmoothingSteps(int pre, int linear, int post)
    {
        preSmoothingSteps_ = pre;
        linearIterationSteps_ = linear;
        postSmoothingSteps_ = post;
    };

    virtual void preprocess()
    {
        outStream_.str("");
        outStream_ << problem_->getOutput(true);
        outStream_ << " step size ";
        outStream_ << " energy ";

        statistics_.iterationCount = 0;
        statistics_.smoothingTime = 0.0;
        statistics_.linearizationTime = 0.0;
        statistics_.coarseCorrectionTime = 0.0;
        statistics_.postProcessingTime = 0.0;
    }

    void iterate()
    {
        // clear previous output data
        outStream_.str("");

        // do some time measurement
        Dune::Timer timer;

        ++statistics_.iterationCount;

        VectorType& x = *x_;
        TNNMGProblemType& problem = *problem_;

        // apply nonlinear smoother (pre smoothing)
        timer.reset();
        nonlinearSmoother_->setProblem(x, problem);
        nonlinearSmoother_->ignoreNodes_ = ignoreNodes_;

        for (int i=0; i<preSmoothingSteps_; ++i)
            nonlinearSmoother_->iterate();
        x = nonlinearSmoother_->getSol();
        statistics_.smoothingTime += timer.elapsed();

        // assemble and truncate linear system for coarse correction
        timer.reset();
        Linearization linearization;
        problem.assembleTruncate(x, linearization, *ignoreNodes_);
        statistics_.linearizationTime += timer.elapsed();

        // solve linear system for coarse correction
        CoarseVectorType v(linearization.b.size());
        v = 0.0;

        // apply linear solver to coarse problem
        timer.reset();
        {
            linearIterationStep_->setProblem(linearization.A, v, linearization.b);
            linearIterationStep_->ignoreNodes_ = &(linearization.ignore);
            linearIterationStep_->preprocess();
            for (int i=0; i<linearIterationSteps_; ++i)
                linearIterationStep_->iterate();
            v = linearIterationStep_->getSol();
        }
        statistics_.coarseCorrectionTime += timer.elapsed();

        // post processing
        timer.reset();

        // project correction
        VectorType projected_v;
        problem.projectCoarseCorrection(x, v, projected_v, linearization);

        // compute damping parameter
        double alpha = problem.computeDampingParameter(x, projected_v);

        // apply damped coarse correction
        x.axpy(alpha, projected_v);
        statistics_.postProcessingTime += timer.elapsed();

        // apply nonlinear smoother (post smoothing)
        timer.reset();
        nonlinearSmoother_->setProblem(x, problem);

        for (int i=0; i<postSmoothingSteps_; ++i)
            nonlinearSmoother_->iterate();
        x = nonlinearSmoother_->getSol();
        statistics_.smoothingTime += timer.elapsed();

        outStream_ << problem.getOutput();
        outStream_.setf(std::ios::scientific);
        outStream_ << std::setw(15) << alpha;
        outStream_ << std::setw(15) << problem.computeEnergy(x);
    }

    VectorType getSol()
    {
        return *x_;
    }

    std::string getOutput() const
    {
        std::string s = outStream_.str();
        outStream_.str("");
        return s;
    }

    const Statistics& getStatistics() const
    {
        return statistics_;
    }

    using IterationStep<VectorType>::ignoreNodes_;

private:

    Statistics statistics_;

    using IterationStep<VectorType>::x_;

    TNNMGProblemType* problem_;
    NonlinearSmootherType* nonlinearSmoother_;
    CoarseLinearIterationStep* linearIterationStep_;

    int preSmoothingSteps_;
    int postSmoothingSteps_;
    int linearIterationSteps_;

    mutable std::ostringstream outStream_;
};

#endif // #define DUNE_TNNMG_ITERATIONSTEPS_BLOCK_SEPARABLE_TNNMG_STEP_HH
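The iterate() method above performs one TNNMG step: nonlinear pre-smoothing, assembly and truncation of the linearized problem, a few linear iterations for the coarse correction, projection and damping of that correction, and optional nonlinear post-smoothing. The following is a hypothetical driver loop built only from the public interface shown above; the problem, smoother, linear step, vectors, and ignore marker are placeholders, not part of this commit.

// Hypothetical sketch; MyTNNMGProblem, MyNonlinearSmoother, linearStep, smoother,
// problem, x and ignoreNodes are placeholders.
using Step = TruncatedNonsmoothNewtonMultigrid<MyTNNMGProblem, MyNonlinearSmoother>;

Step step(linearStep, smoother);   // coarse linear iteration step + nonlinear smoother
step.setProblem(x, problem);       // iterate vector and problem instance
step.setSmoothingSteps(3, 1, 3);   // pre-smoothing, linear, and post-smoothing counts
step.ignoreNodes_ = &ignoreNodes;  // Dirichlet marker inherited from IterationStep

step.preprocess();
for (int k = 0; k < 100; ++k)
    step.iterate();                // smoothing + truncated coarse correction + damping

x = step.getSol();
const auto& stats = step.getStatistics();  // iteration count and timings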
dune/tnnmg/problem-classes/bisection.hh
...
...
@@ -18,20 +18,13 @@ class Bisection
* \param safety acceptance factor for inexact minimization
*/
    Bisection(double acceptError = 0.0,
              double acceptFactor = 1.0,
              double requiredResidual = 1e-12,
#ifdef USE_OLD_TNNMG
              bool fastQuadratic = true,
#endif
              double safety = 1e-14) :
#ifdef USE_OLD_TNNMG
        fastQuadratic_(fastQuadratic),
#endif
        acceptFactor_(acceptFactor),
        acceptError_(acceptError),
        requiredResidual_(requiredResidual),
        safety_(safety)
    {};
#ifndef USE_OLD_TNNMG
/** \brief minimize convex functional
*
* Computes an approximate minimizer of the convex functional
...
...
@@ -211,218 +204,6 @@ class Bisection
        return x;
    }
#else // The following is for the old tnnmg implementation
/** \brief minimize convex functional
*
* Computes an approximate minimizer of the convex functional
* @f[ f(x) = \frac 1 2 ax^2 - rx + \phi(x) @f]
* using bisection. If \f$ x^* \f$ is the exact minimizer
* and \f$ x^{old} \f$ is the old value the returned
* approximation @f$ x @f$ satisfies
* \f[
* (x - x^{old}) \in [ factor , 1] (x^*- x^{old}).
* \f]
* This guarantees enough descent for every fixed \f$ factor \f$ and hence convergence of the Gauss-Seidel method.
*
* \param J The convex functional
* \param x initial value
* \param x_old old value, needed for inexact minimization
* \param [out] count return the number of times the subdifferential of J was evaluated
* \return approximate minimizer
*/
    template <class Functional>
    double minimize(const Functional& J, double x, double x_old, int& count, int verbosity = 0) const
    {
        size_t n = 0;

        Dune::Solvers::Interval<double> I;
        Dune::Solvers::Interval<double> DJ;
        Dune::Solvers::Interval<double> dom;

        count = 0;

        J.domain(dom);

        double A = J.quadraticPart();
        double b = J.linearPart();

        // try to minimize the quadratic part exactly
        if (fastQuadratic_)
        {
            double z = dom.projectIn(b/A);
            J.subDiff(z, DJ);
            ++count;
            if (DJ.containsZero(safety_))
                return z;
        }

        // try if projected initial guess is sufficient
        x = dom.projectIn(x);
        J.subDiff(x, DJ);
        ++count;
        if (DJ.containsZero(safety_))
        {
            if (verbosity > 0)
                std::cout << "Bisection: initial iterate " << x << " accepted, DJ = " << DJ << std::endl;
            return x;
        }

        // compute initial interval
        // if quadratic part is strictly positive we can compute one bound from the other
        if (DJ[0] > 0.0)
        {
            I[1] = x;
            if (A > 0.0)
                I[0] = dom.projectFromBelow(x - DJ[0]/A);
            else
            {
                I[0] = dom.projectFromBelow(I[1] - DJ[0]);
                J.subDiff(I[0], DJ);
                ++count;
                while (DJ[0] > safety_)
                {
                    I[0] = I[1] - 2.0*(I[1] - I[0]);
                    if (I[0] < dom[0])
                    {
                        I[0] = dom[0];
                        break;
                    }
                    J.subDiff(I[0], DJ);
                    ++count;
                }
            }
        }
        else
        {
            I[0] = x;
            if (A > 0.0)
                I[1] = dom.projectFromAbove(x - DJ[1]/A);
            else
            {
                I[1] = dom.projectFromAbove(I[0] - DJ[1]);
                J.subDiff(I[1], DJ);
                ++count;
                while (DJ[1] < -safety_)
                {
                    I[1] = I[0] - 2.0*(I[0] - I[1]);
                    if (I[1] > dom[1])
                    {
                        I[1] = dom[1];
                        break;
                    }
                    J.subDiff(I[1], DJ);
                    ++count;
                }
            }
        }

        if (verbosity > 0)
        {
            std::cout << " # | I[0] | x | I[1] | dJ(I[0]) | dJ(x) | dJ(I[1]) |" << std::endl;
            std::cout << "----|--------------|--------------|--------------|--------------|--------------|--------------|" << std::endl;
        }

        // compute midpoint
        x = (I[0] + I[1]) / 2.0;

        // We remember the value of x from the last loop in order
        // to check if the iteration becomes stationary. This
        // is necessary to guarantee termination because it
        // might happen, that I[0] and I[1] are successive
        // floating point numbers, while the values at both
        // interval boundaries do not match the normal
        // termination criterion.
        //
        // Checking ((I[0] < x) and (x < I[1])) does not work
        // since this is true in the above case if x is taken
        // from the register of a floating point unit that
        // uses a higher accuracy internally
        //
        // The above does e.g. happen with the x87 floating
        // point unit that uses 80 bit internally. In this
        // specific case the problem could be avoided by
        // changing the x87 rounding mode with
        //
        //   int mode = 0x27F;
        //   asm ("fldcw %0" : : "m" (*&mode));
        //
        // In the general case one can avoid the problem at least
        // for the equality check using a binary comparison
        // that circumvents the floating point unit.
        double xLast = I[0];

        // iterate while mid point does not coincide with boundary numerically
        while (not(binaryEqual(x, xLast)))
        {
            ++n;

            // evaluate subdifferential
            J.subDiff(x, DJ);
            ++count;

            if (verbosity > 0)
            {
                Dune::Solvers::Interval<double> DJ0, DJ1;
                J.subDiff(I[0], DJ0);
                J.subDiff(I[1], DJ1);
                const std::streamsize p = std::cout.precision();
                std::cout << std::setw(4) << n << "|"
                          << std::setprecision(8)
                          << std::setw(14) << I[0] << "|"
                          << std::setw(14) << x << "|"
                          << std::setw(14) << I[1] << "|"
                          << std::setw(14) << DJ0[0] << "|"
                          << std::setw(14) << DJ[0] << "|"
                          << std::setw(14) << DJ1[0] << "|"
                          << std::endl;
                std::cout << std::setprecision(p);
            }

            // stop if at least 'factor' of full minimization step is realized
            // if DJ[0]<= 0 we have
            //
            //   x_old <= I[0] <= x <= x* <= I[1]
            //
            // and hence the criterion
            //
            //   alpha|x*-x_old| <= alpha|I[1]-x_old| <= |x-x_old| <= |x*-x_old|
            //
            // similar for DJ[1] >= 0
            if ((I[0] >= x_old) and (DJ[0] <= 0.0)
                and (std::abs((x - x_old) / (I[1] - x_old)) >= acceptFactor_)
                and (-requiredResidual_ <= DJ[1]))
                break;
            if ((I[1] <= x_old) and (DJ[1] >= 0.0)
                and (std::abs((x - x_old) / (I[0] - x_old)) >= acceptFactor_)
                and (DJ[0] <= requiredResidual_))
                break;

            // stop if error is less than acceptError_
            if (std::abs(I[1] - I[0]) <= acceptError_)
                break;

            // stop if 0 in DJ numerically
            if (DJ.containsZero(safety_))
                break;

            // adjust bounds and compute new mid point
            if (DJ[0] > 0.0)
                I[1] = x;
            else
                I[0] = x;

            xLast = x;
            x = (I[0] + I[1]) / 2.0;
        }

        return x;
    }

    void setFastQuadratic(bool fastQuadratic)
    {
        fastQuadratic_ = fastQuadratic;
    }
#endif
private:
...
...
@@ -487,9 +268,6 @@ class Bisection
        return true;
    }
#ifdef USE_OLD_TNNMG
    bool fastQuadratic_;
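For orientation, the legacy minimize() shown above only requires the scalar functional J to provide a domain interval, its quadratic and linear parts, and a subdifferential evaluated into a Dune::Solvers::Interval. A hypothetical minimal functional satisfying that contract is sketched below; the smooth quadratic example 0.5*x^2 - x (no nonsmooth part) and the name MyScalarFunctional are illustrations, not part of this commit, and the relevant dune-solvers headers are assumed to be included.

// Hypothetical sketch of the interface used by the legacy minimize():
// J.domain(dom), J.quadraticPart(), J.linearPart(), J.subDiff(x, DJ).
struct MyScalarFunctional
{
    // restrict the minimization to [-10, 10]
    void domain(Dune::Solvers::Interval<double>& dom) const
    {
        dom[0] = -10.0;
        dom[1] = 10.0;
    }

    double quadraticPart() const { return 1.0; } // a in 0.5*a*x^2 - r*x + phi(x)
    double linearPart() const    { return 1.0; } // r

    // subdifferential of the smooth example: dJ(x) = a*x - r, returned as an interval
    void subDiff(double x, Dune::Solvers::Interval<double>& DJ) const
    {
        DJ[0] = DJ[1] = x - 1.0;
    }
};

// Possible usage with the legacy interface:
//   Bisection bisection;           // fastQuadratic defaults to true here
//   int count = 0;
//   double xmin = bisection.minimize(MyScalarFunctional(), /*x=*/0.0, /*x_old=*/0.0, count);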