podlesny/dune-tectonic: Commit 7949bec1
Authored 13 years ago by Elias Pipping; committed by Elias Pipping 13 years ago.
New bisection example; not yet fully functional
Parent: e252d317
Showing 3 changed files with 339 additions and 1 deletion:

- src/Makefile.am (+7 −1)
- src/bisection-example-new.cc (+280 −0)
- src/mynonlinearity.cc (+52 −0)
src/Makefile.am (+7 −1)
```diff
@@ -2,7 +2,13 @@
 SUBDIRS =

 noinst_PROGRAMS = \
-	bisection-example-flexible
+	bisection-example-flexible \
+	bisection-example-new
+
+bisection_example_new_SOURCES = \
+	bisection-example-new.cc \
+	mynonlinearity.cc \
+	properscalarincreasingconvexfunction.hh

 bisection_example_flexible_SOURCES = \
 	bisection-example-flexible.cc \
```
src/bisection-example-new.cc (new file, mode 100644, +280 −0)
```cpp
/* -*- mode:c++; mode: flymake; mode:semantic -*- */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <dune/common/exceptions.hh>
#include <dune/common/stdstreams.hh>

#include <dune/fufem/interval.hh>

#include <dune/tnnmg/nonlinearities/smallfunctional.hh>
#include <dune/tnnmg/problem-classes/bisection.hh>
#include <dune/tnnmg/problem-classes/directionalconvexfunction.hh>
#include <dune/tnnmg/problem-classes/nonlinearity.hh>

#include <cassert>
#include <cstdlib>
#include <limits>

#include "mynonlinearity.cc"
#include "properscalarincreasingconvexfunction.hh"

template <int dimension, class Function = TrivialFunction>
class SampleFunctional {
public:
  typedef Dune::FieldVector<double, dimension> SmallVector;
  typedef Dune::FieldMatrix<double, dimension, dimension> SmallMatrix;

  SampleFunctional(SmallMatrix A, SmallVector b) : A_(A), b_(b), func_() {}

  double operator()(const SmallVector v) const {
    SmallVector y;
    A_.mv(v, y);                        // y = Av
    y /= 2;                             // y = 1/2 Av
    y -= b_;                            // y = 1/2 Av - b
    return y * v + func_(v.two_norm()); // <1/2 Av - b,v> + H(|v|)
  }

  double directionalDerivative(const SmallVector x,
                               const SmallVector dir) const {
    double norm = dir.two_norm();

    if (norm == 0)
      DUNE_THROW(Dune::Exception,
                 "Directional derivatives cannot be computed "
                 "w.r.t. the zero-direction.");

    SmallVector tmp = dir;
    tmp /= norm;

    return pseudoDirectionalDerivative(x, tmp);
  }

  SmallVector minimise(const SmallVector x, unsigned int iterations) const {
    SmallVector descDir = ModifiedGradient(x);

    /* We have
       1/2 <A(u+xv),u+xv>-<b,u+xv> = 1/2 <Av,v> x^2 - <b-Au,v> x + <1/2 Au-b,u>
       since A is symmetric.
    */
    SmallVector tmp2;
    A_.mv(descDir, tmp2);
    double const rest_A = tmp2 * descDir;

    SmallVector tmp3;
    A_.mv(x, tmp3);
    double const rest_b = (b_ - tmp3) * descDir;

    typedef MyNonlinearity<dimension, SampleFunction> MyNonlinearityType;
    MyNonlinearityType phi;
    DirectionalConvexFunction<
        Nonlinearity<Dune::FieldVector<double, dimension>,
                     // MyNonlinearityType::SmallVector,
                     Dune::FieldMatrix<double, dimension, dimension> >
        // MyNonlinearityType::SmallMatrix> >
        > rest(rest_A, rest_b, phi, x, descDir);

    if (descDir == SmallVector(0.0))
      return SmallVector(0.0);

    Dune::dverb << "Starting at x with J(x) = " << operator()(x) << std::endl;
    Dune::dverb << "Minimizing in direction w with dJ(x,w) = "
                << directionalDerivative(x, descDir) << std::endl;

    double l = 0;
    double r = 1;
    SmallVector tmp;
    while (true) {
      tmp = x;
      tmp.axpy(r, descDir);
      if (pseudoDirectionalDerivative(tmp, descDir) >= 0)
        break;

      l = r;
      r *= 2;
      Dune::dverb << "Widened interval!" << std::endl;
    }
    Dune::dverb << "Interval now [" << l << "," << r << "]" << std::endl;

#ifndef NDEBUG
    {
      SmallVector tmpl = x;
      tmpl.axpy(l, descDir);
      SmallVector tmpr = x;
      tmpr.axpy(r, descDir);
      assert(directionalDerivative(tmpl, descDir) < 0);
      assert(directionalDerivative(tmpr, descDir) > 0);
    }
#endif

    double m = l / 2 + r / 2;
    SmallVector middle = SmallVector(0.0);
    for (unsigned int count = 0; count < iterations; ++count) {
      Dune::dverb << "now at m = " << m << std::endl;
      Dune::dverb << "Value of J here: " << operator()(x + middle) << std::endl;

      middle = descDir;
      middle *= m;

      double pseudoDerivative =
          pseudoDirectionalDerivative(x + middle, descDir);

      if (pseudoDerivative < 0) {
        l = m;
        m = (m + r) / 2;
      } else if (pseudoDerivative > 0) {
        r = m;
        m = (l + m) / 2;
      } else
        break;
    }
    return middle;
  }

private:
  SmallMatrix A_;
  SmallVector b_;
  Function func_;

  // Gradient of the smooth part
  SmallVector SmoothGrad(const SmallVector x) const {
    SmallVector y;
    A_.mv(x, y); // y = Av
    y -= b_;     // y = Av - b
    return y;
  }

  SmallVector PlusGrad(const SmallVector x) const {
    SmallVector y = SmoothGrad(x);
    y.axpy(func_.rightDifferential(x.two_norm()) / x.two_norm(), x);
    return y;
  }

  SmallVector MinusGrad(const SmallVector x) const {
    SmallVector y = SmoothGrad(x);
    y.axpy(func_.leftDifferential(x.two_norm()) / x.two_norm(), x);
    return y;
  }

  // |dir|-times the directional derivative wrt dir/|dir|. If only
  // the sign of the directionalDerivative matters, this saves the
  // cost of normalising.
  double pseudoDirectionalDerivative(const SmallVector x,
                                     const SmallVector dir) const {
    if (x == SmallVector(0.0))
      return func_.rightDifferential(0) * dir.two_norm();

    if (x * dir > 0)
      return PlusGrad(x) * dir;
    else
      return MinusGrad(x) * dir;
  }

  SmallVector ModifiedGradient(const SmallVector x) const {
    if (x == SmallVector(0.0)) {
      SmallVector d = SmoothGrad(x);
      // Decline of the smooth part in the negative gradient direction
      double smoothDecline = -(d * d);
      double nonlinearDecline = func_.rightDifferential(0.0) * d.two_norm();
      // TODO: is this correct?
      double combinedDecline = smoothDecline + nonlinearDecline;

      return (combinedDecline < 0) ? d : SmallVector(0.0);
    }

    SmallVector const pg = PlusGrad(x);
    SmallVector const mg = MinusGrad(x);
    SmallVector ret;
    // TODO: collinearity checks suck
    if (pg * x == pg.two_norm() * x.two_norm() &&
        -(mg * x) == mg.two_norm() * x.two_norm()) {
      return SmallVector(0);
    } else if (pg * x >= 0 && mg * x >= 0) {
      ret = pg;
    } else if (pg * x <= 0 && mg * x <= 0) {
      ret = mg;
    } else {
      ret = project(SmoothGrad(x), x);
    }
    ret *= -1;
    return ret;
  }

  // No normalising is done!
  SmallVector project(const SmallVector z, const SmallVector x) const {
    SmallVector y = z;
    y.axpy(-(z * x) / x.two_norm2(), x);
    return y;
  }
};

void testSampleFunction() {
  int const dim = 2;
  typedef SampleFunctional<dim, SampleFunction> SampleFunctional;

  SampleFunctional::SmallMatrix A;
  A[0][0] = 3;
  A[0][1] = 0;
  A[1][0] = 0;
  A[1][1] = 3;
  SampleFunctional::SmallVector b;
  b[0] = 1;
  b[1] = 2;

  SampleFunctional J(A, b);

  std::cout << J.directionalDerivative(b, b) << std::endl;
  assert(J.directionalDerivative(b, b) == 2 * sqrt(5) + 2);

  SampleFunctional::SmallVector start = b;
  start *= 17;
  SampleFunctional::SmallVector correction = J.minimise(start, 20);
  assert(J(start + correction) <= J(start));
  assert(std::abs(J(start + correction) + 0.254644) < 1e-8);
  std::cout << J(start + correction) << std::endl;
}

void testTrivialFunction() {
  int const dim = 2;
  typedef SampleFunctional<dim> SampleFunctional;

  SampleFunctional::SmallMatrix A;
  A[0][0] = 3;
  A[0][1] = 0;
  A[1][0] = 0;
  A[1][1] = 3;
  SampleFunctional::SmallVector b;
  b[0] = 1;
  b[1] = 2;

  SampleFunctional J(A, b);

  std::cout << J.directionalDerivative(b, b) << std::endl;
  assert(J.directionalDerivative(b, b) == 2 * sqrt(5));

  SampleFunctional::SmallVector start = b;
  start *= 17;
  SampleFunctional::SmallVector correction = J.minimise(start, 20);
  assert(J(start + correction) <= J(start));
  assert(std::abs(J(start + correction) + 0.83333333) < 1e-8);
  std::cout << J(start + correction) << std::endl;
}

int main() {
  try {
    testSampleFunction();
    testTrivialFunction();
    return 0;
  }
  catch (Dune::Exception &e) {
    Dune::derr << "Dune reported error: " << e << std::endl;
  }
}
```
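The expansion quoted in the comment inside `minimise()` follows from bilinearity and the symmetry of A; written out (a reading of the code comment, not part of the committed file):

$$
\tfrac12\langle A(u+xv),\,u+xv\rangle-\langle b,\,u+xv\rangle
=\tfrac12\langle Av,v\rangle\,x^2+\langle Au-b,\,v\rangle\,x+\tfrac12\langle Au,u\rangle-\langle b,u\rangle
=\tfrac12\langle Av,v\rangle\,x^2-\langle b-Au,\,v\rangle\,x+\langle\tfrac12 Au-b,\,u\rangle,
$$

where the two cross terms $\tfrac{x}{2}\langle Au,v\rangle$ and $\tfrac{x}{2}\langle Av,u\rangle$ coincide because $A=A^T$. With $u$ the current iterate `x` and $v$ the direction `descDir`, the code's `rest_A` is $\langle Av,v\rangle$ and `rest_b` is $\langle b-Au,v\rangle$.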
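The `TODO` next to `combinedDecline` in `ModifiedGradient` can be read as follows (a sketch of the reasoning, under the assumption that `func_.rightDifferential(0)` returns the right derivative $H'_+(0)$ of the scalar function $H$): at $x=0$ the code sets $d=\nabla f(0)=A\cdot 0-b=-b$ for the smooth part $f(v)=\tfrac12\langle Av,v\rangle-\langle b,v\rangle$, and

$$
\texttt{smoothDecline}=-|d|^2,\qquad
\texttt{nonlinearDecline}=H'_+(0)\,|d|,\qquad
\texttt{combinedDecline}=|d|\,\bigl(H'_+(0)-|d|\bigr),
$$

so the test `combinedDecline < 0` is equivalent to $|b|>H'_+(0)$: the origin is left only if the smooth gradient outweighs the slope of $H$ at zero.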
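The asserted derivative values in the tests can be checked by hand. With $A=3I$ and $b=(1,2)^T$, `directionalDerivative(b, b)` normalises the direction and, since $\langle b,b\rangle>0$, evaluates `PlusGrad(b) * (b/|b|)`. Assuming `TrivialFunction` contributes no derivative (its definition is in `properscalarincreasingconvexfunction.hh`, which is not part of this commit), this gives

$$
\Bigl\langle Ab-b,\ \tfrac{b}{|b|}\Bigr\rangle=\Bigl\langle 2b,\ \tfrac{b}{|b|}\Bigr\rangle=2|b|=2\sqrt5\approx 4.472,
$$

which is the value asserted in `testTrivialFunction`. The `SampleFunction` variant asserts $2\sqrt5+2$, i.e. an extra contribution `func_.rightDifferential(sqrt(5))` of $2$ on top of the smooth part.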
src/mynonlinearity.cc (new file, mode 100644, +52 −0)
```cpp
/* -*- mode:c++; mode: flymake; mode:semantic -*- */

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <dune/common/fvector.hh>
#include <dune/common/fmatrix.hh>

#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/bvector.hh>

#include <dune/fufem/interval.hh>

#include <dune/tnnmg/problem-classes/nonlinearity.hh>

#include "properscalarincreasingconvexfunction.hh"

template <int dimension, class OuterFunction = TrivialFunction>
class MyNonlinearity
    : public Nonlinearity<Dune::FieldVector<double, dimension>,
                          Dune::FieldMatrix<double, dimension, dimension>> {
public:
  typedef Dune::FieldVector<double, dimension> SmallVector;
  typedef Dune::FieldMatrix<double, dimension, dimension> SmallMatrix;

  void directionalSubDiff(SmallVector u, SmallVector v, Interval<double> &D) {
    if (u == SmallVector(0.0)) {
      D[0] = D[1] = func_.rightDifferential(0);
    } else if (u * v > 0) {
      D[1] = (v * u) * func_.rightDifferential(u.two_norm()) / u.two_norm();
      D[0] = (v * u) * func_.leftDifferential(u.two_norm()) / u.two_norm();
    } else {
      D[1] = (v * u) * func_.leftDifferential(u.two_norm()) / u.two_norm();
      D[0] = (v * u) * func_.rightDifferential(u.two_norm()) / u.two_norm();
    }
  }

  double operator()(const Dune::BlockVector<SmallVector> &) const { return 3; }

  void addGradient(const Dune::BlockVector<SmallVector> &,
                   Dune::BlockVector<SmallVector> &) const {}

  void addHessian(const Dune::BlockVector<SmallVector> &,
                  Dune::BCRSMatrix<SmallMatrix> &) const {}

  void addHessianIndices(Dune::MatrixIndexSet &) const {}

  void setVector(const Dune::BlockVector<SmallVector> &) {}

  void updateEntry(int, double, int) {}

  void subDiff(int, double, Interval<double> &, int) const {}

  double regularity(int, double, int) const { return 3; }

  void domain(int, Interval<double> &, int) const {}

  OuterFunction func_;
};
```
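For orientation, here is a minimal, hypothetical usage sketch for `directionalSubDiff` (not part of the commit). It assumes the default `OuterFunction = TrivialFunction` and that `Interval<double>` from dune-fufem is default-constructible, as its use by reference above suggests; the numeric values are illustrative only.

```cpp
// Hypothetical sketch: querying the directional subdifferential of
// u |-> func_(|u|) at u along v, as computed by MyNonlinearity above.
#include <dune/common/fvector.hh>
#include <dune/fufem/interval.hh>

#include "mynonlinearity.cc"

int main() {
  MyNonlinearity<2> phi; // default OuterFunction = TrivialFunction

  Dune::FieldVector<double, 2> u(0.0), v(0.0);
  u[0] = 1.0; u[1] = 2.0; // current point (here u * v > 0, so the middle branch applies)
  v[0] = 0.0; v[1] = 1.0; // direction

  // After the call, D[0] and D[1] hold the lower and upper one-sided
  // directional derivatives: <v,u>/|u| times the left/right derivative
  // of func_ at |u|.
  Interval<double> D;
  phi.directionalSubDiff(u, v, D);

  return 0;
}
```

Building this would also require `properscalarincreasingconvexfunction.hh` on the include path, since `mynonlinearity.cc` includes it.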