#include <Python.h>

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#ifndef HAVE_PYTHON
#error Python is required
#endif

#ifdef HAVE_IPOPT
#undef HAVE_IPOPT
#endif

#ifndef srcdir
#error srcdir unset
#endif

#ifndef DIM
#error DIM unset
#endif

#if !HAVE_ALUGRID
#error ALUGRID is required
#endif

#include <algorithm>
#include <cassert>
#include <cmath>
#include <exception>
#include <fstream>
#include <iostream>
#include <memory>

#include <boost/format.hpp>

#include <dune/common/bitsetvector.hh>
#include <dune/common/exceptions.hh>
#include <dune/common/fmatrix.hh>
#include <dune/common/function.hh>
#include <dune/common/fvector.hh>
#include <dune/common/parametertree.hh>
#include <dune/common/parametertreeparser.hh>

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wignored-qualifiers"
#include <dune/grid/alugrid.hh>
#pragma clang diagnostic pop

#include <dune/grid/common/mcmgmapper.hh>
#include <dune/grid/utility/structuredgridfactory.hh>

#include <dune/istl/bcrsmatrix.hh>
#include <dune/istl/bvector.hh>

#include <dune/fufem/assemblers/assembler.hh>
#include <dune/fufem/assemblers/localassemblers/vonmisesstressassembler.hh>
#include <dune/fufem/boundarypatch.hh>
#include <dune/fufem/dunepython.hh>
#include <dune/fufem/functions/basisgridfunction.hh>

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wsign-compare"
#include <dune/fufem/functionspacebases/p0basis.hh>
#pragma clang diagnostic pop

#include <dune/fufem/functionspacebases/p1nodalbasis.hh>
#include <dune/fufem/sharedpointermap.hh>

#include <dune/solvers/norms/energynorm.hh>
#include <dune/solvers/norms/sumnorm.hh>
#include <dune/solvers/solvers/loopsolver.hh>
#include <dune/solvers/solvers/solver.hh>

#include <dune/tnnmg/problem-classes/convexproblem.hh>
#include <dune/tnnmg/nonlinearities/zerononlinearity.hh>
#include <dune/tnnmg/problem-classes/blocknonlineartnnmgproblem.hh>

#include <dune/tectonic/myblockproblem.hh>
#include <dune/tectonic/globalnonlinearity.hh>

#include "assemblers.hh"
#include "friction_writer.hh"
#include "solverfactory.hh"
#include "vtk.hh"

#include "enums.hh"
#include "enum_parser.cc"
#include "enum_state_model.cc"
#include "enum_scheme.cc"
#include "enum_verbosity.cc"

#include "timestepping.hh"

#include "state/stateupdater.hh"
#include "state/ruinastateupdater.hh"
#include "state/dieterichstateupdater.hh"

size_t const dims = DIM;

template <class Vector, class Matrix, class Function, int dimension>
std::shared_ptr<TimeSteppingScheme<Vector, Matrix, Function, dimension>>
initTimeStepper(Config::scheme scheme,
                Function const &velocityDirichletFunction,
                Dune::BitSetVector<dimension> const &velocityDirichletNodes,
                Matrix const &massMatrix, Matrix const &stiffnessMatrix,
                Matrix const &dampingMatrix, Vector const &u_initial,
                Vector const &v_initial, Vector const &a_initial) {
  switch (scheme) {
    case Config::Newmark:
      return std::make_shared<Newmark<Vector, Matrix, Function, dims>>(
          stiffnessMatrix, massMatrix, dampingMatrix, u_initial, v_initial,
          a_initial, velocityDirichletNodes, velocityDirichletFunction);
    case Config::BackwardEuler:
      return std::make_shared<BackwardEuler<Vector, Matrix, Function, dims>>(
          stiffnessMatrix, massMatrix, dampingMatrix, u_initial, v_initial,
          velocityDirichletNodes, velocityDirichletFunction);
    default:
      assert(false);
  }
}

template <class ScalarVector, class Vector>
std::shared_ptr<StateUpdater<ScalarVector, Vector>> initStateUpdater(
    Config::stateModel model, ScalarVector const &alpha_initial,
    Dune::BitSetVector<1> const &frictionalNodes, FrictionData const &fd) {
  switch (model) {
    case Config::Dieterich:
      return std::make_shared<DieterichStateUpdater<ScalarVector, Vector>>(
          alpha_initial, frictionalNodes, fd.L);
    case Config::Ruina:
      return std::make_shared<RuinaStateUpdater<ScalarVector, Vector>>(
          alpha_initial, frictionalNodes, fd.L);
    default:
      assert(false);
  }
}
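// Background note (added documentation, not used by the code): the two state
// updaters selected above correspond to the classical rate-and-state friction
// evolution laws, commonly written in terms of a state variable theta, the
// slip rate V, and the characteristic slip distance L as
//
//   Dieterich ("ageing") law:  d(theta)/dt = 1 - V * theta / L
//   Ruina ("slip") law:        d(theta)/dt = -(V * theta / L) * log(V * theta / L)
//
// The updaters appear to work with the logarithm of the state variable
// (cf. updateLogState/extractLogState below); the precise discretisation is
// defined in dieterichstateupdater.hh and ruinastateupdater.hh, not here.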
void initPython() {
  Python::start();

  Python::run("import sys");
  Python::run("sys.path.append('" srcdir "')");
}

int main(int argc, char *argv[]) {
  try {
    Dune::ParameterTree parset;
    Dune::ParameterTreeParser::readINITree(srcdir "/one-body-sample.parset",
                                           parset);
    Dune::ParameterTreeParser::readOptions(argc, argv, parset);

    using SmallVector = Dune::FieldVector<double, dims>;
    using SmallMatrix = Dune::FieldMatrix<double, dims, dims>;
    using SmallScalarMatrix = Dune::FieldMatrix<double, 1, 1>;
    using Matrix = Dune::BCRSMatrix<SmallMatrix>;
    using ScalarMatrix = Dune::BCRSMatrix<SmallScalarMatrix>;
    using Vector = Dune::BlockVector<SmallVector>;
    using ScalarVector = Dune::BlockVector<Dune::FieldVector<double, 1>>;
    using Nonlinearity = Dune::GlobalNonlinearity<Matrix, Vector>;

    auto const E = parset.get<double>("body.E");
    auto const nu = parset.get<double>("body.nu");
    auto const density = parset.get<double>("body.density");
    double const gravity = 9.81;

    // {{{ Set up grid
    using Grid = Dune::ALUGrid<dims, dims, Dune::simplex, Dune::nonconforming>;

    Dune::FieldVector<typename Grid::ctype, dims> lowerLeft(0);
    Dune::FieldVector<typename Grid::ctype, dims> upperRight(1);
    upperRight[0] = parset.get<size_t>("body.width");
    upperRight[1] = parset.get<size_t>("body.height");

    std::shared_ptr<Grid> grid;
    {
      Dune::array<unsigned int, dims> elements;
      std::fill(elements.begin(), elements.end(), 1);
      elements[0] = parset.get<size_t>("body.width");
      elements[1] = parset.get<size_t>("body.height");
      grid = Dune::StructuredGridFactory<Grid>::createSimplexGrid(
          lowerLeft, upperRight, elements);
    }

    SmallVector zenith(0);
    zenith[1] = 1;

    auto const refinements = parset.get<size_t>("grid.refinements");
    grid->globalRefine(refinements);
    size_t const fineVertexCount = grid->size(grid->maxLevel(), dims);

    using GridView = Grid::LeafGridView;
    GridView const leafView = grid->leafView();
    // }}}

    // Set up bases
    using P0Basis = P0Basis<GridView, double>;
    P0Basis const p0Basis(leafView);
    Assembler<P0Basis, P0Basis> const p0Assembler(p0Basis, p0Basis);

    // Set up the boundary
    Dune::BitSetVector<dims> velocityDirichletNodes(fineVertexCount, false);
    Dune::BitSetVector<dims> const &displacementDirichletNodes =
        velocityDirichletNodes;
    Dune::BitSetVector<dims> accelerationDirichletNodes(fineVertexCount,
                                                        false);
    Dune::BitSetVector<1> neumannNodes(fineVertexCount, false);
    BoundaryPatch<GridView> const neumannBoundary(leafView, neumannNodes);

    Vector vertexCoordinates(fineVertexCount);
    Dune::BitSetVector<1> frictionalNodes(fineVertexCount, false);
    {
      Dune::MultipleCodimMultipleGeomTypeMapper<
          GridView, Dune::MCMGVertexLayout> const vertexMapper(leafView);
      for (auto it = leafView.begin<dims>(); it != leafView.end<dims>();
           ++it) {
        assert(it->geometry().corners() == 1);
        size_t const id = vertexMapper.map(*it);
        vertexCoordinates[id] = it->geometry().corner(0);
        auto const &localCoordinates = vertexCoordinates[id];

        // lower face
        if (localCoordinates[1] == lowerLeft[1]) {
          frictionalNodes[id] = true;
          velocityDirichletNodes[id][1] = true;
        }
        // upper face
        else if (localCoordinates[1] == upperRight[1])
          velocityDirichletNodes[id] = true;
        // right face (except for both corners)
        else if (localCoordinates[0] == upperRight[0])
          ;
        // left face (except for both corners)
        else if (localCoordinates[0] == lowerLeft[0])
          ;
      }
    }
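    // Summary of the boundary decomposition constructed above (descriptive
    // comment only):
    //  - lower face:  frictional nodes; only the vertical velocity component
    //                 is prescribed, the tangential one is governed by the
    //                 friction nonlinearity
    //  - upper face:  all velocity components are Dirichlet-constrained
    //                 (driven through velocityDirichletCondition)
    //  - left/right:  deliberately left untouched, i.e. they act as natural
    //                 (zero-traction) boundaries since neumannNodes is empty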
    BoundaryPatch<GridView> const frictionalBoundary(leafView,
                                                     frictionalNodes);

    // Set up functions for time-dependent boundary conditions
    using Function = Dune::VirtualFunction<double, double>;
    using FunctionMap = SharedPointerMap<std::string, Function>;
    FunctionMap functions;
    {
      initPython();
      Python::import("one-body-sample")
          .get("Functions")
          .toC<typename FunctionMap::Base>(functions);
    }
    auto const &velocityDirichletFunction =
                    functions.get("velocityDirichletCondition"),
               &neumannFunction = functions.get("neumannCondition");

    // Set up normal stress, mass matrix, and gravity functional
    double normalStress;
    {
      double volume = 1.0;
      for (size_t i = 0; i < dims; ++i)
        volume *= (upperRight[i] - lowerLeft[i]);

      double area = 1.0;
      for (size_t i = 0; i < dims; ++i)
        if (i != 1)
          area *= (upperRight[i] - lowerLeft[i]);

      // volume * gravity * density / area    = normal stress
      // V      * g       * rho     / A       = sigma_n
      // m^d    * N/kg    * kg/m^d  / m^(d-1) = N/m^(d-1)
      normalStress = volume * gravity * density / area;
    }
    FrictionData const frictionData(parset.sub("boundary.friction"),
                                    normalStress);

    using MyAssembler = MyAssembler<GridView, dims>;
    MyAssembler myAssembler(leafView);

    Matrix A, C, M;
    myAssembler.assembleElasticity(E, nu, A);
    myAssembler.assembleViscosity(parset.get<double>("body.shearViscosity"),
                                  parset.get<double>("body.bulkViscosity"), C);
    myAssembler.assembleMass(density, M);
    EnergyNorm<Matrix, Vector> const ANorm(A), MNorm(M);

    // Q: Does it make sense to weigh them in this manner?
    SumNorm<Vector> const AMNorm(1.0, ANorm, 1.0, MNorm);

    ScalarMatrix frictionalBoundaryMass;
    myAssembler.assembleFrictionalBoundaryMass(frictionalBoundary,
                                               frictionalBoundaryMass);
    EnergyNorm<ScalarMatrix, ScalarVector> const stateEnergyNorm(
        frictionalBoundaryMass);

    // Assemble forces
    Vector gravityFunctional;
    myAssembler.assembleBodyForce(gravity, density, zenith, gravityFunctional);

    auto myGlobalNonlinearity = myAssembler.assembleFrictionNonlinearity(
        frictionalBoundary, frictionData);

    // Problem formulation: right-hand side
    auto const createRHS = [&](double _relativeTime, Vector &_ell) {
      myAssembler.assembleNeumann(neumannBoundary, _ell, neumannFunction,
                                  _relativeTime);
      _ell += gravityFunctional;
    };
    Vector ell(fineVertexCount);
    createRHS(0.0, ell);

    // {{{ Initial conditions
    ScalarVector alpha_initial(fineVertexCount);
    alpha_initial =
        std::log(parset.get<double>("boundary.friction.initialState"));

    using LinearFactory = SolverFactory<
        dims, BlockNonlinearTNNMGProblem<ConvexProblem<
                  ZeroNonlinearity<SmallVector, SmallMatrix>, Matrix>>,
        Grid>;
    ZeroNonlinearity<SmallVector, SmallMatrix> zeroNonlinearity;

    // Solve the stationary problem
    Vector u_initial(fineVertexCount);
    u_initial = 0.0;
    {
      LinearFactory displacementFactory(parset.sub("solver.tnnmg"), // FIXME
                                        refinements, *grid,
                                        displacementDirichletNodes);
      auto multigridStep = displacementFactory.getSolver();

      typename LinearFactory::ConvexProblem convexProblem(
          1.0, A, zeroNonlinearity, ell, u_initial);
      typename LinearFactory::BlockProblem initialDisplacementProblem(
          parset, convexProblem);

      auto const &localParset = parset.sub("u0.solver");
      multigridStep->setProblem(u_initial, initialDisplacementProblem);
      LoopSolver<Vector> initialDisplacementProblemSolver(
          multigridStep, localParset.get<size_t>("maximumIterations"),
          localParset.get<double>("tolerance"), &ANorm,
          localParset.get<Solver::VerbosityMode>("verbosity"),
          false); // absolute error

      initialDisplacementProblemSolver.preprocess();
      initialDisplacementProblemSolver.solve();
    }
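    // Note (descriptive comment): the block above reuses the TNNMG machinery
    // as a plain linear multigrid solver. With ZeroNonlinearity the convex
    // problem reduces to the stationary elastic system A u = ell on the free
    // degrees of freedom, with the Dirichlet degrees of freedom presumably
    // held at the values of the (zero) initial iterate u_initial.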
    Vector v_initial(fineVertexCount);
    v_initial = 0.0;
    {
      // Prescribe a homogeneous velocity field in the x-direction.
      // This way, the initial condition for v and the Dirichlet
      // condition match up at t=0.
      double v_initial_const;
      velocityDirichletFunction.evaluate(0.0, v_initial_const);
      for (auto &x : v_initial)
        x[0] = v_initial_const;
    }

    Vector a_initial(fineVertexCount);
    a_initial = 0.0;
    {
      /* We solve
           Au + Cv + Ma + Psi(v) = ell,
         thus
           Ma = - (Au + Cv + Psi(v) - ell) */
      Vector accelerationRHS(fineVertexCount);
      {
        accelerationRHS = 0.0;
        Arithmetic::addProduct(accelerationRHS, A, u_initial);
        Arithmetic::addProduct(accelerationRHS, C, v_initial);
        myGlobalNonlinearity->updateLogState(alpha_initial);
        // NOTE: We assume differentiability of Psi at v0 here!
        myGlobalNonlinearity->addGradient(v_initial, accelerationRHS);
        accelerationRHS -= ell;
        accelerationRHS *= -1.0;
      }

      LinearFactory accelerationFactory(parset.sub("solver.tnnmg"), // FIXME
                                        refinements, *grid,
                                        accelerationDirichletNodes);
      auto multigridStep = accelerationFactory.getSolver();

      typename LinearFactory::ConvexProblem convexProblem(
          1.0, M, zeroNonlinearity, accelerationRHS, a_initial);
      typename LinearFactory::BlockProblem initialAccelerationProblem(
          parset, convexProblem);

      auto const &localParset = parset.sub("a0.solver");
      multigridStep->setProblem(a_initial, initialAccelerationProblem);
      LoopSolver<Vector> initialAccelerationProblemSolver(
          multigridStep, localParset.get<size_t>("maximumIterations"),
          localParset.get<double>("tolerance"), &MNorm,
          localParset.get<Solver::VerbosityMode>("verbosity"),
          false); // absolute error

      initialAccelerationProblemSolver.preprocess();
      initialAccelerationProblemSolver.solve();
    }
    // }}}

    FrictionWriter<Dune::BitSetVector<1>> writer(frictionData,
                                                 frictionalNodes);
    writer.writeInfo(alpha_initial, u_initial, v_initial);

    // Set up TNNMG solver
    using NonlinearFactory = SolverFactory<
        dims,
        MyBlockProblem<
            ConvexProblem<Dune::GlobalNonlinearity<Matrix, Vector>, Matrix>>,
        Grid>;
    NonlinearFactory factory(parset.sub("solver.tnnmg"), refinements, *grid,
                             velocityDirichletNodes);
    auto multigridStep = factory.getSolver();

    {
      std::fstream vertexCoordinateWriter("coordinates", std::fstream::out);
      for (size_t i = 0; i < fineVertexCount; ++i)
        if (frictionalNodes[i][0])
          vertexCoordinateWriter << vertexCoordinates[i] << std::endl;
      vertexCoordinateWriter.close();
    }

    std::fstream iterationWriter("iterations", std::fstream::out),
        relaxationWriter("relaxation", std::fstream::out);

    auto timeSteppingScheme =
        initTimeStepper(parset.get<Config::scheme>("timeSteps.scheme"),
                        velocityDirichletFunction, velocityDirichletNodes, M,
                        A, C, u_initial, v_initial, a_initial);
    auto stateUpdater = initStateUpdater<ScalarVector, Vector>(
        parset.get<Config::stateModel>("boundary.friction.stateModel"),
        alpha_initial, frictionalNodes, frictionData);

    Vector v = v_initial;
    ScalarVector alpha(fineVertexCount);

    auto const timesteps = parset.get<size_t>("timeSteps.number"),
               maximumStateFPI = parset.get<size_t>("v.fpi.maximumIterations"),
               maximumIterations =
                   parset.get<size_t>("v.solver.maximumIterations");
    auto const tau = parset.get<double>("problem.finalTime") / timesteps,
               tolerance = parset.get<double>("v.solver.tolerance"),
               fixedPointTolerance = parset.get<double>("v.fpi.tolerance"),
               relaxation = parset.get<double>("v.fpi.relaxation"),
               requiredReduction =
                   parset.get<double>("v.fpi.requiredReduction");
    auto const printProgress = parset.get<bool>("io.printProgress");
    auto const verbosity =
        parset.get<Solver::VerbosityMode>("v.solver.verbosity");
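    // Overview of one time step of the loop below (descriptive comment; the
    // loop itself is authoritative):
    //  1. assemble the right-hand side for the current relative time,
    //  2. let the time stepping scheme build the velocity problem
    //     (velocityMatrix, velocityRHS, velocityIterate),
    //  3. run a fixed point iteration alternating between the state update
    //     and the TNNMG velocity solve, under-relaxing the state whenever the
    //     correction fails to shrink by requiredReduction,
    //  4. stop once the displacement correction measured in the combined
    //     A/M energy norm falls below fixedPointTolerance,
    //  5. write diagnostics and, if requested, VTK output.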
    for (size_t run = 1; run <= timesteps; ++run) {
      if (printProgress)
        std::cout << boost::format("%7d ") % run << " " << std::flush;

      stateUpdater->nextTimeStep();
      timeSteppingScheme->nextTimeStep();

      auto const relativeTime = double(run) / double(timesteps);
      createRHS(relativeTime, ell);

      Matrix velocityMatrix;
      Vector velocityRHS(fineVertexCount);
      Vector velocityIterate(fineVertexCount);

      stateUpdater->setup(tau);
      timeSteppingScheme->setup(ell, tau, relativeTime, velocityRHS,
                                velocityIterate, velocityMatrix);

      LoopSolver<Vector> velocityProblemSolver(multigridStep,
                                               maximumIterations, tolerance,
                                               &AMNorm, verbosity,
                                               false); // absolute error

      size_t iterationCounter;
      auto solveVelocityProblem = [&](Vector &_velocityIterate,
                                      ScalarVector const &_alpha) {
        myGlobalNonlinearity->updateLogState(_alpha);
        // NIT: Do we really need to pass u here?
        typename NonlinearFactory::ConvexProblem const convexProblem(
            1.0, velocityMatrix, *myGlobalNonlinearity, velocityRHS,
            _velocityIterate);
        typename NonlinearFactory::BlockProblem velocityProblem(parset,
                                                                convexProblem);
        multigridStep->setProblem(_velocityIterate, velocityProblem);

        velocityProblemSolver.preprocess();
        velocityProblemSolver.solve();
        iterationCounter = velocityProblemSolver.getResult().iterations;
      };

      // Since the velocity explodes in the quasistatic case, use the
      // displacement as a convergence criterion
      // Q: is this reasonable?
      Vector u;
      Vector u_saved;
      ScalarVector alpha_saved;
      double lastStateCorrection;
      for (size_t stateFPI = 1; stateFPI <= maximumStateFPI; ++stateFPI) {
        stateUpdater->solve(v);
        stateUpdater->extractLogState(alpha);

        if (stateFPI == 1)
          relaxationWriter << "N ";
        else {
          double const stateCorrection =
              stateEnergyNorm.diff(alpha, alpha_saved);
          if (stateFPI <= 2 // lastStateCorrection is only set for stateFPI > 2
              or stateCorrection < requiredReduction * lastStateCorrection)
            relaxationWriter << "N ";
          else {
            alpha *= (1.0 - relaxation);
            Arithmetic::addProduct(alpha, relaxation, alpha_saved);
            relaxationWriter << "Y ";
          }
          lastStateCorrection = stateCorrection;
        }

        solveVelocityProblem(velocityIterate, alpha);
        timeSteppingScheme->postProcess(velocityIterate);
        timeSteppingScheme->extractDisplacement(u);
        timeSteppingScheme->extractVelocity(v);

        iterationWriter << iterationCounter << " ";
        if (printProgress)
          std::cout << '.' << std::flush;

        if (stateFPI > 1) {
          double const velocityCorrection = AMNorm.diff(u_saved, u);
          if (velocityCorrection < fixedPointTolerance)
            break;
        }

        if (stateFPI == maximumStateFPI)
          DUNE_THROW(Dune::Exception, "FPI failed to converge");

        alpha_saved = alpha;
        u_saved = u;
      }
      if (printProgress)
        std::cout << std::endl;

      writer.writeInfo(alpha, u, v);
      iterationWriter << std::endl;
      relaxationWriter << std::endl;

      if (parset.get<bool>("io.writeVTK")) {
        auto const gridDisplacement = std::make_shared<
            BasisGridFunction<typename MyAssembler::VertexBasis,
                              Vector> const>(myAssembler.vertexBasis, u);
        ScalarVector vonMisesStress;
        VonMisesStressAssembler<Grid, P0Basis::LocalFiniteElement>
            localStressAssembler(E, nu, gridDisplacement);
        p0Assembler.assembleFunctional(localStressAssembler, vonMisesStress);

        writeVtk(myAssembler.vertexBasis, u, alpha, p0Basis, vonMisesStress,
                 (boost::format("obs%d") % run).str());
      }
    }
    iterationWriter.close();
    relaxationWriter.close();

    Python::stop();
  }
  catch (Dune::Exception &e) {
    Dune::derr << "Dune reported error: " << e << std::endl;
  }
  catch (std::exception &e) {
    std::cerr << "Standard exception: " << e.what() << std::endl;
  }
}