Commit 200fd1d0 authored by Dave Moxey

Merge branch 'feature/helmsmooth3D' into 'master'

Feature/helmsmooth3D

This MR enables the Helmholtz smoother for 3D problems.

It also activates PETSc MUMPS support when a Fortran compiler is present, because currently only direct solvers can reliably handle the resulting linear system.

See merge request !714
parents 890158aa 0e2b4c98
......@@ -31,6 +31,9 @@ v4.4.0
file (!678)
- Extend ExtractDataToCoeffs to support interpolation between basis types for
quads and hexahedra (!682)
- Enabled MUMPS support in PETSc if a Fortran compiler was found and added 3D
support to the Helmholtz smoother used e.g. in FieldConvert's C0Projection
module (!714)
**ADRSolver:**
- Add a projection equation system for C^0 projections (!675)
......
CMAKE_MINIMUM_REQUIRED(VERSION 2.8.7)
CMAKE_MINIMUM_REQUIRED(VERSION 2.8.8)
SET(CMAKE_BUILD_TYPE Release CACHE STRING "Choose the type of build,
options are: None(CMAKE_CXX_FLAGS or CMAKE_C_FLAGS used) Debug Release
RelWithDebInfo MinSizeRel.")
PROJECT(Nektar++)
PROJECT(Nektar++ C CXX)
INCLUDE(CheckLanguage)
CHECK_LANGUAGE(Fortran)
IF(CMAKE_Fortran_COMPILER)
ENABLE_LANGUAGE(Fortran)
ELSE()
MESSAGE(STATUS "No Fortran support")
ENDIF()
# Helps organize projects in IDEs.
SET_PROPERTY(GLOBAL PROPERTY USE_FOLDERS ON)
......
......@@ -30,16 +30,30 @@ IF (NEKTAR_USE_PETSC)
SET(PETSC_C_COMPILER "${CMAKE_C_COMPILER}")
SET(PETSC_CXX_COMPILER "${CMAKE_CXX_COMPILER}")
SET(PETSC_Fortran_COMPILER "${CMAKE_Fortran_COMPILER}")
IF (NEKTAR_USE_MPI)
IF (NOT MPI_BUILTIN)
SET(PETSC_C_COMPILER "${MPI_C_COMPILER}")
SET(PETSC_CXX_COMPILER "${MPI_CXX_COMPILER}")
SET(PETSC_Fortran_COMPILER "${MPI_Fortran_COMPILER}")
ENDIF (NOT MPI_BUILTIN)
ELSE (NEKTAR_USE_MPI)
SET(PETSC_NO_MPI "--with-mpi=0")
ENDIF (NEKTAR_USE_MPI)
IF(CMAKE_Fortran_COMPILER)
IF(NEKTAR_USE_MPI AND NOT MPI_Fortran_COMPILER)
MESSAGE(FATAL_ERROR "MPI_Fortran_COMPILER not set")
ENDIF()
# We use MUMPS' built-in ordering here; in the future it might make
# sense to hook this up to METIS/Scotch, since that might be faster.
SET(PETSC_MUMPS --download-scalapack --download-mumps)
ELSE()
MESSAGE(WARNING "No Fortran support. Building PETSc without MUMPS support")
SET(PETSC_Fortran_COMPILER "0")
ENDIF()
EXTERNALPROJECT_ADD(
petsc-3.7.2
PREFIX ${TPSRC}
......@@ -52,17 +66,21 @@ IF (NEKTAR_USE_PETSC)
URL http://www.nektar.info/thirdparty/petsc-lite-3.7.2.tar.gz
URL_MD5 "26c2ff8eaaa9e49aea063f839f5daa7e"
CONFIGURE_COMMAND
OMPI_FC=${CMAKE_Fortran_COMPILER}
OMPI_CC=${CMAKE_C_COMPILER}
OMPI_CXX=${CMAKE_CXX_COMPILER}
${PYTHON_EXECUTABLE} ./configure
./configure
--with-fc=${PETSC_Fortran_COMPILER}
--with-cc=${PETSC_C_COMPILER}
--with-cxx=${PETSC_CXX_COMPILER}
--with-shared-libraries=1
--with-pic=1
--with-x=0
--with-ssl=0
--prefix=${TPDIST}
--with-petsc-arch=c-opt
--with-fc=0
${PETSC_MUMPS}
${PETSC_NO_MPI}
BUILD_COMMAND MAKEFLAGS= make)
......
......@@ -986,7 +986,7 @@ namespace Nektar
bndcnt += m_bndCondExpansions[i]->GetNcoeffs();
}
}
m_locToGloMap->UniversalAssemble(wsp);
m_locToGloMap->UniversalAssemble(gamma);
// Add weak boundary conditions to forcing
Vmath::Vadd(contNcoeffs, wsp, 1, gamma, 1, wsp, 1);
......
......@@ -631,7 +631,83 @@ namespace Nektar
GeneralMatrixOp_IterPerExp(gkey,inarray,outarray);
}
}
/**
 * First compute the inner product of the forcing function with respect to
 * the base, and then solve the system with the linear advection operator.
 * @param velocity   Array of advection velocities in physical space.
 * @param inarray    Forcing function.
 * @param outarray   Result.
 * @param lambda     Reaction coefficient.
 * @param coeffstate State of coefficients, local or global.
 * @param dirForcing Dirichlet forcing.
 */
// Note: this could be combined with HelmholtzCG.
void ContField3D::v_LinearAdvectionDiffusionReactionSolve(
const Array<OneD, Array<OneD, NekDouble> > &velocity,
const Array<OneD, const NekDouble> &inarray,
Array<OneD, NekDouble> &outarray,
const NekDouble lambda,
CoeffState coeffstate,
const Array<OneD, const NekDouble> &dirForcing)
{
// Inner product of forcing
int contNcoeffs = m_locToGloMap->GetNumGlobalCoeffs();
Array<OneD, NekDouble> wsp(contNcoeffs);
IProductWRTBase(inarray, wsp, eGlobal);
// Note: the -1.0 factor is necessary to negate the forcing function
// so that it is consistent with the matrix definition
Vmath::Neg(contNcoeffs, wsp, 1);
// Forcing function with weak boundary conditions
int i, j;
int bndcnt = 0;
Array<OneD, NekDouble> gamma(contNcoeffs, 0.0);
for (i = 0; i < m_bndCondExpansions.num_elements(); ++i)
{
if (m_bndConditions[i]->GetBoundaryConditionType() !=
SpatialDomains::eDirichlet)
{
for (j = 0; j < (m_bndCondExpansions[i])->GetNcoeffs(); j++)
{
gamma[m_locToGloMap->GetBndCondCoeffsToGlobalCoeffsMap(
bndcnt++)] += (m_bndCondExpansions[i]->GetCoeffs())[j];
}
}
else
{
bndcnt += m_bndCondExpansions[i]->GetNcoeffs();
}
}
m_locToGloMap->UniversalAssemble(gamma);
// Add weak boundary conditions to forcing
Vmath::Vadd(contNcoeffs, wsp, 1, gamma, 1, wsp, 1);
// Solve the system
StdRegions::ConstFactorMap factors;
factors[StdRegions::eFactorLambda] = lambda;
StdRegions::VarCoeffMap varcoeffs;
varcoeffs[StdRegions::eVarCoeffVelX] = velocity[0];
varcoeffs[StdRegions::eVarCoeffVelY] = velocity[1];
varcoeffs[StdRegions::eVarCoeffVelZ] = velocity[2];
GlobalLinSysKey key(StdRegions::eLinearAdvectionDiffusionReaction,
m_locToGloMap,
factors,
varcoeffs);
if (coeffstate == eGlobal)
{
GlobalSolve(key, wsp, outarray, dirForcing);
}
else
{
Array<OneD, NekDouble> tmp(contNcoeffs, 0.0);
GlobalSolve(key, wsp, tmp, dirForcing);
GlobalToLocal(tmp, outarray);
}
}
int ContField3D::GetGlobalMatrixNnz(const GlobalMatrixKey &gkey)
{
ASSERTL1(gkey.LocToGloMapIsDefined(),
......
......@@ -186,6 +186,16 @@ namespace Nektar
const Array<OneD,const NekDouble> &inarray,
Array<OneD, NekDouble> &outarray,
CoeffState coeffstate);
// Solve the linear advection problem assuming that the m_coeffs
// vector contains an initial estimate for the solution.
MULTI_REGIONS_EXPORT virtual void v_LinearAdvectionDiffusionReactionSolve(
const Array<OneD, Array<OneD, NekDouble> > &velocity,
const Array<OneD, const NekDouble> &inarray,
Array<OneD, NekDouble> &outarray,
const NekDouble lambda,
CoeffState coeffstate = eLocal,
const Array<OneD, const NekDouble> &dirForcing = NullNekDouble1DArray);
virtual void v_ClearGlobalLinSysManager(void);
......
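The declaration above is reached through the public LinearAdvectionDiffusionReactionSolve wrapper on the expansion list. Below is a minimal caller sketch, written in the style of the Nektar++ demo programs, showing how the new 3D overload might be driven. The includes, the session/mesh set-up and the ContField3D constructor arguments are assumptions about the surrounding code base and are not part of this MR; error handling and output are omitted.

#include <LibUtilities/BasicUtils/SessionReader.h>
#include <SpatialDomains/MeshGraph.h>
#include <MultiRegions/ContField3D.h>

using namespace Nektar;

int main(int argc, char *argv[])
{
    // Read the session file and 3D mesh given on the command line.
    LibUtilities::SessionReaderSharedPtr session =
        LibUtilities::SessionReader::CreateInstance(argc, argv);
    SpatialDomains::MeshGraphSharedPtr graph =
        SpatialDomains::MeshGraph::Read(session);

    // C0-continuous 3D field for the first variable defined in the session.
    MultiRegions::ContField3DSharedPtr field =
        MemoryManager<MultiRegions::ContField3D>::AllocateSharedPtr(
            session, graph, session->GetVariable(0));

    int nq = field->GetNpoints();

    // Constant advection velocity; all three components are set, which is
    // what the new eVarCoeffVelZ entry supports for 3D problems.
    Array<OneD, Array<OneD, NekDouble> > velocity(3);
    for (int i = 0; i < 3; ++i)
    {
        velocity[i] = Array<OneD, NekDouble>(nq, 1.0);
    }

    // Forcing function evaluated at the quadrature points.
    Array<OneD, NekDouble> forcing(nq, 1.0);
    NekDouble lambda = 1.0;

    // Solve the advection-diffusion-reaction system; with the default
    // eLocal coefficient state the result is written as local coefficients.
    field->LinearAdvectionDiffusionReactionSolve(
        velocity, forcing, field->UpdateCoeffs(), lambda);

    session->Finalise();
    return 0;
}

With the default eLocal coefficient state the solve returns local coefficients, so passing field->UpdateCoeffs() as the output array leaves the field ready for a subsequent backward transform.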
......@@ -81,6 +81,17 @@ namespace Nektar
PetscInitialized(&isInitialized);
if (!isInitialized)
{
#ifdef NEKTAR_USE_MPI
std::string commType =
m_expList.lock()->GetSession()->GetComm()->GetType();
if (commType.find("MPI") != std::string::npos)
{
LibUtilities::CommMpiSharedPtr comm =
boost::static_pointer_cast<LibUtilities::CommMpi>(
m_expList.lock()->GetSession()->GetComm());
PETSC_COMM_WORLD = comm->GetComm();
}
#endif
PetscInitializeNoArguments();
}
......@@ -159,6 +170,13 @@ namespace Nektar
// Do system solve
KSPSolve(m_ksp, m_b, m_x);
KSPConvergedReason reason;
KSPGetConvergedReason(m_ksp, &reason);
ASSERTL0(reason > 0,
"PETSc solver diverged, reason is: " +
std::string(KSPConvergedReasons[reason]));
// Scatter results to local vector
VecScatterBegin(m_ctx, m_x, m_locVec,
INSERT_VALUES, SCATTER_FORWARD);
......
......@@ -897,7 +897,7 @@ namespace Nektar
v_BwdTrans(inarray,tmp);
VarCoeffType varcoefftypes[] = {eVarCoeffVelX, eVarCoeffVelY};
VarCoeffType varcoefftypes[] = {eVarCoeffVelX, eVarCoeffVelY, eVarCoeffVelZ};
//calculate u dx + v dy + ..
Vmath::Zero(totpts,tmp_adv,1);
......
......@@ -207,7 +207,8 @@ namespace Nektar
eVarCoeffD02,
eVarCoeffD12,
eVarCoeffVelX,
eVarCoeffVelY
eVarCoeffVelY,
eVarCoeffVelZ
};
const char* const VarCoeffTypeMap[] = {
......
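The new eVarCoeffVelZ entry is what allows the third velocity component to be attached to a matrix key, as the ContField3D change above does. A small free-standing sketch of that pattern follows; the helper name and includes are illustrative assumptions, not part of the MR.

#include <StdRegions/StdRegions.hpp>
#include <LibUtilities/BasicUtils/SharedArray.hpp>

using namespace Nektar;

// Illustrative helper: pack the advection velocity components (given at the
// quadrature points) into a variable-coefficient map, including the new
// z-component entry that this MR adds for 3D problems.
StdRegions::VarCoeffMap MakeAdvectionVarCoeffs(
    const Array<OneD, Array<OneD, NekDouble> > &velocity)
{
    StdRegions::VarCoeffMap varcoeffs;
    varcoeffs[StdRegions::eVarCoeffVelX] = velocity[0];
    varcoeffs[StdRegions::eVarCoeffVelY] = velocity[1];
    if (velocity.num_elements() > 2) // third component only present in 3D
    {
        varcoeffs[StdRegions::eVarCoeffVelZ] = velocity[2];
    }
    return varcoeffs;
}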