Commit 91c5c3c0 authored by Dave Moxey's avatar Dave Moxey

Merge remote-tracking branch 'upstream/master' into fix/gs-verbose

parents 1492051c 790e7416
......@@ -4,16 +4,52 @@ Changelog
v4.4.0
------
**Library**:
- Add support for variable polynomial order for 3D simulations with continuous
Galerkin discretisation (!604)
- Bump version of gsmpi to suppress autotuning output unless `--verbose` is
specified (!652)
- Add support for variable polynomial order with periodic boundary conditions
(!658)
- Statistics are now printed for lowest level of multi-level static condensation
(!656)
- Sped up interpolation from pts files and fixed parallel pts import (!584)
- Increased required boost version to 1.56.0 (!584)
**IncNavierStokesSolver:**
- Add ability to simulate additional scalar fields (!624)
**NekMesh:**
- Modify curve module to allow for spline input (!628)
**FieldConvert:**
- Add module to stretch homogeneous direction (!609)
v4.3.3
------
**Library**:
- Auto-detect a shared filesystem and removed --shared-filesystem option (!654)
- Fix filters when using adaptive driver to avoid output being overwritten after
each adaptive update (!588)
- Minor fix to suppress Xxt output unless `--verbose` is specified (!642)
- Fix DirectFull solver in the case where only Neumann boundary conditions
are imposed (!655)
**FieldConvert**:
- Fix to avoid repeated import of field file (!649)
- Fix issue with C^0 projection (!644)
- Fix verbose output when using --procid (!648)
**NekMesh:**
- Fix namespace issue in Star-CCM+ input header in NekMesh (!661)
**CompressibleFlowSolver**:
- Fix issue with residual output (!647)
- Issues with 1D Euler solver fixed (!565)
- Fix deadlocking issue with boundary conditions (!657)
**Packaging**:
- Fix NekMesh dependencies for DEB package (!650)
- Fix PETSc build on newer linux distributions (!646)
v4.3.2
------
......@@ -26,6 +62,7 @@ v4.3.2
output is produced in physical space (!621).
- Fix minor performance issue with time integration schemes (!632)
- Fix FilterCheckpoint filter to be consistent with `IO_CheckSteps` (!633)
- Fix CMake configuration for building on Windows 10 with VS 2015 (!641)
- Fix `IO_CheckSteps` to avoid missing first checkpoint (!639)
- Fix bug in iterative solver where only root process would ASSERT when
exceeding the maximum number of iterations (!636)
......@@ -95,7 +132,7 @@ v4.3.0
(!537)
- Fix bug with initial conditions of CG simulations using variable P (!543)
- Fix bug in 3DH2D with non-zero Dirichlet boundary conditions (!545)
- Added in a method to convert equispaced interpolated points back to
- Added in a method to convert equispaced interpolated points back to
coefficients which requires the introduction of a new StdRegions matrix. (!561)
- Empty XML tags which would override non-empty XML tags are now ignored (!581)
- Add contribution guide (!551)
......
......@@ -51,6 +51,7 @@ project. It's a pretty simple process:
new features?
- Have you run your branch through buildbot and do all the tests pass?
- Is there documentation in the user guide and/or developer guide?
- Have you added a CHANGELOG entry, including the MR number?
- Are there any massive files you might have added in the commit history? We try
to keep test files as small as possible. If so you'll need to rebase or
filter-branch to remove those from the commit history.
......
......@@ -2,7 +2,7 @@ FIND_LIBRARY(WIN32_BLAS NAMES libblas PATHS ${TPSRC})
FIND_LIBRARY(WIN32_LAPACK NAMES liblapack PATHS ${TPSRC})
IF (NOT WIN32_BLAS OR NOT WIN32_LAPACK)
IF (CMAKE_CL_64)
IF (CMAKE_CL_64 OR CMAKE_GENERATOR MATCHES Win64)
SET(WIN_ZIP_FILE "win64-blas-lapack.zip")
SET(WIN_ZIP_MD5_VALUE "b5ad4f7335ca964bbdafab129e44a046")
SET(WIN_ZIP_SHA1_VALUE "adb331fa195db264e95e46b902887f12971dbf48")
......
......@@ -8,9 +8,9 @@
#If the user has not set BOOST_ROOT, look in a couple common places first.
MESSAGE(STATUS "Searching for Boost:")
SET(MIN_VER "1.52.0")
SET(MIN_VER "1.56.0")
SET(NEEDED_BOOST_LIBS thread iostreams date_time filesystem system
program_options regex timer)
program_options regex timer chrono)
SET(Boost_DEBUG 0)
SET(Boost_NO_BOOST_CMAKE ON)
IF( BOOST_ROOT )
......@@ -67,7 +67,7 @@ IF (THIRDPARTY_BUILD_BOOST)
# Only build the libraries we need
SET(BOOST_LIB_LIST --with-system --with-iostreams --with-filesystem
--with-program_options --with-date_time --with-thread
--with-regex --with-timer)
--with-regex --with-timer --with-chrono)
IF (NOT WIN32)
# We need -fPIC for 64-bit builds
......@@ -86,6 +86,8 @@ IF (THIRDPARTY_BUILD_BOOST)
SET(TOOLSET msvc-11.0)
ELSEIF (MSVC12)
SET(TOOLSET msvc-12.0)
ELSEIF (MSVC14)
SET(TOOLSET msvc-14.0)
ENDIF()
ELSE(APPLE)
SET(TOOLSET gcc)
......@@ -160,6 +162,9 @@ IF (THIRDPARTY_BUILD_BOOST)
ENDIF(THIRDPARTY_BUILD_ZLIB)
# Set up CMake variables
SET(Boost_CHRONO_LIBRARY boost_chrono)
SET(Boost_CHRONO_LIBRARY_DEBUG boost_chrono)
SET(Boost_CHRONO_LIBRARY_RELEASE boost_chrono)
SET(Boost_DATE_TIME_LIBRARY boost_date_time)
SET(Boost_DATE_TIME_LIBRARY_DEBUG boost_date_time)
SET(Boost_DATE_TIME_LIBRARY_RELEASE boost_date_time)
......@@ -189,7 +194,7 @@ IF (THIRDPARTY_BUILD_BOOST)
SET(Boost_CONFIG_INCLUDE_DIR ${TPINC})
SET(Boost_LIBRARY_DIRS ${TPSRC}/dist/lib)
SET(Boost_CONFIG_LIBRARY_DIR ${TPLIB})
SET(Boost_LIBRARIES boost_date_time boost_filesystem boost_iostreams boost_program_options boost_regex boost_system boost_thread boost_timer)
SET(Boost_LIBRARIES boost_chrono boost_date_time boost_filesystem boost_iostreams boost_program_options boost_regex boost_system boost_thread boost_timer)
LINK_DIRECTORIES(${Boost_LIBRARY_DIRS})
STRING(REPLACE ";" ", " NEEDED_BOOST_LIBS_STRING "${NEEDED_BOOST_LIBS}")
......
......@@ -26,6 +26,8 @@ IF (NEKTAR_USE_PETSC)
IF (THIRDPARTY_BUILD_PETSC)
INCLUDE(ExternalProject)
FIND_PACKAGE(PythonInterp 2 REQUIRED)
SET(PETSC_C_COMPILER "${CMAKE_C_COMPILER}")
SET(PETSC_CXX_COMPILER "${CMAKE_CXX_COMPILER}")
......@@ -39,24 +41,23 @@ IF (NEKTAR_USE_PETSC)
ENDIF (NEKTAR_USE_MPI)
EXTERNALPROJECT_ADD(
petsc-3.5.2
petsc-3.7.2
PREFIX ${TPSRC}
STAMP_DIR ${TPBUILD}/stamp
DOWNLOAD_DIR ${TPSRC}
SOURCE_DIR ${TPBUILD}/petsc-3.5.2
TMP_DIR ${TPBUILD}/petsc-3.5.2-tmp
SOURCE_DIR ${TPBUILD}/petsc-3.7.2
TMP_DIR ${TPBUILD}/petsc-3.7.2-tmp
INSTALL_DIR ${TPDIST}
BINARY_DIR ${TPBUILD}/petsc-3.5.2
URL http://www.nektar.info/thirdparty/petsc-lite-3.5.2.tar.gz
URL_MD5 "d707336a98d7cb31d843804d020edc94"
BINARY_DIR ${TPBUILD}/petsc-3.7.2
URL http://www.nektar.info/thirdparty/petsc-lite-3.7.2.tar.gz
URL_MD5 "26c2ff8eaaa9e49aea063f839f5daa7e"
CONFIGURE_COMMAND
OMPI_CC=${CMAKE_C_COMPILER}
OMPI_CXX=${CMAKE_CXX_COMPILER}
./configure
${PYTHON_EXECUTABLE} ./configure
--with-cc=${PETSC_C_COMPILER}
--with-cxx=${PETSC_CXX_COMPILER}
--with-shared-libraries=0
--with-pic=1
--with-shared-libraries=1
--with-x=0
--with-ssl=0
--prefix=${TPDIST}
......@@ -71,7 +72,7 @@ IF (NEKTAR_USE_PETSC)
"PETSc includes" FORCE)
LINK_DIRECTORIES(${TPDIST}/lib)
MESSAGE(STATUS "Build PETSc: ${TPDIST}/${LIB_DIR}/lib${PETSC_LIBRARIES}.a")
MESSAGE(STATUS "Build PETSc: ${TPDIST}/${LIB_DIR}/lib${PETSC_LIBRARIES}.so")
SET(PETSC_CONFIG_INCLUDE_DIR ${TPINC})
ELSE (THIRDPARTY_BUILD_PETSC)
INCLUDE(FindPETSc)
......@@ -82,13 +83,13 @@ IF (NEKTAR_USE_PETSC)
ENDIF (NOT PETSC_FOUND)
SET(PETSC_CONFIG_INCLUDE_DIR ${PETSC_INCLUDES})
INCLUDE_DIRECTORIES(${PETSC_INCLUDES})
ADD_CUSTOM_TARGET(petsc-3.5.2 ALL)
ADD_CUSTOM_TARGET(petsc-3.7.2 ALL)
ENDIF (THIRDPARTY_BUILD_PETSC)
ADD_DEFINITIONS(-DNEKTAR_USING_PETSC)
INCLUDE_DIRECTORIES(SYSTEM ${PETSC_INCLUDES})
IF (NOT NEKTAR_USE_MPI)
INCLUDE_DIRECTORIES(SYSTEM ${PETSC_INCLUDES}/mpiuni)
INCLUDE_DIRECTORIES(SYSTEM ${PETSC_INCLUDES}/petsc/mpiuni)
ENDIF (NOT NEKTAR_USE_MPI)
MARK_AS_ADVANCED(PETSC_CURRENT PETSC_DIR PETSC_LIBRARIES PETSC_INCLUDES)
......
......@@ -23,10 +23,6 @@ Override a parameter (or define a new one) specified in the XML file.
\hangindent=1.5cm
Override a solverinfo (or define a new one) specified in the XML file.
\lstinline[style=BashInputStyle]{--shared-filesystem}\\
\hangindent=1.5cm
By default when running in parallel the complete mesh is loaded by all processes, although partitioning is done uniquely on the root process only and communicated to the other processes. Each process then writes out its own partition to the local working directory. This is the most robust approach in accounting for systems where the distributed nodes do not share a common filesystem. In the case that there is a common filesystem, this option forces only the root process to load the complete mesh, perform partitioning and write out the session files for all partitions. This avoids potential memory issues when multiple processes attempt to load the complete mesh on a single node.
\lstinline[style=BashInputStyle]{--npx [int]}\\
\hangindent=1.5cm
When using a fully-Fourier expansion, specifies the number of processes to use in the x-coordinate direction.
......
......@@ -1107,6 +1107,61 @@ element is taken as the average of the function in the quadrature
points of the element. If these functions are defined, the values set
for the tolerances in the \texttt{PARAMETERS} section are ignored.
\section{Advecting extra passive scalar fields}
In some cases, it might be useful to advect passive scalar fields with the velocity obtained from the
solution of the Navier-Stokes equation. For example, in the study of mass transfer or heat transfer problems
where obtaining an analytical expression for the advection velocity is not possible, the transport
(advection-diffusion) equation needs to be solved along with the Navier-Stokes equation to obtain the scalar
concentration or temperature distribution in the flow field.
In the input file, the extra field variables that are being advected need to be defined after the variables
representing the velocity components. The pressure needs to be at the end of the list. For example, for
a 2D simulation the expansions and variables would be defined as
\begin{lstlisting}[style=XMLStyle]
<EXPANSIONS>
<E COMPOSITE="C[0]" NUMMODES="5" FIELDS="u,v,c1,c2,p" TYPE="MODIFIED" />
</EXPANSIONS>
<VARIABLES>
<V ID="0"> u </V>
<V ID="1"> v </V>
<V ID="2"> c1 </V>
<V ID="3"> c2 </V>
<V ID="4"> p </V>
</VARIABLES>
\end{lstlisting}
where u, v are the velocity components, c1 and c2 are extra fields that are being advected and p is the pressure.
In addition, diffusion coefficients for each extra variable can be specified by adding a function
\inltt{DiffusionCoefficient}
\begin{lstlisting}[style=XMLStyle]
<FUNCTION NAME="DiffusionCoefficient">
<E VAR="c1" VALUE="0.1" />
<E VAR="c2" VALUE="0.01" />
</FUNCTION>
\end{lstlisting}
Boundary conditions for the extra fields are set up in the same way as the velocity and pressure
\begin{lstlisting}[style=XMLStyle]
<BOUNDARYCONDITIONS>
<REGION REF="0">
<D VAR="u" VALUE="0" />
<D VAR="v" VALUE="0" />
<D VAR="c1" VALUE="1" />
<D VAR="c2" VALUE="0" />
<N VAR="p" USERDEFINEDTYPE="H" VALUE="0" />
</REGION>
</BOUNDARYCONDITIONS>
\end{lstlisting}
It should be noted that if the diffusion coefficient is too small, the transport equation becomes advection-dominated.
This requires a small grid spacing to resolve all physical scales associated with the transport equation
(the ratio of the resolution required for the transport equation to that of the Navier-Stokes equation scales with
$Sc^{3/4}$, where $Sc$ is the Schmidt number, i.e. kinematic viscosity/diffusion coefficient). Hence, a small
diffusion coefficient might lead to spurious oscillations if the mesh spacing is not small enough.
\section{Examples}
\subsection{Kovasznay Flow 2D}
......
......@@ -102,6 +102,7 @@ possibly also Reynolds stresses) into single file;
\item \inltt{equispacedoutput}: Write data as equi-spaced output using simplices to represent the data for connecting points;
\item \inltt{extract}: Extract a boundary field;
\item \inltt{homplane}: Extract a plane from 3DH1D expansions;
\item \inltt{homstretch}: Stretch a 3DH1D expansion by an integer factor;
\item \inltt{innerproduct}: take the inner product between one or a series of fields with another field (or series of fields).
\item \inltt{interpfield}: Interpolates one field to another, requires fromxml, fromfld to be defined;
\item \inltt{interppointdatatofld}: Interpolates given discrete data using a finite difference approximation to a fld file given an xml file;
......@@ -327,6 +328,21 @@ The output file \inltt{file-plane.fld} can be processed in a similar
way as described in section \ref{s:utilities:fieldconvert:sub:convert}
to visualise it either in Tecplot or in Paraview.
\subsection{Stretch a 3DH1D expansion: \textit{homstretch} module}
To stretch a 3DH1D expansion in the z-direction, use the command:
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m homstretch:factor=value file.xml file.fld file-stretch.fld
\end{lstlisting}
The number of modes in the resulting field can be chosen using the command-line
parameter \inltt{output-points-hom-z}. Note that the output for
this module should always be a \inltt{.fld} file and this should not
be used in combination with other modules using a single command.
The output file \inltt{file-stretch.fld} can be processed in a similar
way as described in section \ref{s:utilities:fieldconvert:sub:convert}
to visualise it either in Tecplot or in Paraview.
\subsection{Inner Product of a single or series of fields with respect to a single or series of fields: \textit{innerproduct} module}
You can take the inner product of one field with another field using
......
......@@ -2,7 +2,7 @@
<test>
<description>Helmholtz 2D CG with P=7, all BCs, iterative ML, par(3)</description>
<executable>Helmholtz2D</executable>
<parameters>--use-metis -I GlobalSysSoln=IterativeMultiLevelStaticCond Helmholtz2D_P7_AllBCs.xml</parameters>
<parameters>--use-metis --verbose -I GlobalSysSoln=IterativeMultiLevelStaticCond Helmholtz2D_P7_AllBCs.xml</parameters>
<processes>3</processes>
<files>
<file description="Session File">Helmholtz2D_P7_AllBCs.xml</file>
......
......@@ -2,7 +2,7 @@
<test>
<description>Helmholtz 3D CG, hexes, mixed BCs, iterative ML</description>
<executable>Helmholtz3D</executable>
<parameters>--use-metis -I GlobalSysSoln=IterativeMultiLevelStaticCond Helmholtz3D_Hex_AllBCs_P6.xml</parameters>
<parameters>--use-metis --verbose -I GlobalSysSoln=IterativeMultiLevelStaticCond Helmholtz3D_Hex_AllBCs_P6.xml</parameters>
<processes>3</processes>
<files>
<file description="Session File">Helmholtz3D_Hex_AllBCs_P6.xml</file>
......
......@@ -2,7 +2,7 @@
<test>
<description>Helmholtz 3D CG, hexes, mixed BCs, iterative ML, Scotch</description>
<executable>Helmholtz3D</executable>
<parameters>--use-scotch -I GlobalSysSoln=IterativeMultiLevelStaticCond Helmholtz3D_Hex_AllBCs_P6.xml</parameters>
<parameters>--use-scotch --verbose -I GlobalSysSoln=IterativeMultiLevelStaticCond Helmholtz3D_Hex_AllBCs_P6.xml</parameters>
<processes>3</processes>
<files>
<file description="Session File">Helmholtz3D_Hex_AllBCs_P6.xml</file>
......
......@@ -2,7 +2,7 @@
<test>
<description>Helmholtz 3D CG, prisms, Neumann BCs, iterative ML, Par(3), Scotch</description>
<executable>Helmholtz3D</executable>
<parameters>--use-scotch Helmholtz3D_Prism.xml</parameters>
<parameters>--use-scotch --verbose Helmholtz3D_Prism.xml</parameters>
<processes>3</processes>
<files>
<file description="Session File">Helmholtz3D_Prism.xml</file>
......
This diff is collapsed.
......@@ -47,6 +47,7 @@
#include <LibUtilities/BasicUtils/FileSystem.h>
using namespace std;
namespace Nektar
{
......@@ -138,11 +139,14 @@ void PtsIO::Import(const string &inFile,
// Load metadata
ImportFieldMetaData(infile, fieldmetadatamap);
// TODO: This currently only loads the filename matching our rank.
filenames.clear();
boost::format pad("P%1$07d.%2$s");
pad % m_comm->GetRank() % GetFileEnding();
filenames.push_back(pad.str());
if (filenames.size() == m_comm->GetSize())
{
// only load the file that matches this rank
filenames.clear();
boost::format pad("P%1$07d.%2$s");
pad % m_comm->GetRank() % GetFileEnding();
filenames.push_back(pad.str());
}
for (int i = 0; i < filenames.size(); ++i)
{
......@@ -160,7 +164,19 @@ void PtsIO::Import(const string &inFile,
<< doc1.ErrorCol() << std::endl;
ASSERTL0(loadOkay1, errstr.str());
ImportFieldData(doc1, ptsField);
if (i == 0)
{
ImportFieldData(doc1, ptsField);
}
else
{
LibUtilities::PtsFieldSharedPtr newPtsField;
ImportFieldData(doc1, newPtsField);
Array<OneD, Array<OneD, NekDouble> > pts;
newPtsField->GetPts(pts);
ptsField->AddPoints(pts);
}
}
}
else
......
......@@ -194,6 +194,8 @@ namespace Nektar
// Create communicator
CreateComm(argc, argv);
TestSharedFilesystem();
// If running in parallel change the default global sys solution
// type.
if (m_comm->GetSize() > 1)
......@@ -237,6 +239,8 @@ namespace Nektar
}
}
TestSharedFilesystem();
// If running in parallel change the default global sys solution
// type.
if (m_comm->GetSize() > 1)
......@@ -285,7 +289,7 @@ namespace Nektar
// In verbose mode, print out parameters and solver info sections
if (m_verbose && m_comm)
{
if (m_comm->GetRank() == 0 && m_parameters.size() > 0)
if (m_comm->TreatAsRankZero() && m_parameters.size() > 0)
{
cout << "Parameters:" << endl;
ParameterMap::iterator x;
......@@ -296,7 +300,7 @@ namespace Nektar
cout << endl;
}
if (m_comm->GetRank() == 0 && m_solverInfo.size() > 0)
if (m_comm->TreatAsRankZero() && m_solverInfo.size() > 0)
{
cout << "Solver Info:" << endl;
SolverInfoMap::iterator x;
......@@ -310,6 +314,44 @@ namespace Nektar
}
void SessionReader::TestSharedFilesystem()
{
m_sharedFilesystem = false;
if (m_comm->GetSize() > 1)
{
if (m_comm->GetRank() == 0)
{
std::ofstream testfile("shared-fs-testfile");
testfile << "" << std::endl;
ASSERTL1(!testfile.fail(), "Test file creation failed");
testfile.close();
}
m_comm->Block();
int exists = (bool)boost::filesystem::exists("shared-fs-testfile");
m_comm->AllReduce(exists, LibUtilities::ReduceSum);
m_sharedFilesystem = (exists == m_comm->GetSize());
if ((m_sharedFilesystem && m_comm->GetRank() == 0) ||
!m_sharedFilesystem)
{
std::remove("shared-fs-testfile");
}
}
else
{
m_sharedFilesystem = false;
}
if (m_verbose && m_comm->GetRank() == 0 && m_sharedFilesystem)
{
cout << "Shared filesystem detected" << endl;
}
}
/**
* @brief Parses the command-line arguments for known options and
* filenames.
......@@ -327,7 +369,6 @@ namespace Nektar
"override a SOLVERINFO property")
("parameter,P", po::value<vector<std::string> >(),
"override a parameter")
("shared-filesystem,s", "Using shared filesystem.")
("npx", po::value<int>(),
"number of procs in X-dir")
("npy", po::value<int>(),
......@@ -619,6 +660,10 @@ namespace Nektar
return m_comm;
}
bool SessionReader::GetSharedFilesystem()
{
return m_sharedFilesystem;
}
/**
* This routine finalises any parallel communication.
......@@ -1555,7 +1600,7 @@ namespace Nektar
// Get row of comm, or the whole comm if not split
CommSharedPtr vCommMesh = m_comm->GetRowComm();
const bool isRoot = (m_comm->GetRank() == 0);
const bool isRoot = m_comm->TreatAsRankZero();
// Delete any existing loaded mesh
if (m_xmlDoc)
......@@ -1588,7 +1633,8 @@ namespace Nektar
// If the mesh is already partitioned, we are done. Remaining
// processes must load their partitions.
if (isPartitioned) {
if (isPartitioned)
{
if (!isRoot)
{
m_xmlDoc = MergeDoc(m_filenames);
......@@ -1656,13 +1702,13 @@ namespace Nektar
{
SessionReaderSharedPtr vSession = GetSharedThisPtr();
int nParts = vCommMesh->GetSize();
if (DefinesCmdLineArgument("shared-filesystem"))
if (m_sharedFilesystem)
{
CommSharedPtr vComm = GetComm();
vector<unsigned int> keys, vals;
int i;
if (vComm->GetRank() == 0)
if (isRoot)
{
m_xmlDoc = MergeDoc(m_filenames);
......
......@@ -195,6 +195,8 @@ namespace Nektar
LIB_UTILITIES_EXPORT const std::string GetSessionNameRank() const;
/// Returns the communication object.
LIB_UTILITIES_EXPORT CommSharedPtr &GetComm();
/// Returns the communication object.
LIB_UTILITIES_EXPORT bool GetSharedFilesystem();
/// Finalises the session.
LIB_UTILITIES_EXPORT void Finalise();
......@@ -457,6 +459,8 @@ namespace Nektar
FilterMap m_filters;
/// Be verbose
bool m_verbose;
/// Running on a shared filesystem
bool m_sharedFilesystem;
/// Map of original composite ordering for parallel periodic bcs.
CompositeOrdering m_compOrder;
/// Map of original boundary region ordering for parallel periodic
......@@ -481,6 +485,8 @@ namespace Nektar
/// Returns a shared pointer to the current object.
inline SessionReaderSharedPtr GetSharedThisPtr();
LIB_UTILITIES_EXPORT void TestSharedFilesystem();
/// Parse the program arguments and fill #m_cmdLineOptions
std::vector<std::string> ParseCommandLineArguments(
int argc, char *argv[]);
......
......@@ -399,6 +399,7 @@ TARGET_LINK_LIBRARIES(LibUtilities LINK_PUBLIC
${Boost_FILESYSTEM_LIBRARY}
${Boost_SYSTEM_LIBRARY}
${Boost_TIMER_LIBRARY}
${Boost_CHRONO_LIBRARY}
debug ${ZLIB_LIBRARY_DEBUG} optimized ${ZLIB_LIBRARY_RELEASE}
)
......@@ -460,7 +461,7 @@ ENDIF( NEKTAR_USE_BLAS_LAPACK )
IF( NEKTAR_USE_PETSC )
TARGET_LINK_LIBRARIES(LibUtilities LINK_PRIVATE ${PETSC_LIBRARIES})
TARGET_LINK_LIBRARIES(LibUtilities LINK_PUBLIC ${CMAKE_DL_LIBS})
ADD_DEPENDENCIES(LibUtilities petsc-3.5.2)
ADD_DEPENDENCIES(LibUtilities petsc-3.7.2)
ENDIF( NEKTAR_USE_PETSC )
INSTALL(FILES ${ExpressionTemplates} DESTINATION ${NEKTAR_INCLUDE_DIR}/ExpressionTemplates COMPONENT dev)
......
......@@ -1232,7 +1232,7 @@ namespace Nektar
}
void AssemblyMap::PrintStats(
std::ostream &out, std::string variable) const
std::ostream &out, std::string variable, bool printHeader) const
{
LibUtilities::CommSharedPtr vRowComm
= m_session->GetComm()->GetRowComm();
......@@ -1262,7 +1262,9 @@ namespace Nektar
Array<OneD, NekDouble> tmpLoc (m_numLocalBndCoeffs, 1.0);
Array<OneD, NekDouble> tmpGlob(m_numGlobalBndCoeffs, 0.0);
Vmath::Assmb(m_numLocalBndCoeffs, tmpLoc.get(), m_localToGlobalBndMap.get(), tmpGlob.get());
Vmath::Assmb(
m_numLocalBndCoeffs, tmpLoc.get(),
m_localToGlobalBndMap.get(), tmpGlob.get());
UniversalAssembleBnd(tmpGlob);
int totGlobDof = globCnt;
......@@ -1307,8 +1309,12 @@ namespace Nektar
if (isRoot)
{
out << "Assembly map statistics for field " << variable << ":"
<< endl;
if (printHeader)
{
out << "Assembly map statistics for field " << variable
<< ":" << endl;
}
out << " - Number of local/global dof : "
<< totLocalDof << " " << totGlobDof << endl;
out << " - Number of local/global boundary dof : "
......@@ -1341,9 +1347,12 @@ namespace Nektar
}
}
out << " - Local dof dist. (min/max/mean/dev) : "
<< minval << " " << maxval << " " << (mean / n) << " "
<< sqrt(mean2/n - mean*mean/n/n) << endl;
if (maxval > 0.1)
{
out << " - Local dof dist. (min/max/mean/dev) : "
<< minval << " " << maxval << " " << (mean / n)
<< " " << sqrt(mean2/n - mean*mean/n/n) << endl;
}
vRowComm->Block();
......@@ -1380,6 +1389,69 @@ namespace Nektar
tmp[0] = m_numLocalBndCoeffs;
vRowComm->Send(0, tmp);
}
// Either we have no more levels in the static condensation, or we