Commit c30fe46d authored by Dave Moxey's avatar Dave Moxey

Add some user guide documentation, fix up CMake options and format H5.cpp

parent 83e034ac
......@@ -103,6 +103,13 @@ IF( NEKTAR_USE_VTK )
${NEKTAR++_TP_INCLUDE_DIRS} ${VTK_INCLUDE_DIRS})
ENDIF( NEKTAR_USE_VTK )
# Propagate this build's HDF5 configuration to consuming projects; the
# @NEKTAR_USE_HDF5@ / @HDF5_INCLUDE_DIRS@ placeholders are presumably
# substituted when this package-config template is generated -- confirm
# against the configure_file() call that emits it.
SET(NEKTAR_USE_HDF5 "@NEKTAR_USE_HDF5@")
IF( NEKTAR_USE_HDF5 )
# Append the HDF5 headers to the third-party include list so downstream
# targets compiling against Nektar++ can find the HDF5 headers.
SET(HDF5_INCLUDE_DIRS "@HDF5_INCLUDE_DIRS@")
SET(NEKTAR++_TP_INCLUDE_DIRS
${NEKTAR++_TP_INCLUDE_DIRS} ${HDF5_INCLUDE_DIRS})
ENDIF( NEKTAR_USE_HDF5 )
# find and add Nektar++ libraries
INCLUDE(${NEKTAR++_LIBRARY_DIRS}/cmake/Nektar++Libraries.cmake)
......
......@@ -14,18 +14,24 @@ IF (NEKTAR_USE_HDF5)
# Hard requirement: parallel HDF5 is only usable when Nektar++ itself is an
# MPI build (the enclosing NEKTAR_USE_MPI check opens above this excerpt).
MESSAGE(FATAL_ERROR "HDF5 requires Nektar++ to be compiled using NEKTAR_USE_MPI.")
ENDIF()
# Try to find parallel system HDF5 first.
SET(HDF5_PREFER_PARALLEL ON)
FIND_PACKAGE(HDF5 QUIET)
# NOTE(review): the two IF blocks below overlap -- this excerpt looks like
# diff residue in which the old behaviour (FATAL_ERROR when the system HDF5
# is not parallel) and its replacement (fall back to building a parallel
# HDF5 from ThirdParty) are both present with the +/- markers stripped.
# Only one of the two should survive; the CMAKE_DEPENDENT_OPTION further
# down consumes the BUILD_HDF5 value set by the second form.
IF (HDF5_FOUND)
IF (NOT HDF5_IS_PARALLEL)
MESSAGE(FATAL_ERROR "HDF5 detected but is not compiled in parallel.")
ENDIF()
INCLUDE_DIRECTORIES(SYSTEM ${HDF5_INCLUDE_DIRS})
SET(HDF5_CONFIG_INCLUDE_DIR ${TPINC})
ADD_CUSTOM_TARGET(hdf5-1.8.16 ALL)
# Decide whether a ThirdParty HDF5 must be built: build when no system HDF5
# was found, or when the system copy lacks parallel (MPI) support.
IF (HDF5_FOUND AND NOT HDF5_IS_PARALLEL)
MESSAGE(STATUS "Non-parallel system HDF5 detected: will build instead.")
SET(BUILD_HDF5 ON)
ELSEIF(HDF5_FOUND)
SET(BUILD_HDF5 OFF)
ELSE()
SET(BUILD_HDF5 ON)
ENDIF()
# User-overridable switch defaulting to the detection result above; it is
# forced OFF (and hidden) unless NEKTAR_USE_HDF5 is enabled.
CMAKE_DEPENDENT_OPTION(THIRDPARTY_BUILD_HDF5
"Build HDF5 from ThirdParty" ${BUILD_HDF5}
"NEKTAR_USE_HDF5" OFF)
IF(THIRDPARTY_BUILD_HDF5)
# NOTE(review): "NOT ... VERSION_GREATER 3.1.0" also rejects CMake 3.1.0
# itself, contradicting the error message; "VERSION_LESS 3.1.0" was
# probably intended -- confirm and fix.
IF (NOT CMAKE_VERSION VERSION_GREATER 3.1.0)
MESSAGE(FATAL_ERROR "HDF5 compilation requires CMake 3.1.0 or later.")
ENDIF()
......@@ -56,17 +62,21 @@ IF (NEKTAR_USE_HDF5)
# Register the ThirdParty-built HDF5 artefacts in the cache so the rest of
# the build sees them exactly as if FIND_PACKAGE(HDF5) had succeeded.
# NOTE(review): CACHE ... FORCE stomps user-supplied cache values; tolerable
# here only because these variables describe the ThirdParty build's own
# outputs, which the user cannot meaningfully override.
SET(HDF5_LIBRARIES hdf5-shared CACHE FILEPATH
"HDF5 libraries" FORCE)
# NOTE(review): diff residue -- the singular HDF5_INCLUDE_DIR line below is
# the pre-change variant of the plural HDF5_INCLUDE_DIRS line that follows;
# only the plural form (matching FindHDF5's result variable) should remain,
# and likewise only one of the MARK_AS_ADVANCED pairs further down.
SET(HDF5_INCLUDE_DIR ${TPDIST}/include CACHE FILEPATH
SET(HDF5_INCLUDE_DIRS ${TPDIST}/include CACHE FILEPATH
"HDF5 include directory" FORCE)
THIRDPARTY_SHARED_LIBNAME(HDF5_LIBRARIES)
MARK_AS_ADVANCED(HDF5_LIBRARIES)
MARK_AS_ADVANCED(HDF5_INCLUDE_DIR)
LINK_DIRECTORIES(${TPDIST}/lib)
MESSAGE(STATUS "Build HDF5: ${HDF5_LIBRARIES}")
SET(HDF5_CONFIG_INCLUDE_DIR ${TPINC})
ELSE()
# System HDF5 is used as-is; the dummy custom target keeps any dependency
# on "hdf5-1.8.16" satisfied without building anything.
SET(HDF5_CONFIG_INCLUDE_DIR ${HDF5_INCLUDE_DIRS})
ADD_CUSTOM_TARGET(hdf5-1.8.16 ALL)
ENDIF()
ENDIF (NEKTAR_USE_HDF5)
\ No newline at end of file
MARK_AS_ADVANCED(HDF5_LIBRARIES)
MARK_AS_ADVANCED(HDF5_INCLUDE_DIRS)
INCLUDE_DIRECTORIES(SYSTEM ${HDF5_INCLUDE_DIRS})
ENDIF()
\ No newline at end of file
\ No newline at end of file
......@@ -27,6 +27,14 @@ Override a solverinfo (or define a new one) specified in the XML file.
\hangindent=1.5cm
By default when running in parallel the complete mesh is loaded by all processes, although partitioning is done uniquely on the root process only and communicated to the other processes. Each process then writes out its own partition to the local working directory. This is the most robust approach in accounting for systems where the distributed nodes do not share a common filesystem. In the case that there is a common filesystem, this option forces only the root process to load the complete mesh, perform partitioning and write out the session files for all partitions. This avoids potential memory issues when multiple processes attempt to load the complete mesh on a single node.
\lstinline[style=BashInputStyle]{--io-format [format]}\\
\hangindent=1.5cm Determines the output format for writing \nekpp field files
that are used to store, for example, checkpoint and solution field files. The
default for \inlsh{format} is \inlsh{Xml}, an XML-based format that is
written as one file per processor. If \nekpp is compiled with HDF5 support,
then an alternative option is \inlsh{Hdf5}, which will write one file for all
processors and can be more efficient for very large-scale parallel jobs.
\lstinline[style=BashInputStyle]{--npx [int]}\\
\hangindent=1.5cm
When using a fully-Fourier expansion, specifies the number of processes to use in the x-coordinate direction.
......
......@@ -74,6 +74,8 @@ OpenMPI & & \cmark & & & For
parallel execution\\
GSMPI & & & & \cmark & For
parallel execution\\
HDF5 & & \cmark & \cmark & \cmark & For
large-scale parallel I/O\\
PETSc & & & \cmark & \cmark &
Alternative linear solvers\\
Scotch & & \cmark & \cmark & \cmark &
......@@ -230,6 +232,8 @@ OpenMPI & & \texttt{openmpi} & & & For
parallel execution\\
GSMPI & & & & \cmark & For
parallel execution\\
HDF5 & & \cmark & \cmark & \cmark & For
large-scale parallel I/O\\
PETSc & & \texttt{petsc} & \cmark & \cmark &
Alternative linear solvers\\
Scotch & & \texttt{scotch} & \cmark & \cmark &
......@@ -563,6 +567,14 @@ can be selected using the following options:
(FFTs). This is used only when using domains with homogeneous coordinate
directions.
\item \inlsh{NEKTAR\_USE\_HDF5}
Build \nekpp with support for HDF5. This enables input/output in the HDF5
parallel file format, which can be very efficient for large numbers of
processors. HDF5 output can be enabled by using a command-line option or
in the \inlsh{SOLVERINFO} section of the XML file. This option requires
that \nekpp be built with MPI support with \inlsh{NEKTAR\_USE\_MPI} enabled.
\item \inlsh{NEKTAR\_USE\_MKL}
Use the Intel MKL library. This is typically available on cluster
......
......@@ -53,6 +53,18 @@ in its compressed format \inltt{test.xml.gz}.
%
%
%
\section{Convert field files between XML and HDF5 format}
%
When \nekpp is compiled with HDF5 support, solvers can select the format used
for output of \inltt{.fld} files. FieldConvert can be used to convert between
these formats using an option on the \inltt{.fld} output module. For example, if
\inltt{in.fld} is stored in the default XML format, it can be converted to HDF5
format by issuing the command
%
\begin{lstlisting}[style=BashInputStyle]
FieldConvert in.fld out.fld:fld:format=Hdf5
\end{lstlisting}
%
\section{Range option \textit{-r}}
The FieldConvert range option \inltt{-r} allows the user to specify
a sub-range of the mesh (computational domain) by using an
......
......@@ -65,6 +65,9 @@ namespace Nektar
namespace LibUtilities
{
// Register the "--io-format" (short "-i") command-line option so users can
// select the field-file I/O backend (e.g. Xml or Hdf5) at runtime.
std::string fldCmdFormat = SessionReader::RegisterCmdLineArgument(
"io-format", "i", "Default input/output format (e.g. Xml, Hdf5)");
/**
* @brief Returns the FieldIO factory.
*/
......@@ -170,10 +173,16 @@ FieldIOSharedPtr FieldIO::CreateDefault(
const LibUtilities::SessionReaderSharedPtr session)
{
// Default to the XML-based field-file format.
std::string iofmt("Xml");
// NOTE(review): diff residue -- the two DefinesSolverInfo/GetSolverInfo
// lines below are the before ("FieldIOFormat") and after ("IOFormat")
// variants of the same check with the diff markers stripped; only the
// "IOFormat" pair should remain in the applied source.
if (session->DefinesSolverInfo("FieldIOFormat"))
if (session->DefinesSolverInfo("IOFormat"))
{
iofmt = session->GetSolverInfo("FieldIOFormat");
iofmt = session->GetSolverInfo("IOFormat");
}
// A command-line --io-format argument takes precedence over the session
// file's SOLVERINFO setting, since it is checked last.
if (session->DefinesCmdLineArgument("io-format"))
{
iofmt = session->GetCmdLineArgument<std::string>("io-format");
}
return GetFieldIOFactory().CreateInstance(
iofmt,
session->GetComm(),
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment