...
 
Commits (93)
# Exclude the usual build environment stuff.
.clang-format
.git
.gitignore
.gitlab-ci.yml
ThirdParty
build
builds
.DS_Store
# Make sure Dockerfiles aren't included in build context to
# improve caching.
pkg/docker/nektar/Dockerfile
pkg/docker/nektar-workbook/Dockerfile
pkg/docker/nektar-env/Dockerfile
.*
!.gitignore
!.gitlab-ci.yml
!.gitattributes
!.clang-format
!.dockerignore
# Ignore builds and ThirdParty directories
build
builds
......
stages:
- docker-build-env
- docker-build-image
- docker-test-image
- docker-cleanup-image
before_script:
- docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
.docker_build_template: &docker_build_template
image: docker:stable
stage: docker-build-image
only:
- master
- /^docker\/.*$/
script:
- docker pull ${image_name} || true
- DOCKER_BUILDKIT=1 docker build -t ${image_name} -f ${dockerfile} .
- docker tag ${image_name} ${image_name}:$CI_COMMIT_SHORT_SHA
- docker push ${image_name}:$CI_COMMIT_SHORT_SHA
- if [ ! -z $CI_COMMIT_TAG ]; then docker tag ${image_name} ${image_name}:$CI_COMMIT_TAG; docker push ${image_name}:$CI_COMMIT_TAG; fi
- docker push ${image_name}:latest
.docker-test-template: &docker_test_template
image: docker:stable
stage: docker-test-image
only:
- master
- /^docker\/.*$/
script:
- docker pull ${image_name} || true
- docker create --name ${container_name}-$CI_COMMIT_SHORT_SHA -t ${image_name}:latest
- docker cp $(pwd)/${test_dir} ${container_name}-$CI_COMMIT_SHORT_SHA:/docker-entrypoint
- docker start ${container_name}-$CI_COMMIT_SHORT_SHA
- docker exec ${container_name}-$CI_COMMIT_SHORT_SHA ${test_cmd}
- docker rm -f ${container_name}-$CI_COMMIT_SHORT_SHA
docker-build-env:
<<: *docker_build_template
stage: docker-build-env
variables:
image_name: nektarpp/nektar-env
dockerfile: pkg/docker/nektar-env/Dockerfile
only:
refs:
- master
- /^docker\/.*$/
changes:
- pkg/docker/nektar-env/Dockerfile
docker-build-nektar:
<<: *docker_build_template
variables:
image_name: nektarpp/nektar
dockerfile: pkg/docker/nektar/Dockerfile
docker-build-workbook:
<<: *docker_build_template
variables:
image_name: nektarpp/nektar-workbook
dockerfile: pkg/docker/nektar-workbook/Dockerfile
docker-test-nektar:
<<: *docker_test_template
variables:
image_name: nektarpp/nektar
container_name: nektar
test_dir: solvers/IncNavierStokesSolver/Tests
test_cmd: mpirun -n 3 IncNavierStokesSolver data/ChanFlow_m3_par.xml
docker-cleanup-nektar:
stage: docker-cleanup-image
when: on_failure
script:
- docker rm -f nektar-$CI_COMMIT_SHORT_SHA
......@@ -6,6 +6,7 @@ v5.0.0
**Library**
- Added in sum factorisation version for pyramid expansions and orthogonal
expansion in pyramids (!750)
- Added detection of 'abort' file to cleanly terminate simulation early (!772)
- Significant overhaul of CMake infrastructure (!770, !804)
- Fix ThirdPartyCCM options (!802)
- Fix Windows CRLF tokens in GEO reader and improve comment handling (!805)
......@@ -62,7 +63,8 @@ v5.0.0
- Fix missing metadata import from Hdf5 files (!971)
- Fix missing flags for periodic BC in DiffusionLDG (!985)
- Add the moving reference frame as a forcing (!987)
- Added rtree for element bounding box lookup to accelerate interpolation (!996)
- Added rtree for element bounding box lookup to accelerate interpolation (!996,
!1066)
- Fix integration weights on prisms and pyramids if not using the default
integration rule (!998)
- Fix missing ContainsPoint in Pyramid expansion (!1000)
......@@ -82,6 +84,11 @@ v5.0.0
- Fix XML attributes in conditions.cpp to be unordered (!1015)
- Fix issue with HDF5 mesh input in serial (!1049)
- Add estimate of filters CPU time (!1044)
- Update CompressibleFlowSolver/Examples/Test_IsentropicVortex1.xml example (!1045)
- Add error if HDG used with periodic BCs (!1071)
- Fix issues related to leading factors, arithmetic order and associativity of
exponential operator in expression evaluator (!1066)
- Remove use of `using namespace std` in header files (!1066)
**NekMesh**:
- Add feature to read basic 2D geo files as CAD (!731)
......@@ -166,6 +173,7 @@ v5.0.0
- Modified pressure outlet BCs to allow for the reference static pressure to be
set from the VALUE fields (!981)
- hp scaling for Laplacian AV (!1013)
- Removed smooth AV (!1072)
**AcousticSolver:**
- Added two new boundary conditions to the APE system: RiemannInvariantBC
......@@ -184,11 +192,18 @@ v5.0.0
**PulseWaveSolver**
- Added two new boundary conditions: AInflow and UInflow
**CardiacEPSolver**
- Converted FentonKarma model to dimensional form and added variants (!1011)
**Documentation**:
- Added an initial developer's guide (!1001)
**Tester**
- Fix build with boost 1.67 (!947)
- Various change to tests to decrease test time (!1053)
**Packaging:**
- Add Dockerfiles and gitlab CI configuration for automatic builds (!1021)
v4.4.2
------
......
......@@ -29,7 +29,7 @@ IF (THIRDPARTY_BUILD_CCMIO)
EXTERNALPROJECT_ADD(
libccmio-2.6.1
PREFIX ${TPSRC}
URL http://portal.nersc.gov/svn/visit/trunk/third_party/libccmio-2.6.1.tar.gz
URL http://visit.ilight.com/svn/visit/trunk/third_party/libccmio-2.6.1.tar.gz
URL_MD5 f81fbdfb960b1a4f3bcc7feee491efe4
STAMP_DIR ${TPBUILD}/stamp
DOWNLOAD_DIR ${TPSRC}
......
......@@ -69,8 +69,9 @@ IF (NEKTAR_USE_FFTW)
# version of some C header files and -isystem reorders include paths).
GET_FILENAME_COMPONENT(X "${CMAKE_CXX_IMPLICIT_INCLUDE_DIRECTORIES}" ABSOLUTE)
GET_FILENAME_COMPONENT(Y ${FFTW_INCLUDE_DIR} ABSOLUTE)
STRING(FIND "${X}" "${Y}" X_FIND)
IF (NOT Y MATCHES ".*${X}.*")
IF (X_FIND EQUAL -1)
INCLUDE_DIRECTORIES(SYSTEM ${FFTW_INCLUDE_DIR})
ENDIF()
......
......@@ -13,11 +13,11 @@ FIND_PROGRAM(HTLATEX htlatex)
MARK_AS_ADVANCED(HTLATEX)
ADD_CUSTOM_TARGET(developer-guide-html
TEXINPUTS=${CMAKE_SOURCE_DIR}:${DEVGUIDESRC}//:
TEXINPUTS=${DEVGUIDESRC}//:
${HTLATEX} ${DEVGUIDESRC}/developers-guide.tex
"${DEVGUIDESRC}/styling.cfg,html,3,next,NoFonts"
COMMAND BIBINPUTS=${DEVGUIDESRC} ${BIBTEX} developers-guide.aux
COMMAND TEXINPUTS=${CMAKE_SOURCE_DIR}:${DEVGUIDESRC}//:
COMMAND TEXINPUTS=${DEVGUIDESRC}//:
${HTLATEX} ${DEVGUIDESRC}/developers-guide.tex
"${DEVGUIDESRC}/styling.cfg,html,3,next,NoFonts"
WORKING_DIRECTORY ${DEVGUIDE}/html
......@@ -43,13 +43,10 @@ FOREACH(pdf ${pdffiles})
ENDFOREACH()
ADD_CUSTOM_TARGET(developer-guide-pdf
export TEXINPUTS=${CMAKE_SOURCE_DIR}//: &&
${PDFLATEX} --output-directory ${DEVGUIDE} ${DEVGUIDESRC}/developers-guide.tex
COMMAND TEXMFOUTPUT=${DEVGUIDE} ${BIBTEX} ${DEVGUIDE}/developers-guide.aux
COMMAND TEXMFOUTPUT=${DEVGUIDE} ${MAKEINDEX} ${DEVGUIDE}/developers-guide.idx
COMMAND TEXINPUTS=${CMAKE_SOURCE_DIR}//:
${PDFLATEX} --output-directory ${DEVGUIDE} ${DEVGUIDESRC}/developers-guide.tex
COMMAND TEXINPUTS=${CMAKE_SOURCE_DIR}//:
${PDFLATEX} --output-directory ${DEVGUIDE} ${DEVGUIDESRC}/developers-guide.tex
COMMAND ${PDFLATEX} --output-directory ${DEVGUIDE} ${DEVGUIDESRC}/developers-guide.tex
COMMAND ${PDFLATEX} --output-directory ${DEVGUIDE} ${DEVGUIDESRC}/developers-guide.tex
WORKING_DIRECTORY ${DEVGUIDESRC}
)
../../VERSION
\ No newline at end of file
......@@ -42,7 +42,6 @@
\usepackage{rotating}
%end of Yu Pan's commands
\title{A Programmer's Guide to Nektar++}
%\makeatletter\@addtoreset{chapter}{part}\makeatother%
......@@ -75,7 +74,7 @@
\tableofcontents
\input{preface/preface.tex}
\import{preface/}{preface}
\input{introduction/introduction.tex}
%
......@@ -85,20 +84,20 @@
\part{Building-Blocks of Our Framework (Inside the Library)} \label{part:library}
\input{library/library-master.tex}
\import{library/}{library-master}
%%
\part{Solvers} \label{part:solvers}
\input{solvers/solvers-master.tex}
\import{solvers/}{solvers-master}
%%
\part{Utilities} \label{part:utilities}
\input{utilities/utilities-master.tex}
\import{utilities/}{utilities-master}
\part{NekPy: Python interface to \nek{}} \label{part:nekpy}
\input{python/python-master.tex}
\import{python/}{python-master}
\bibliographystyle{plain}
......
......@@ -5,7 +5,7 @@ This directory contains two important files for all of {\nek}: NektarUnivConsts
The file NektarUnivConsts.hpp contains various default constants used within {\nek} as seen here:
\lstinputlisting[language=C++, firstline=47, lastline=59]{library/LibUtilities/BasicConst/NektarUnivConsts.hpp}
\lstinputlisting[language=C++, firstline=47, lastline=59]{src/library/LibUtilities/BasicConst/NektarUnivConsts.hpp}
The file NektarUnivTypeDefs.hpp contains the low level typedefs such as: NekDouble, NekInt, OneD, TwoD, ThreeD, FourD, and
enumerations such as Direction (xDir, yDir and zDir) and OutputFormat.
......@@ -10,27 +10,27 @@ should all be considered equally important and relevant. Along the same lines
these areas of the code represent the deepest members of the code hierarchy, these
items should rarely be modified.
%
\input{library/LibUtilities/basicconst.tex}
\import{library/LibUtilities/}{basicconst.tex}
%
\input{library/LibUtilities/basicutils.tex}
\import{library/LibUtilities/}{basicutils.tex}
%
\input{library/LibUtilities/communication.tex}
\import{library/LibUtilities/}{communication.tex}
%
\input{library/LibUtilities/fft.tex}
\import{library/LibUtilities/}{fft.tex}
%
\input{library/LibUtilities/foundations.tex}
\import{library/LibUtilities/}{foundations.tex}
%
\input{library/LibUtilities/interpreter.tex}
\import{library/LibUtilities/}{interpreter.tex}
%
\input{library/LibUtilities/kernel.tex}
\import{library/LibUtilities/}{kernel.tex}
%
\input{library/LibUtilities/linearalgebra.tex}
\import{library/LibUtilities/}{linearalgebra.tex}
%
\input{library/LibUtilities/memory.tex}
\import{library/LibUtilities/}{memory.tex}
%
\input{library/LibUtilities/polylib.tex}
\import{library/LibUtilities/}{polylib.tex}
%
\input{library/LibUtilities/timeintegration.tex}
\import{library/LibUtilities/}{timeintegration.tex}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\chapter{Inside the Library: StdRegions}
......@@ -43,11 +43,11 @@ the reader with an overview of the primary data structures introduced within the
StdRegions Directory (often done through C++ objects), and then present the major
algorithms -- expressed as either object methods or functions -- employed over these data structures.
\input{library/StdRegions/stdreg-fundamentals.tex}
\import{library/StdRegions/}{stdreg-fundamentals.tex}
%
\input{library/StdRegions/stdreg-datastructures.tex}
\import{library/StdRegions/}{stdreg-datastructures.tex}
%
\input{library/StdRegions/stdreg-algorithms.tex}
\import{library/StdRegions/}{stdreg-algorithms.tex}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
......@@ -99,11 +99,11 @@ light-weight data structure we devised was MeshGraph, and it was meant
to meet the second item listed above.
\input{library/SpatialDomains/spdomains-fundamentals.tex}
\import{library/SpatialDomains/}{spdomains-fundamentals.tex}
%
\input{library/SpatialDomains/spdomains-datastructures.tex}
\import{library/SpatialDomains/}{spdomains-datastructures.tex}
%
\input{library/SpatialDomains/spdomains-algorithms.tex}
\import{library/SpatialDomains/}{spdomains-algorithms.tex}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
......@@ -120,11 +120,11 @@ within the LocalRegions Directory (often done through C++ objects),
and then present the major algorithms -- expressed as either object
methods or functions -- employed over these data structures.
\input{library/LocalRegions/localreg-fundamentals.tex}
\import{library/LocalRegions/}{localreg-fundamentals.tex}
%
\input{library/LocalRegions/localreg-datastructures.tex}
\import{library/LocalRegions/}{localreg-datastructures.tex}
%
\input{library/LocalRegions/localreg-algorithms.tex}
\import{library/LocalRegions/}{localreg-algorithms.tex}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
......@@ -139,11 +139,11 @@ within the Collections Directory (often done through C++ objects), and
then present the major algorithms -- expressed as either object
methods or functions -- employed over these data structures.
\input{library/Collections/collections-fundamentals.tex}
\import{library/Collections/}{collections-fundamentals.tex}
%
\input{library/Collections/collections-datastructures.tex}
\import{library/Collections/}{collections-datastructures.tex}
%
\input{library/Collections/collections-algorithms.tex}
\import{library/Collections/}{collections-algorithms.tex}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
......@@ -160,13 +160,13 @@ within the MultiRegions Directory (often done through C++ objects),
and then present the major algorithms -- expressed as either object
methods or functions -- employed over these data structures.
\input{library/MultiRegions/multireg-fundamentals.tex}
\import{library/MultiRegions/}{multireg-fundamentals.tex}
%
\input{library/MultiRegions/multireg-datastructures.tex}
\import{library/MultiRegions/}{multireg-datastructures.tex}
%
\input{library/MultiRegions/multireg-algorithms.tex}
\import{library/MultiRegions/}{multireg-algorithms.tex}
%
\input{library/MultiRegions/multireg-preconditioners.tex}
\import{library/MultiRegions/}{multireg-preconditioners.tex}
......@@ -184,11 +184,11 @@ Directory (often done through C++ objects), and then present the major
algorithms -- expressed as either object methods or functions --
employed over these data structures.
\input{library/GlobalMapping/globalmapping-fundamentals.tex}
\import{library/GlobalMapping/}{globalmapping-fundamentals.tex}
%
\input{library/GlobalMapping/globalmapping-datastructures.tex}
\import{library/GlobalMapping/}{globalmapping-datastructures.tex}
%
\input{library/GlobalMapping/globalmapping-algorithms.tex}
\import{library/GlobalMapping/}{globalmapping-algorithms.tex}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
......@@ -201,11 +201,11 @@ the reader with an overview of the primary data structures introduced within the
FieldUtils Directory (often done through C++ objects), and then present the major
algorithms -- expressed as either object methods or functions -- employed over these data structures.
\input{library/FieldUtils/fieldutils-fundamentals.tex}
\import{library/FieldUtils/}{fieldutils-fundamentals.tex}
%
\input{library/FieldUtils/fieldutils-datastructures.tex}
\import{library/FieldUtils/}{fieldutils-datastructures.tex}
%
\input{library/FieldUtils/fieldutils-algorithms.tex}
\import{library/FieldUtils/}{fieldutils-algorithms.tex}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
......@@ -218,10 +218,10 @@ the reader with an overview of the primary data structures introduced within the
SolverUtils Directory (often done through C++ objects), and then present the major
algorithms -- expressed as either object methods or functions -- employed over these data structures.
\input{library/SolverUtils/solverutils-fundamentals.tex}
\import{library/SolverUtils/}{solverutils-fundamentals.tex}
%
\input{library/SolverUtils/solverutils-datastructures.tex}
\import{library/SolverUtils/}{solverutils-datastructures.tex}
%
\input{library/SolverUtils/solverutils-algorithms.tex}
\import{library/SolverUtils/}{solverutils-algorithms.tex}
......@@ -171,23 +171,28 @@ these as parameters and return arrays very easily. However bear in mind the
following caveats:
\begin{itemize}
\item NumPy arrays created from Array objects will share their memory, so that
changing the C++ array changes the contents of the NumPy array.
\item Any NumPy \texttt{ndarray} created from an \texttt{Array<OneD, >} (and
vice versa) will share their memory. Although this avoids expensive memory
copies, it means that changing the C++ array changes the contents of the NumPy
array (and vice versa).
\item Many functions in Nektar++ return Arrays through argument parameters. In
Python this is a very unnatural way to write functions. For example:
\begin{lstlisting}[language=Python]
# This is good
x, y, z = exp.GetCoords()
# This is bad
x, y, z = np.zeros(10), np.zeros(10), np.zeros(10)
exp.GetCoords(x,y,z)
\end{lstlisting}
Use thin wrappers to overcome this problem. For examples of how to do this,
particularly in returning tuples, consult the `StdRegions/StdExpansion.cpp`
wrapper.
\item \texttt{TwoD} and \texttt{ThreeD} arrays are not supported.
Python this is a very unnatural way to write functions. For example:
\begin{lstlisting}[language=Python]
# This is good
x, y, z = exp.GetCoords()
# This is bad
x, y, z = np.zeros(10), np.zeros(10), np.zeros(10)
exp.GetCoords(x,y,z)
\end{lstlisting}
Use thin wrappers to overcome this problem. For examples of how to do this,
particularly in returning tuples, consult the
\texttt{StdRegions/StdExpansion.cpp} wrapper which contains numerous examples.
\item \texttt{TwoD} and \texttt{ThreeD} arrays are not supported.
\end{itemize}
More information on the memory management and how the memory is shared can be
found in Section~\ref{sec:nekpy:memory}.
\subsection{Inheritance}
Nektar++ makes heavy use of inheritance, which can be translated to Python quite
......
../../../library
\ No newline at end of file
......@@ -1459,7 +1459,7 @@ FORMULA_TRANSPARENT = YES
# The default value is: NO.
# This tag requires that the tag GENERATE_HTML is set to YES.
USE_MATHJAX = NO
USE_MATHJAX = YES
# When MathJax is enabled you can set the default output format to be used for
# the MathJax output. See the MathJax site (see:
......
......@@ -13,11 +13,11 @@ FIND_PROGRAM(HTLATEX htlatex)
MARK_AS_ADVANCED(HTLATEX)
ADD_CUSTOM_TARGET(user-guide-html
export TEXINPUTS=${CMAKE_SOURCE_DIR}:${USERGUIDESRC}//: &&
export TEXINPUTS=${USERGUIDESRC}//: &&
${HTLATEX} ${USERGUIDESRC}/user-guide.tex
"${USERGUIDESRC}/styling.cfg,html,3,next,NoFonts"
COMMAND BIBINPUTS=${CMAKE_SOURCE_DIR}/docs ${BIBTEX} user-guide.aux
COMMAND TEXINPUTS=${CMAKE_SOURCE_DIR}:${USERGUIDESRC}//:
COMMAND TEXINPUTS=${USERGUIDESRC}//:
${HTLATEX} ${USERGUIDESRC}/user-guide.tex
"${USERGUIDESRC}/styling.cfg,html,3,next,NoFonts"
WORKING_DIRECTORY ${USERGUIDE}/html
......@@ -43,14 +43,11 @@ FOREACH(pdf ${pdffiles})
ENDFOREACH()
ADD_CUSTOM_TARGET(user-guide-pdf
export TEXINPUTS=${CMAKE_SOURCE_DIR}//: &&
${PDFLATEX} --output-directory ${USERGUIDE} ${USERGUIDESRC}/user-guide.tex
COMMAND BIBINPUTS=${CMAKE_SOURCE_DIR}/docs TEXMFOUTPUT=${USERGUIDE}
${BIBTEX} ${USERGUIDE}/user-guide.aux
COMMAND TEXMFOUTPUT=${USERGUIDE} ${MAKEINDEX} ${USERGUIDE}/user-guide.idx
COMMAND TEXINPUTS=${CMAKE_SOURCE_DIR}//:
${PDFLATEX} --output-directory ${USERGUIDE} ${USERGUIDESRC}/user-guide.tex
COMMAND TEXINPUTS=${CMAKE_SOURCE_DIR}//:
${PDFLATEX} --output-directory ${USERGUIDE} ${USERGUIDESRC}/user-guide.tex
COMMAND ${PDFLATEX} --output-directory ${USERGUIDE} ${USERGUIDESRC}/user-guide.tex
COMMAND ${PDFLATEX} --output-directory ${USERGUIDE} ${USERGUIDESRC}/user-guide.tex
WORKING_DIRECTORY ${USERGUIDESRC}
)
../../VERSION
\ No newline at end of file
......@@ -43,6 +43,8 @@ $ \dfrac{\partial u}{\partial t} + \mathbf{V}\nabla u = f$ &
\inltt{UnsteadyAdvection} & All & Continuous/Discontinuous \\
$\dfrac{\partial u}{\partial t} = \epsilon \nabla^2 u$ &
\inltt{UnsteadyDiffusion} & All & Continuous/Discontinuous \\
$\dfrac{\partial u}{\partial t} = \epsilon \nabla^2 u + R(u)$ &
\inltt{UnsteadyReactionDiffusion} & All & Continuous \\
$\dfrac{\partial u}{\partial t} + \mathbf{V}\nabla u = \epsilon \nabla^2 u$ &
\inltt{UnsteadyAdvectionDiffusion} & All & Continuous/Discontinuous \\
$\dfrac{\partial u}{\partial t} + u\nabla u = 0$ &
......@@ -82,6 +84,7 @@ The solver info are listed below:
\midrule
\inltt{UnsteadyAdvection} & \checkmark & & &\\
\inltt{UnsteadyDiffusion} & \checkmark & \checkmark & &\\
\inltt{UnsteadyReactionDiffusion} & & & \checkmark &\\
\inltt{UnsteadyAdvectionDiffusion} & & & \checkmark &\\
\inltt{UnsteadyInviscidBurger} & \checkmark & & &\\
\bottomrule
......@@ -552,5 +555,57 @@ streamwise direction, as shown in Fig.~\ref{f:adrsolver:masstransport}.
\end{center}
\end{figure}
\subsection{Unsteady reaction-diffusion systems}
Reaction-diffusion systems arise in a number of areas relating to the
modelling of various physical phenomena, and are particularly prevalent in the
study of chemical interactions and pattern formation. The ADRSolver supports the
solution of a single-variable system
\[
\frac{\partial u}{\partial t} = \epsilon\nabla^2 u + R(u)
\]
where the diffusion coefficient $\epsilon$ and reaction term $R(u)$ are defined
using the session file.
\subsubsection{Numerical restrictions}
The reaction-diffusion system is only supported in a selected configuration,
which is mostly defined inside the \inltt{SOLVERINFO} block:
\begin{itemize}
\item use of a continuous Galerkin discretisation;
\item use an implicit-explicit (IMEX) timestepping scheme, such as
\inltt{IMEXOrder3};
\end{itemize}
This naturally leads to the following \inltt{SOLVERINFO} configuration:
\begin{lstlisting}[style=XMLStyle]
<SOLVERINFO>
<I PROPERTY="EQTYPE" VALUE="UnsteadyReactionDiffusion" />
<I PROPERTY="Projection" VALUE="Continuous" />
<I PROPERTY="DiffusionAdvancement" VALUE="Implicit" />
<I PROPERTY="TimeIntegrationMethod" VALUE="IMEXOrder3" />
</SOLVERINFO>
\end{lstlisting}
Further to this, the reaction term $R(u)$ is imposed by the definition of a body
forcing function. For example, the reaction term $R(u) = 0.1u$ may be defined
using the function:
\begin{lstlisting}[style=XMLStyle]
<!-- Body force to enforce reaction term -->
<FUNCTION NAME="BodyForce">
<E VAR="u" EVARS="u" VALUE="0.1*u" />
</FUNCTION>
\end{lstlisting}
Note in particular the use of the \inltt{EVARS} (equation variables) attribute,
which permits the definition of this function in terms of the scalar variable
$u$. This function should be used together with an appropriate \inltt{FORCING}
block (as described in section~\ref{sec:xml:forcing}):
\begin{lstlisting}[style=XMLStyle]
<FORCING>
<FORCE TYPE="Body">
<BODYFORCE> BodyForce </BODYFORCE>
</FORCE>
</FORCING>
\end{lstlisting}
An example of a simple unsteady reaction-diffusion problem can be found in the
\inltt{Tests} directory in the session file \inltt{ReactionDiffusion2D.xml}.
......@@ -444,8 +444,7 @@ Under the two following sections it is possible to define the initial conditions
\section{Examples}
\subsection{Shock capturing}
Compressible flows can be characterised by abrupt changes in density within the flow domain often referred to as shocks. These discontinuities lead to numerical instabilities (Gibbs phenomena). This problem is prevented by locally adding a diffusion term to the equations to damp the numerical oscillations.
An artificial diffusion term is introduced locally to the Euler equations to deal with flow discontinuity and the consequential numerical oscillations. Two models are implemented, a non-smooth and a smooth artificial viscosity model.
Compressible flows can be characterised by abrupt changes in density within the flow domain often referred to as shocks. These discontinuities can lead to numerical instabilities (Gibbs phenomena). This problem is prevented by locally adding a diffusion term to the equations to damp the numerical oscillations.
\subsubsection{Non-smooth artificial viscosity model}
For the non-smooth artificial viscosity model the added artificial viscosity is constant in each element and discontinuous between the elements. The Euler system is augmented by an added Laplacian term on right hand side of equation \ref{eq:euler} \cite{persson2006sub}.
......@@ -480,9 +479,9 @@ where $s_0 = s_\kappa - 4.25\;log_{10}(p)$.
To enable the non-smooth viscosity model, the following line has to be added to the \inltt{SOLVERINFO} section:
\begin{lstlisting}[style=XmlStyle]
<SOLVERINFO>
<SOLVERINFO>
<I PROPERTY="ShockCaptureType" VALUE="NonSmooth" />
<SOLVERINFO>
<SOLVERINFO>
\end{lstlisting}
The diffusivity and the sensor can be controlled by the following parameters:
\begin{lstlisting}[style=XmlStyle]
......@@ -502,45 +501,6 @@ The diffusivity and the sensor can be controlled by the following parameters:
\end{center}
\end{figure}
\subsubsection{Smooth artificial viscosity model}
For the smooth artificial viscosity model an extra PDE for the artificial viscosity is appended to the Euler system
\begin{equation}\label{eq:eulerplusvis}\begin{split}
\frac{\partial \epsilon}{\partial t} &= \nabla\cdot \left(\nabla \epsilon\right) + \frac{1}{\tau}\left(\frac{h}{p}\lambda_{max}S_\kappa - \epsilon\right)\ \ \ \ \ \ \textrm{on}\ \ \ \ \ \ \ \Omega\\
\frac{\partial \epsilon}{\partial n} &= 0\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \textrm{on}\ \ \ \ \ \ \ \Gamma
\end{split}
\end{equation}
where $S_\kappa$ is a normalised sensor value and serves as a forcing term for the artificial viscosity. A smooth artificial viscosity distribution is obtained.\\
\\
To enable the smooth viscosity model, the following line has to be added to the \inltt{SOLVERINFO} section:
\begin{lstlisting}[style=XmlStyle]
<SOLVERINFO>
<I PROPERTY="ShockCaptureType" VALUE="Smooth" />
<SOLVERINFO>
\end{lstlisting}
Furthermore, the extra viscosity variable \inltt{eps} has to be added to the variable list:
\begin{lstlisting}[style=XmlStyle]
<VARIABLES>
<V ID="0"> rho </V>
<V ID="1"> rhou </V>
<V ID="2"> rhov </V>
<V ID="4"> E </V>
<V ID="5"> eps </V>
</VARIABLES>
\end{lstlisting}
A similar addition has to be made for the boundary conditions and initial conditions. The tests that have been run started with a uniform homogeneous boundary condition and initial condition.
The following parameters can be set in the xml session file:
\begin{lstlisting}[style=XmlStyle]
<PARAMETERS>
<P> Skappa = -1.3 </P>
<P> Kappa = 0.2 </P>
<P> mu0 = 1.0 </P>
<P> FH = 3 </P>
<P> FL = 0.01*FH </P>
<P> C1 = 0.03 </P>
<P> C2 = 5/3*C1 </P>
</PARAMETERS>
\end{lstlisting}
where for now \inltt{FH} and \inltt{FL} are used to tune which range of the sensor is used as a forcing term and \inltt{C1} and \inltt{C2} are fixed constants which can be played around with to make the model more diffusive or not. However these constants are generally fixed.
\subsection{Variable polynomial order}
A sensor based $p$-adaptive algorithm is implemented to optimise the computational cost and accuracy.
The DG scheme allows one to use different polynomial orders since the fluxes over the elements are determined using a Riemann solver and there is no further coupling between the elements. Furthermore, the initial $p$-adaptive algorithm uses the same sensor as the shock capturing algorithm to identify the smoothness of the local solution so it is rather straightforward to implement both algorithms at the same time.\\
......
......@@ -557,9 +557,9 @@ three-dimensional incompressible Navier-Stokes simulation, this produces an FLD
file with the variables \inltt{u}, \inltt{v}, \inltt{w} and \inltt{p}. If we
wanted to use this velocity field as input for an advection velocity, the
advection-diffusion-reaction solver expects the variables \inltt{Vx}, \inltt{Vy}
and \inltt{Vz}.
We can manually specify this mapping by adding a colon to the
and \inltt{Vz}. We can manually specify this mapping by adding a colon to the
filename, indicating the variable names in the target file that align with the
desired function variable names. This gives a definition such as:
\begin{lstlisting}[style=XMLStyle]
<FUNCTION NAME="AdvectionVelocity">
......@@ -570,8 +570,9 @@ We can manually specify this mapping by adding a colon to the
There are some caveats with this syntax:
\begin{itemize}
\item You must specify the same number of fields for both the variable, and
after the colon. For example, the following is not valid.
\item The same number of fields must be defined for both the \inltt{VAR}
attribute and in the comma-separated list after the colon. For example, the
following is not valid:
\begin{lstlisting}[style=XMLStyle,gobble=4]
<FUNCTION NAME="AdvectionVelocity">
<F VAR="Vx,Vy,Vz" FILE="file.fld:u" />
......
\section{Forcing}
\label{sec:xml:forcing}
An optional section of the file allows forcing functions to be defined. These are enclosed in the
\inltt{FORCING} tag. The forcing type is enclosed within the \inltt{FORCE} tag and expressed in the file as:
......
......@@ -32,8 +32,11 @@
//
///////////////////////////////////////////////////////////////////////////////
#include <boost/algorithm/string/predicate.hpp>
#include <Collections/CollectionOptimisation.h>
#include <LibUtilities/BasicUtils/ParseUtils.h>
#include <LibUtilities/BasicUtils/Timer.h>
using namespace std;
......
......@@ -130,9 +130,8 @@ int main(int argc, char *argv[])
{
std::cerr
<< "Usage: FieldIOBenchmarker [options] inputfile [outputfile]"
<< endl;
std::cout << desc;
std::cout << endl;
<< std::endl;
std::cout << desc << std::endl;
return 1;
}
......@@ -168,7 +167,7 @@ int main(int argc, char *argv[])
break;
default:
std::cout << "Unrecognised mode: " << mode << std::endl;
std::cout << desc << endl;
std::cout << desc << std::endl;
return 1;
break;
}
......@@ -426,7 +425,7 @@ Results TestRead(Experiment &exp)
const std::string ft = FieldIO::GetFileType(exp.dataSource, exp.comm);
if (exp.verbose)
{
std::cout << ft << endl;
std::cout << ft << std::endl;
}
Results res(exp.n, 0.0);
......@@ -515,7 +514,7 @@ Results TestWrite(Experiment &exp)
catch (fs::filesystem_error &e)
{
ASSERTL0(e.code().value() == berrc::no_such_file_or_directory,
"Filesystem error: " + string(e.what()));
"Filesystem error: " + std::string(e.what()));
}
// Synchronise to make sure we're all at the same point.
......
from NekPy.LibUtilities import SessionReader
from NekPy.StdRegions import ConstFactorMap, ConstFactorType
from NekPy.LibUtilities import SessionReader, ReduceOperator
from NekPy.StdRegions import ConstFactorMap, ConstFactorType, VarCoeffMap, VarCoeffType
from NekPy.SpatialDomains import MeshGraph
from NekPy.MultiRegions import ContField2D
......@@ -14,6 +14,9 @@ if len(sys.argv) < 2:
session = SessionReader.CreateInstance(sys.argv)
graph = MeshGraph.Read(session)
# Grab the communicator from the SessionReader.
comm = session.GetComm()
# Override polynomial order and create ContField2D.
graph.SetExpansionsToPolyOrder(10)
exp = ContField2D(session, graph, session.GetVariable(0))
......@@ -23,14 +26,34 @@ lamb = session.GetParameter("Lambda")
factors = ConstFactorMap()
factors[ConstFactorType.FactorLambda] = lamb
# Test use of variable coefficients.
coeffs = VarCoeffMap()
coeffs[VarCoeffType.VarCoeffD00] = np.ones(exp.GetNpoints())
coeffs[VarCoeffType.VarCoeffD11] = np.ones(exp.GetNpoints())
# Construct right hand side forcing term.
x, y = exp.GetCoords()
sol = np.sin(np.pi * x) * np.sin(np.pi * y)
fx = -(lamb + 2*np.pi*np.pi) * sol
# Solve Helmholtz equation.
helm_sol = exp.BwdTrans(exp.HelmSolve(fx, factors))
print("L infinity error: %.6e" % np.max(np.abs(helm_sol - sol)))
helm_sol = exp.BwdTrans(exp.HelmSolve(fx, factors, coeffs))
L2_error = exp.L2(helm_sol, sol)
Linf_error = exp.Linf(helm_sol, sol)
Linf_error_comm = comm.AllReduce(
np.max(np.abs(helm_sol - sol)), ReduceOperator.ReduceMax)
# Test reduction of Array types.
reduce_test = np.zeros(comm.GetSize())
reduce_test[comm.GetRank()] = 1.0
comm.AllReduce(reduce_test, ReduceOperator.ReduceSum)
# Print out some stats for debugging.
if comm.GetRank() == 0:
print("L 2 error (variable nek) : %.6e" % L2_error)
print("L inf error (variable nek) : %.6e" % Linf_error)
print("L inf error (variable nekpy) : %.6e" % Linf_error_comm)
print("Reduction test : %d" % round(reduce_test.sum()))
# Clean up!
session.Finalise()
......@@ -3,12 +3,25 @@
<description>Helmholtz solver in 2D domain</description>
<executable python="true"> Helmholtz2D.py </executable>
<parameters>Helmholtz2D_P7.xml</parameters>
<processes>4</processes>
<files>
<file description="Session File">../../MultiRegions/Tests/Helmholtz2D_P7.xml</file>
</files>
<metrics>
<metric type="Linf" id="1">
<value tolerance="1e-7">6.120209e-05</value>
<value tolerance="1e-7" variable="nek">6.120209e-05</value>
<value tolerance="1e-7" variable="nekpy">6.120209e-05</value>
</metric>
<metric type="L2" id="2">
<value tolerance="1e-7" variable="nek">4.302677e-05</value>
</metric>
<metric type="regex" id="3">
<regex>^Reduction test.*: (\d+)</regex>
<matches>
<match>
<field id="0">4</field>
</match>
</matches>
</metric>
</metrics>
</test>
......@@ -434,7 +434,7 @@ struct Field
std::string filename)
{
LibUtilities::CommSharedPtr c = m_comm;
string fmt = LibUtilities::FieldIO::GetFileType(filename, c);
std::string fmt = LibUtilities::FieldIO::GetFileType(filename, c);
auto it = m_fld.find(fmt);
if (it == m_fld.end())
......
......@@ -110,7 +110,7 @@ void swap_endian(T &u)
}
template <typename T>
void swap_endian(vector<T> &u)
void swap_endian(std::vector<T> &u)
{
size_t vecSize = u.size();
for (int i = 0; i < vecSize; ++i)
......@@ -151,9 +151,9 @@ struct ConfigOption
{
return boost::lexical_cast<T>(m_value);
}
catch (const exception &e)
catch (const std::exception &e)
{
cerr << e.what() << endl;
std::cerr << e.what() << std::endl;
abort();
}
}
......@@ -268,8 +268,9 @@ protected:
std::ofstream m_fldFile;
};
typedef pair<ModuleType, std::string> ModuleKey;
FIELD_UTILS_EXPORT std::ostream &operator<<(ostream &os, const ModuleKey &rhs);
typedef std::pair<ModuleType, std::string> ModuleKey;
FIELD_UTILS_EXPORT std::ostream &operator<<(
std::ostream &os, const ModuleKey &rhs);
typedef std::shared_ptr<Module> ModuleSharedPtr;
typedef LibUtilities::NekFactory<ModuleKey, Module, FieldSharedPtr>
......
......@@ -138,7 +138,7 @@ void ProcessFieldFromString::Process(po::variables_map &vm)
}
// Create new function
LibUtilities::AnalyticExpressionEvaluator strEval;
LibUtilities::Interpreter strEval;
int exprId = -1;
string fieldstr = m_config["fieldstr"].as<string>();
exprId = strEval.DefineFunction(varstr.c_str(), fieldstr);
......
......@@ -154,7 +154,7 @@ void ProcessIsoContour::Process(po::variables_map &vm)
Array<OneD, NekDouble> pts(m_f->m_fieldPts->GetNpoints());
// evaluate new function
LibUtilities::AnalyticExpressionEvaluator strEval;
LibUtilities::Interpreter strEval;
string varstr = "x y z";
vector<Array<OneD, const NekDouble> > interpfields;
......