Commit 93112dc2 authored by Dave Moxey's avatar Dave Moxey

Merge branch 'master' into feature/mesh-deform

Conflicts:
	library/SpatialDomains/Geometry.cpp
parents 7e7504e5 dbb6cb15
......@@ -266,7 +266,7 @@ INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR})
# Build active components
IF (NEKTAR_BUILD_LIBRARY)
SET(NEKTAR++_LIBRARIES SolverUtils LibUtilities StdRegions SpatialDomains LocalRegions
MultiRegions)
MultiRegions Collections)
INCLUDE_DIRECTORIES(library)
ADD_SUBDIRECTORY(library)
INSTALL(EXPORT Nektar++Libraries DESTINATION ${LIB_DIR}/cmake)
......
......@@ -63,7 +63,7 @@ solvers in the nektar++-solvers package.")
SET(CPACK_RPM_PACKAGE_URL "www.nektar.info")
SET(CPACK_RPM_COMPONENT_INSTALL ON)
SET(CPACK_RPM_PACKAGE_REQUIRES "fftw3, libboost_date_time1_44_0, libboost_filesystem1_44_0, libboost_iostreams1_44_0, libboost_system1_44_0, libboost_thread1_44_0, zlib")
SET(CPACK_RPM_PACKAGE_REQUIRES "fftw3, libboost_date_time1_44_0, libboost_filesystem1_44_0, libboost_iostreams1_44_0, libboost_system1_44_0, libboost_thread1_44_0, libboost_timer1_44_0, zlib")
SET(CPACK_RPM_PACKAGE_DESCRIPTION "
The nektar++ packages provide a spectral/hp element framework for the numerical
solution of partial differential equations (PDEs). Demonstration codes are
......@@ -75,7 +75,7 @@ solvers in the nektar++-solvers package.")
MESSAGE(STATUS "Generating Packaging for DEB")
SET(CPACK_DEB_PACKAGE_URL "www.nektar.info")
SET(CPACK_DEB_COMPONENT_INSTALL ON)
SET(CPACK_DEBIAN_PACKAGE_DEPENDS "libfftw3-3,libboost-date-time1.42.0,libboost-filesystem1.42.0,libboost-iostreams1.42.0,libboost-program-options1.42.0,libboost-system1.42.0,libboost-thread1.42.0,zlib1g")
SET(CPACK_DEBIAN_PACKAGE_DEPENDS "libfftw3-3,libboost-date-time1.42.0,libboost-filesystem1.42.0,libboost-iostreams1.42.0,libboost-program-options1.42.0,libboost-system1.42.0,libboost-thread1.42.0,libboost-timer1.42.0,zlib1g")
SET(CPACK_DEBIAN_PACKAGE_DESCRIPTION
"${CPACK_PACKAGE_DESCRIPTION_SUMMARY}
${CPACK_PACKAGE_DESCRIPTION}")
......
......@@ -9,7 +9,7 @@
#If the user has not set BOOST_ROOT, look in a couple common places first.
MESSAGE(STATUS "Searching for Boost:")
SET(NEEDED_BOOST_LIBS thread iostreams date_time filesystem system
program_options regex)
program_options regex timer)
SET(Boost_DEBUG 0)
SET(Boost_NO_BOOST_CMAKE ON)
IF( BOOST_ROOT )
......@@ -66,7 +66,7 @@ IF (THIRDPARTY_BUILD_BOOST)
# Only build the libraries we need
SET(BOOST_LIB_LIST --with-system --with-iostreams --with-filesystem
--with-program_options --with-date_time --with-thread
--with-regex)
--with-regex --with-timer)
IF (NOT WIN32)
# We need -fPIC for 64-bit builds
......@@ -180,11 +180,15 @@ IF (THIRDPARTY_BUILD_BOOST)
SET(Boost_THREAD_LIBRARY boost_thread)
SET(Boost_THREAD_LIBRARY_DEBUG boost_thread)
SET(Boost_THREAD_LIBRARY_RELEASE boost_thread)
SET(Boost_TIMER_LIBRARY boost_timer)
SET(Boost_TIMER_LIBRARY_DEBUG boost_timer)
SET(Boost_TIMER_LIBRARY_RELEASE boost_timer)
SET(Boost_INCLUDE_DIRS ${TPSRC}/dist/include)
SET(Boost_CONFIG_INCLUDE_DIR ${TPINC})
SET(Boost_LIBRARY_DIRS ${TPSRC}/dist/lib)
SET(Boost_CONFIG_LIBRARY_DIR ${TPLIB})
SET(Boost_LIBRARIES boost_date_time boost_filesystem boost_iostreams boost_program_options boost_regex boost_system boost_thread)
SET(Boost_LIBRARIES boost_date_time boost_filesystem boost_iostreams boost_program_options boost_regex boost_system boost_thread boost_timer)
LINK_DIRECTORIES(${Boost_LIBRARY_DIRS})
STRING(REPLACE ";" ", " NEEDED_BOOST_LIBS_STRING "${NEEDED_BOOST_LIBS}")
......
......@@ -156,8 +156,9 @@ used within the code. We then initialise it in \inlsh{MyConceptImpl1.cpp}
\begin{lstlisting}[style=C++Style]
string MyConceptImpl1::className
= GetMyConceptFactory().RegisterCreatorFunction(
"Impl1", MyConceptImpl1::create, "First implementation of my
concept.");
"Impl1",
MyConceptImpl1::create,
"First implementation of my concept.");
\end{lstlisting}
The first parameter specifies the value of the key which should be used to
select this implementation. The second parameter is a function pointer to our
......@@ -239,3 +240,115 @@ a significant performance penalty (such as in tight inner loops). If needed,
Arrays allow access to the C-style array through the \texttt{Array::data} member
function.
\section{Threading}
\begin{notebox}
Threading is not currently included in the main code distribution. However, this
hybrid MPI/pthread functionality should be available within the next few months.
\end{notebox}
We investigated adding threaded parallelism to the already MPI parallel
Nektar++. MPI parallelism has multiple processes that exchange data using
network or network-like communications. Each process retains its own memory
space and cannot affect any other process’s memory space except through the MPI
API. A thread, on the other hand, is a separately scheduled set of instructions
that still resides within a single process’s memory space. Therefore threads
can communicate with one another simply by directly altering the process’s
memory space. The project's goal was to attempt to utilise this difference to
speed up communications in parallel code.
A design decision was made to add threading in an implementation independent
fashion. This was achieved by using the standard factory methods which
instantiate an abstract thread manager, which is then implemented by a concrete
class. For the reference implementation it was decided to use the Boost library
rather than native p-threads because Nektar++ already depends on the Boost
libraries, and Boost implements threading in terms of p-threads anyway.
It was decided that the best approach would be to use a thread pool. This
resulted in the abstract classes ThreadManager and ThreadJob. ThreadManager is
a singleton class and provides an interface for the Nektar++ programmer to
start, control, and interact with threads. ThreadJob has only one method, the
virtual method run(). Subclasses of ThreadJob must override run() and provide a
suitable constructor. Instances of these subclasses are then handed to the
ThreadManager which dispatches them to the running threads. Many thousands of
ThreadJobs may be queued up with the ThreadManager and strategies may be
selected by which the running threads take jobs from the queue. Synchronisation
methods are also provided within the ThreadManager such as wait(), which waits
for the thread queue to become empty, and hold(), which pauses a thread that
calls it until all the threads have called hold(). The API was thoroughly
documented in Nektar++’s existing Javadoc style.
Classes were then written for a concrete implementation of ThreadManager using
the Boost library. Boost has the advantage of being available on all Nektar++’s
supported platforms. It would not be difficult, however, to implement
ThreadManager using some other functionality, such as native p-threads.
Two approaches to utilising these thread classes were then investigated. The
bottom-up approach identifies likely regions of the code for parallelisation,
usually loops around a simple and independent operation. The top-down approach
seeks to run as much of the code as is possible within a threaded environment.
The former approach was investigated first due to its ease of implementation.
The operation chosen was the multiplication of a very large sparse block
diagonal matrix with a vector, where the matrix is stored as its many smaller
sub matrices. The original algorithm iterated over the sub matrices multiplying
each by the vector and accumulating the result. The new parallel algorithm
sends ThreadJobs consisting of batches of sub matrices to the thread pool. The
worker threads pick up the ThreadJobs and iterate over the sub matrices in the
job accumulating the result in a thread specific result vector. This latter
detail helps to avoid the problem of cache ping-pong which is where multiple
threads try to write to the same memory location, repeatedly invalidating one
another's caches.
Clearly this approach will work best when the sub matrices are large and there
are many of them. However, even for test cases that would be considered large
it became clear that the code was still spending too much time in its scalar
regions.
This led to the investigation of the top-down approach. Here the intent is to
run as much of the code as possible in multiple threads. This is a much more
complicated approach as it requires that the overall problem can be partitioned
suitably, that a mechanism be available to exchange data between the threads,
and that any code using shared resources be thread safe. As Nektar++ already
has MPI parallelism the first two requirements (data partitioning and exchange)
are already largely met. However since MPI parallelism is implemented by having
multiple independent processes that do not share memory space, global data in
the Nektar++ code, such as class static members or singleton instances, are now
vulnerable to change by all the threads running in a process.
To Nektar++’s communication class, Comm, was added a new class, ThreadedComm.
This class encapsulates a Comm object and provides extra functionality without
altering the API of Comm (this is the Decorator pattern). To the rest of the
Nektar++ library this Comm object behaves the same whether it is a purely MPI
Comm object or a hybrid threading plus MPI object. The existing data
partitioning code can be used with very little modification and the parts of the
Nektar++ library that exchange data are unchanged. When a call is made to
exchange data with other workers ThreadedComm first has the master thread on
each process (i.e. the first thread) use the encapsulated Comm object (typically
an MPI object) to exchange the necessary data between the other processes, and
then exchanges data with the local threads using direct memory to memory copies.
As an example: take the situation where there are two processes A and B,
possibly running on different computers, each with two threads 1 and 2. A
typical data exchange in Nektar++ uses the Comm method AllToAll(...) in which
each worker sends data to each of the other workers. Thread A1 will send data
from itself and thread A2 via the embedded MPI Comm to thread B1, receiving in
turn data from threads B1 and B2. Each thread will then pick up the data it
needs from the master thread on its process using direct memory to memory
copies. Compared to the situation where there are four MPI processes the number
of communications that actually pass over the network is reduced. Even MPI
implementations that are clever enough to recognise when processes are on the
same host must make a system call to transfer data between processes.
The code was then audited for situations where threads would be attempting to
modify global data. Where possible such situations were refactored so that each
thread has a copy of the global data. Where the original design of Nektar++ did
not permit this, access to global data was mediated through locking and
synchronisation. This latter approach is not favoured except for global data
that is used infrequently because locking reduces concurrency.
The code has been tested on the Imperial College cluster cx1 and has shown good
scaling. However, it is not yet clear that the threading approach outperforms
the MPI approach; it is possible that the speedups gained through avoiding
network operations are lost due to locking and synchronisation issues. These
losses could be mitigated through more in-depth refactoring of Nektar++.
\ No newline at end of file
......@@ -126,7 +126,7 @@ Both the local coordinate axis along an intersecting edge will then point in the
same direction. Obviously, these conditions will not be fulfilled by default.
But in order to do so, the direction of the local coordinate axis should be
reversed in following situations:
\begin{lstlisting}
\begin{lstlisting}[style=C++Style]
if ((LocalEdgeId == 0)||(LocalEdgeId == 1)) {
if( EdgeOrientation == Backward ) {
change orientation of local coordinate axis
......@@ -184,7 +184,7 @@ mode within element e. This index i in this map array corresponds to the index o
\item globalID represents the ID of the corresponding global degree of freedom.
\end{itemize}
However, rather than this two-dimensional structure of the mapping array,
However, rather than this two-dimensional structure of the mapping array,\\
\texttt{LocalToGlobalMap2D::m\_locToContMap} stores the mapping array as a
one-dimensional array which is the concatenation of the different elemental
mapping arrays map[e]. This mapping array can then be used to assemble the
......@@ -215,7 +215,7 @@ the vertex mode, we will make a clear distinction between them in the
The fill-in of the mapping array can then be summarised by the following part of
(simplified) code:
\begin{lstlisting}
\begin{lstlisting}[style=C++Style]
for(e = 0; e < Number_Of_2D_Elements; e++) {
for(i = 0; i < Number_Of_Vertices_Of_Element_e; i++) {
offsetValue = ...
......
......@@ -215,8 +215,8 @@ switch between some (depending on the problem) of the following
time-integration schemes:
\begin{center}
\small
\begin{tabular}{ll}
\footnotesize
\begin{tabular}{p{4cm}p{10cm}}
\toprule
AdamsBashforthOrder1 & Adams-Bashforth Forward multi-step scheme of order 1\\
AdamsBashforthOrder2 & Adams-Bashforth Forward multi-step scheme of order 2\\
......
@misc{nektar-website,
title={Nektar++: Spectral/hp element framework},
url={http://www.nektar.info},
year={2014}
}
@book{KaSh05,
title={Spectral/hp Element Methods for Computational Fluid Dynamics},
author={Karniadakis, G. E. and Sherwin, S. J.},
publisher={Oxford Science Publications},
year={2005}
}
@article{Bu06,
title={General linear methods},
author={Butcher, J. C.},
journal={Acta Numerica},
volume={15},
pages={157-256}
}
@article{VoEsBoChKi11,
title={A generic framework for time-stepping partial differential equations (PDEs):
general linear methods, object-oriented implementation and application to fluid
problems},
author={Vos, P. E. J. and Eskilsson, C. and Bolis, A. and Chun, S. and Kirby, R. M.
and Sherwin, S. J.},
journal={International Journal of Computational Fluid Dynamics},
volume={25},
issue={3},
pages={107-125},
year={2011}
}
......@@ -31,6 +31,7 @@ openany, % A chapter may start on either a recto or verso page.
\usepackage{makeidx}
\usepackage{import}
%%% PAGE LAYOUT
%%%-----------------------------------------------------------------------------
\setlrmarginsandblock{0.15\paperwidth}{*}{1} % Left and right margin
......@@ -207,7 +208,9 @@ openany, % A chapter may start on either a recto or verso page.
columns=fullflexible,
backgroundcolor=\color{black!05},
linewidth=0.9\linewidth,
xleftmargin=0.1\linewidth
xleftmargin=0.1\linewidth,
showspaces=false,
showstringspaces=false
}
\usepackage{tikz}
......@@ -371,7 +374,42 @@ Scientific Computing and Imaging Institute, University of Utah, USA}
\clearpage
\chapter{Introduction}
Welcome to the developer's guide for Nektar++\cite{nektar-website}.
Nektar++ \cite{CaMoCoBoRo15} is a tensor product based finite element package
designed to allow one to construct efficient classical low polynomial order
$h$-type solvers (where $h$ is the size of the finite element) as well as higher
$p$-order piecewise polynomial order solvers. The framework currently has the
following capabilities:
\begin{itemize}
\item Representation of one, two and three-dimensional fields as a collection of
piecewise continuous or discontinuous polynomial domains.
\item Segment, plane and volume domains are permissible, as well as domains
representing curves and surfaces (dimensionally-embedded domains).
\item Hybrid shaped elements, i.e.\ triangles and quadrilaterals or tetrahedra,
prisms and hexahedra.
\item Both hierarchical and nodal expansion bases.
\item Continuous or discontinuous Galerkin operators.
\item Cross platform support for Linux, Mac OS X and Windows.
\end{itemize}
The framework comes with a number of solvers and also allows one to construct a
variety of new solvers.
Our current goals are to develop:
\begin{itemize}
\item Automatic auto-tuning of optimal operator implementations based upon not
only $h$ and $p$ but also hardware considerations and mesh connectivity.
\item Temporal and spatial adaption.
\item Features enabling evaluation of high-order meshing techniques.
\end{itemize}
This document provides implementation details for the design of the libraries,
Nektar++-specific data structures and algorithms and other development
information.
\begin{warningbox}
This document is still under development and may be incomplete in parts.
\end{warningbox}
\mainmatter
......@@ -392,7 +430,7 @@ Welcome to the developer's guide for Nektar++\cite{nektar-website}.
%%% -------------------------------------------------------------
\bibliographystyle{plain}
\bibliography{developer-guide}
\bibliography{../refs}
\printindex
......
This source diff could not be displayed because it is too large. You can view the blob instead.
......@@ -268,6 +268,71 @@ in the human arterial system},
publisher={Cambridge Univ Press}
}
@book{KaSh05,
title={Spectral/hp Element Methods for Computational Fluid Dynamics},
author={Karniadakis, G. E. and Sherwin, S. J.},
publisher={Oxford Science Publications},
year={2005}
}
@article{Bu06,
title={General linear methods},
author={Butcher, J. C.},
journal={Acta Numerica},
volume={15},
pages={157-256},
year={2006}
}
@article{VoEsBoChKi11,
title={A generic framework for time-stepping partial differential equations (PDEs):
general linear methods, object-oriented implementation and application to fluid
problems},
author={Vos, P. E. J. and Eskilsson, C. and Bolis, A. and Chun, S. and Kirby, R. M.
and Sherwin, S. J.},
journal={International Journal of Computational Fluid Dynamics},
volume={25},
issue={3},
pages={107-125},
year={2011}
}
@article{CaMoCoBoRo15,
title={Nektar++: An open-source spectral/hp element framework},
author={Cantwell, CD and Moxey, D and Comerford, A and Bolis, A and Rocco, G and Mengaldo, G and De Grazia, D and Yakovlev, S and Lombard, J-E and Ekelschot, D and others},
journal={Computer Physics Communications},
year={2015},
publisher={Elsevier}
}
@book{Ga39,
title={Orthogonal polynomials},
author={Gabor Szeg{\"o}},
volume={23},
year={1939},
publisher={American Mathematical Soc.}
}
@book{AbSt64,
title={Handbook of mathematical functions},
author={Abramowitz, Milton and Stegun, Irene A},
year={1972},
publisher={Dover}
}
@techreport{CaHuYoQu88,
title={Spectral methods in fluid dynamics},
author={Canuto, Claudio and Hussaini, M Yousuff and Quarteroni, Alfio and Zang, Thomas A},
year={1988},
institution={Springer}
}
@article{GhOs70,
title={Quadrature formulae},
author={Ghizzetti, Alessandro and Ossicini, Aldo},
year={1970},
publisher={Birkh{\"a}user}
}
@article{DoKa14,
title={A robust and accurate outflow boundary condition for incompressible flow
simulations on severely-truncated unbounded domains},
......
\chapter{Introduction}
Nektar++ is a tensor product based finite element package designed to allow one
to construct efficient classical low polynomial order $h$-type solvers (where
$h$ is the size of the finite element) as well as higher $p$-order piecewise
polynomial order solvers. The framework currently has the following
capabilities:
Nektar++ \cite{CaMoCoBoRo15} is a tensor product based finite element package
designed to allow one to construct efficient classical low polynomial order
$h$-type solvers (where $h$ is the size of the finite element) as well as higher
$p$-order piecewise polynomial order solvers. The framework currently has the
following capabilities:
\begin{itemize}
\item Representation of one, two and three-dimensional fields as a collection of
......
......@@ -128,6 +128,76 @@ For very complex operators -- in particular \inltt{HelmholtzMatrixOp} -- always
set \inltt{DO\_BLOCK\_MAT\_OP} to \inltt{1} as sum-factorisation for these
operator types can be costly.
\section{Collections}
The Collections library adds optimisations to perform certain elemental
operations collectively by applying an operator using a matrix-matrix operation,
rather than a sequence of matrix-vector multiplications. Certain operators
benefit more than others from this treatment, so the following implementations
are available:
\begin{itemize}
\item StdMat: Perform operations using collated matrix-matrix type elemental
operation.
\item SumFac: Perform operation using collated matrix-matrix type sum
factorisation operations.
\item IterPerExp: Loop through elements, performing matrix-vector operation.
\item NoCollections: Use the original LocalRegions implementation to
perform the operation.
\end{itemize}
All configuration relating to Collections is given in the \inltt{COLLECTIONS}
XML element within the \inltt{NEKTAR} XML element.
\subsection{Default implementation}
The default implementation for all operators may be chosen through setting the
\inltt{DEFAULT} attribute of the \inltt{COLLECTIONS} XML element to one of
\inltt{StdMat}, \inltt{SumFac}, \inltt{IterPerExp} or \inltt{NoCollection}. For
example, the following uses the collated matrix-matrix type elemental operation
for all operators and expansion orders:
\begin{lstlisting}[style=XmlStyle]
<COLLECTIONS DEFAULT="StdMat" />
\end{lstlisting}
\subsection{Auto-tuning}
The choice of implementation for each operator, for the given mesh and
expansion orders, can be selected automatically through
auto-tuning. To enable this, add the following to the \nekpp session
file:
\begin{lstlisting}[style=XmlStyle]
<COLLECTIONS DEFAULT="auto" />
\end{lstlisting}
This will collate elements from the given mesh and given expansion orders,
run and time each implementation strategy in turn, and select the fastest
performing case. Note that the selections will be mesh- and order-specific.
The selections made via auto-tuning are output if the \inlsh{--verbose}
command-line switch is given.
\subsection{Manual selection}
The choice of implementation for each operator may be set manually within the
\inltt{COLLECTIONS} tag as shown in the following example. Different implementations may be chosen for different element shapes and expansion orders.
Specifying \inltt{*} for \inltt{ORDER} sets the default implementation for any
expansion orders not explicitly defined.
\begin{lstlisting}[style=XmlStyle]
<COLLECTIONS>
<OPERATOR TYPE="BwdTrans">
<ELEMENT TYPE="T" ORDER="*" IMPTYPE="IterPerExp" />
<ELEMENT TYPE="T" ORDER="1-5" IMPTYPE="StdMat" />
</OPERATOR>
<OPERATOR TYPE="IProductWRTBase">
<ELEMENT TYPE="Q" ORDER="*" IMPTYPE="SumFac" />
</OPERATOR>
</COLLECTIONS>
\end{lstlisting}
Manual selection is intended to document the optimal selections on a given
hardware platform after extensive prior testing, to avoid the need to run the
auto-tuning for each run.
\subsection{Collection size}
The maximum number of elements within a single collection can be enforced using
the \inltt{MAXSIZE} attribute.
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "../user-guide"
......
......@@ -4,7 +4,7 @@
The aim of APESolver is to predict aerodynamic sound generation. Through
the application of a splitting technique, the flow-induced acoustic field is
totally decoupled from the underlying incompressible hydrodynamic field. The
acoustic perturbation equations proposed by Ewert and Shroeder are employed as
acoustic perturbation equations (APE-1/APE-4) proposed by Ewert and Schröder are employed as
the governing equations of the acoustic field and they assure stable
aeroacoustic simulation due to the suppression of the term related to the
production of perturbed vorticity. These equations are similar to the linearised
......@@ -12,16 +12,21 @@ perturbed compressible equations, but while in the original formulation the flow
decomposition is based on solenoidal vortical perturbations as well as
irrotational acoustic perturbations, in this case perturbations are assumed to
be exclusively of acoustic nature.
\begin{align*}
\frac{\partial \mathbf{u}'}{\partial t}
+\nabla(\mathbf{u}' \cdot \mathbf{U})+\frac{1}{\rho_0}\nabla p' &= 0 \\
\begin{subequations}
\begin{align*}
\frac{\partial p'}{\partial t}
+\nabla \cdot (\gamma P \mathbf{u}' + p'\mathbf{U})&=-\frac{DP'}{Dt}
\end{align*}
where $(\mathbf{U},P)$ represents the base flow, $(\mathbf{u}',p')$
the perturbations and $\mathrm{D}/\mathrm{D}t$ the material derivative.
$P'=P-p_{\infty}$ is the acoustic source term, with $p_\infty$
the pressure at a reference value.
+ \overline{c}^2 \frac{\partial \overline{\rho} u'_i}{\partial x_i}
+ \overline{c}^2 \frac{\partial \overline{u}_i p' / \overline{c}^2}{\partial x_i}
&= \overline{c}^2 q_c
\\
\frac{\partial u'_i}{\partial t}
+ \frac{\partial \overline{u}_j u'_j}{\partial x_i}
+ \frac{\partial p' / \overline{\rho}}{\partial x_i}
&= 0
\end{align*}
\end{subequations}
where $(\overline{u}_i,\overline{p}, \overline{\rho}, \overline{c}^2 = \gamma \overline{p} / \overline{\rho} )$ represents the base flow and $(u'_i,p')$ the perturbations.
$\overline{c}^2 q_c$ is the acoustic source term.
\section{Usage}
\begin{lstlisting}[style=BashInputStyle]
......@@ -40,15 +45,13 @@ Currently, only \inltt{APEUpwind} supported.
\subsection{Parameters}
\begin{itemize}
\item \inltt{Rho0}: Density
\item \inltt{Gamma}: Ratio of specific heats
\item \inltt{Pinfinity}: Ambient pressure
\end{itemize}
\subsection{Functions}
\begin{itemize}
\item \inltt{BaseFlow} Baseflow $(\mathbf{U},P)$ defined by the variables \inltt{U0,V0,W0,P0}
\item \inltt{Source} Source term $P'=P-p_{\infty}$
\item \inltt{BaseFlow} Baseflow $(\overline{u}_i, \overline{p}, \overline{\rho})$ defined by the variables \inltt{u0,v0,w0,p0,rho0}
\item \inltt{Source} Source term $\overline{c}^2 q_c$
\item \inltt{InitialConditions}
\end{itemize}
......@@ -57,14 +60,14 @@ Currently, only \inltt{APEUpwind} supported.
\subsection{Aeroacoustic Wave Propagation}
In this section we explain how to set up a simple simulation of aeroacoustics in
Nektar++. We will study the propagation of an acoustic wave in the simple case
where the base flow is $\mathbf{U}=0, P=p_{\infty}=10^6$. The geometry consists