Commit b720cadb authored by Chris Cantwell

Merge branch 'feature/substepping' into 'master'

Feature/substepping

Introduction of a working substepping version of the incompressible Navier-Stokes solver.


See merge request !553
parents 7176bc05 1264d599
......@@ -22,7 +22,8 @@ v4.3.0
(!537)
- Fix bug with initial conditions of CG simulations using variable P (!543)
- Fix bug in 3DH2D with non-zero Dirichlet boundary conditions (!545)
- Added in a method to convert equispaced interpolated points back to coefficients which requires the introduction of a new StdRegions matrix.(!561)
- Added in a method to convert equispaced interpolated points back to
coefficients which requires the introduction of a new StdRegions matrix.(!561)
- Empty XML tags which would override non-empty XML tags are now ignored (!581)
- Add contribution guide (!551)
- Add a filter to calculate exponential moving averages (!566)
......@@ -45,7 +46,8 @@ v4.3.0
- Fix bug in C^0 projection (!541)
- Add command line option to set number of homogeneous planes (!540)
- Add module to project set of points to a fld file (!561)
- Add support for interpolating to a box of points and fix ability to run interppointstofld module in parallel when using a plane or box optio(!561)n
- Add support for interpolating to a box of points and fix ability to run
interppointstofld module in parallel when using a plane or box option (!561)
- Add option to output equi-spaced points in VTU format (!550)
- Add module innerproduct (!568)
- Add command line option of `--part-only` and `--part-only-overlapping` (!569)
......
......@@ -154,7 +154,7 @@ ENDIF()
INCLUDE (NektarCommon)
# Set various ThirdParty locations
SET(TPURL http://www.nektar.info/thirdparty)
SET(TPURL https://www.nektar.info/thirdparty)
SET(TPSRC ${CMAKE_SOURCE_DIR}/ThirdParty)
SET(TPBUILD ${CMAKE_BINARY_DIR}/ThirdParty)
SET(TPDIST ${CMAKE_BINARY_DIR}/ThirdParty/dist)
......
......@@ -355,3 +355,75 @@ year={2011}
year = 2007,
publisher = {IOS Press}
}
@Article{KaIsOr91,
author = "G. E. Karniadakis and M. Israeli and S. A. Orszag",
title = "High-order splitting methods for the incompressible
{Navier}--{Stokes} equations",
journal = JCP,
year = 1991,
volume = 97,
number = 2,
pages = "414--443"
}
@article{GuSh03,
Author="J.L. Guermond and J. Shen",
title="Velocity-correction projection methods for incompressible flows",
journal="SIAM J. Numer.\ Anal.",
volume=41,
pages = "112--134",
year=2003
}
@article{XiShDoKa,
author = {Xiu, D and Sherwin, SJ and Dong, S and Karniadakis, GE},
doi = {10.1007/s10915-004-4647-1},
journal = {J. Sci. Comp.},
pages = {323--346},
title = {Strong and auxiliary forms of the semi-Lagrangian method for incompressible flows},
url = {http://dx.doi.org/10.1007/s10915-004-4647-1},
volume = {25},
year = {2005}
}
@inproceedings{Sh03,
author = {Sherwin, S},
pages = {43--52},
publisher = {Elsevier Science},
title = {A substepping Navier-Stokes splitting scheme for spectral/hp element discretisations},
url = {http://www2.imperial.ac.uk/ssherw/spectralhp/papers/Conferences/pcfd_sherwin.pdf},
year = {2003}
}
@Article{MaPaRo,
author = {Y. Maday and A. T. Patera and E.M. Ronquist},
title = {An operator-integration-factor splitting method for time-dependent problems: Application to incompressible fluid flow},
journal = {J. Sci. Comp.},
year = {1990},
volume = {4},
pages = {263--292},
}
@article{AiSh,
author = {Ainsworth, M and Sherwin, S},
doi = {10.1016/S0045-7825(98)00356-9},
journal = {Computer Methods in Applied Mechanics and Engineering},
pages = {243--266},
title = {Domain decomposition preconditioners for p and hp finite element approximation of Stokes equations},
url = {http://dx.doi.org/10.1016/S0045-7825(98)00356-9},
volume = {175},
year = {1999}
}
@article{ShAi,
author = {Sherwin, SJ and Ainsworth, M},
doi = {10.1016/S0168-9274(99)00102-6},
journal = {Applied Numerical Mathematics},
pages = {357--363},
title = {Unsteady Navier-Stokes solvers using hybrid spectral/hp element methods},
url = {http://dx.doi.org/10.1016/S0168-9274(99)00102-6},
volume = {33},
year = {2000}
}
......@@ -161,7 +161,7 @@ We consider the hyperbolic partial differential equation:
\begin{equation}
\dfrac{\partial u}{\partial t} + \dfrac{\partial f}{\partial x} = 0,
\end{equation}
where $f = a u$ i s the advection flux.
where $f = a u$ is the advection flux.
\subsubsection{Input file}
The input for this example is given in the example file \inlsh{Advection1D.xml}
......
......@@ -462,7 +462,7 @@ For the non-smooth artificial viscosity model the added artificial viscosity is
\end{figure}
\subsubsection{Smooth artificial viscosity model}
For the smooth artificial viscosity model an extra PDE for the artificial viscosity is appended to the Euler system
\begin{equation}\label{eq:euler}\begin{split}
\begin{equation}\label{eq:eulerplusvis}\begin{split}
\frac{\partial \epsilon}{\partial t} &= \nabla\cdot \left(\nabla \epsilon\right) + \frac{1}{\tau}\left(\frac{h}{p}\lambda_{max}S_\kappa - \epsilon\right)\ \ \ \ \ \ \textrm{on}\ \ \ \ \ \ \ \Omega\\
\frac{\partial \epsilon}{\partial n} &= 0\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \textrm{on}\ \ \ \ \ \ \ \Gamma
\end{split}
......
......@@ -50,6 +50,7 @@ openany, % A chapter may start on either a recto or verso page.
\maxsecnumdepth{paragraph} % Subsections (and higher) are numbered
\setsecnumdepth{paragraph}
\makeatletter %
\makechapterstyle{standard}{
\setlength{\beforechapskip}{0\baselineskip}
......@@ -72,6 +73,7 @@ openany, % A chapter may start on either a recto or verso page.
%\chapterstyle{standard}
\chapterstyle{madsen}
\setsecheadstyle{\normalfont\large\bfseries}
\setsubsecheadstyle{\normalfont\normalsize\bfseries}
\setparaheadstyle{\normalfont\normalsize\bfseries}
......@@ -143,7 +145,8 @@ openany, % A chapter may start on either a recto or verso page.
%%% NEW COMMANDS
%%%-----------------------------------------------------------------------------
%%%-----------------------------------------------------------------------------
% Definition of subsubsubsection
\newcommand{\subsubsubsection}[1]{\vspace{9pt}\noindent{\textbf{\em #1:}}\vspace*{3pt}}
% Nektar++ version
\usepackage{xspace}
......@@ -385,6 +388,7 @@ Scientific Computing and Imaging Institute, University of Utah, USA}
\begin{document}
\frontmatter
% Render pretty title page if not building HTML
......@@ -420,13 +424,18 @@ Scientific Computing and Imaging Institute, University of Utah, USA}
\import{xml/}{xml}
\part{Applications and Utilities}
\part{Preprocessing \& Postprocessing}
\label{p:prepost}
\import{utilities/}{preprocessing}
\import{utilities/}{postprocessing}
\part{Solver Applications}
\label{p:applications}
\import{solvers/}{solvers}
\import{utilities/}{utilities}
\part{Reference}
\import{optimisation/}{optimisation}
......
......@@ -140,6 +140,33 @@ where the file \inltt{test-C0Proj.fld} can be processed in a similar
way as described in section \ref{s:utilities:fieldconvert:sub:convert}
to visualise the result either in Tecplot, Paraview or VisIt.
The option \inltt{localtoglobalmap} will do a global gather of the
coefficients and then scatter them back to the local elements. This
will replace the coefficients shared between two elements with the
coefficients of one of the elements (most likely the one with the
highest id). Although this is not a formal projection, it requires no
matrix inversion and so is very cheap to perform.
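As an illustrative invocation (a sketch only: the module name
\inltt{C0Projection} and the file names are assumed from the
surrounding section rather than stated here), this could look like

FieldConvert -m C0Projection:localtoglobalmap file.xml file.fld file-C0.fld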
The option \inltt{usexmlbcs} will enforce the boundary conditions
specified in the input xml file.
The option \inltt{helmsmoothing=$L$} will perform a Helmholtz
smoothing projection of the form
\[
\left (\nabla^2 + \left (\frac{2 \pi}{L}\right )^2 \right ) \hat{u}^{new} =
\left (\frac{2 \pi}{L}\right )^2 \hat{u}^{orig}
\]
which can be interpreted in a Fourier sense as smoothing the original
coefficients using a low pass filter of the form
\[
\hat{u}_k^{new} = \frac{1}{(1 + k^2/K_0^2)} \hat{u}_k^{orig} \mbox{\, where \,}
K_0 = \frac{2 \pi}{L}
\]
and so $L$ is the length scale below which the coefficient values are
halved or more. Since this form of the Helmholtz operator is not
positive definite, a direct solver is currently required, and so this
smoother is mainly of use in two dimensions.
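A hedged usage sketch, assuming the same \inltt{C0Projection} module
as above and an illustrative length scale of $L=0.1$ in mesh units:

FieldConvert -m C0Projection:helmsmoothing=0.1 file.xml file.fld file-smooth.fld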
\subsection{Calculate Q-Criterion: \textit{QCriterion} module}
To perform the Q-criterion calculation and obtain an output
data containing the Q-criterion solution, the user can run
......@@ -506,14 +533,17 @@ the .dat file, i.e.
Optionally \inltt{smooth} can be specified to smooth the isocontour
with default values \inltt{smoothnegdiffusion}=0.495,
\inltt{smoothposdiffusion}=0.5 and \inltt{smoothiter}=100. This option
automatically calls a \inltt{globalcondense} option which remove
multiply defined verties from the simplex definition which arise as
isocontour are generated element by element.
typically should be used with the \inltt{globalcondense} option, which
removes multiply defined vertices from the simplex definition that
arise as isocontours are generated element by element. The
\inltt{smooth} option previously called the \inltt{globalcondense}
option automatically, but this behaviour has been deprecated since it
is now possible to read isocontour files directly, making it useful to
have these as separate options.
In addition to the \inltt{smooth} or \inltt{globalcondense} options
you can specify \inltt{removesmallcontour}=100 which will remove
separate isocontours of less than 100 triangles. This optin requires
\inltt{smooth} or \inltt{globalcondense} to be specified.
separate isocontours of less than 100 triangles.
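An illustrative command line (a sketch: the \inltt{fieldstr} and
\inltt{fieldvalue} parameters are assumed from the wider isocontour
module description and may differ):

FieldConvert -m isocontour:fieldstr="u":fieldvalue=0.5:globalcondense:smooth:removesmallcontour=100 file.xml file.fld iso.dat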
\begin{notebox}
Currently this option is only set up for triangles, quadrilaterals,
......@@ -552,8 +582,6 @@ FieldConvert -m meanmode file.xml file.fld file-mean.fld
The output file \inltt{file-mean.fld} can be processed in a similar
way as described in section \ref{s:utilities:fieldconvert:sub:convert}
to visualise the result either in Tecplot or in Paraview or VisIt.
% TODO: document the setnantovalue option
%
%
%
......
......@@ -538,6 +538,8 @@ To extract a surface use the command:
where the integers are surface IDs to be extracted.
An optional argument of \inltt{detectbnd} can be added to identify the boundary composites as part of the surface extraction.
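For instance (a sketch only; the tool name, surface IDs and file names
are assumed rather than taken from this section):

NekMesh -m extract:surf=1,2:detectbnd mesh.xml surfmesh.xml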
\subsection{Linearisation}
The ability to remove all the high-order information in a mesh can be useful
......
......@@ -4,8 +4,6 @@ SET(CollectionTimingSource CollectionTiming.cpp)
ADD_NEKTAR_EXECUTABLE(CollectionTiming demos CollectionTimingSource)
TARGET_LINK_LIBRARIES(CollectionTiming ${LinkLibraries})
#ADD_NEKTAR_TEST(LinearAdvDiffReact2D_P7_Modes)
IF (NEKTAR_USE_MPI)
# ADD_NEKTAR_TEST(Helmholtz2D_CG_P7_Modes_AllBCs_xxt_full)
ENDIF (NEKTAR_USE_MPI)
......@@ -67,6 +67,7 @@ int main(int argc, char *argv[])
Array<OneD,NekDouble> fce;
Array<OneD,NekDouble> xc0,xc1,xc2;
StdRegions::ConstFactorMap factors;
StdRegions::VarCoeffMap varcoeffs;
FlagList flags;
#ifdef TIMING
NekDouble st;
......@@ -145,6 +146,34 @@ int main(int argc, char *argv[])
}
//----------------------------------------------
//----------------------------------------------
// Set up variable coefficients if defined
if (vSession->DefinesFunction("d00"))
{
Array<OneD, NekDouble> d00(nq,0.0);
LibUtilities::EquationSharedPtr d00func =
vSession->GetFunction("d00",0);
d00func->Evaluate(xc0, xc1, xc2, d00);
varcoeffs[StdRegions::eVarCoeffD00] = d00;
}
if (vSession->DefinesFunction("d11"))
{
Array<OneD, NekDouble> d11(nq,0.0);
LibUtilities::EquationSharedPtr d11func =
vSession->GetFunction("d11",0);
d11func->Evaluate(xc0, xc1, xc2, d11);
varcoeffs[StdRegions::eVarCoeffD11] = d11;
}
if (vSession->DefinesFunction("d22"))
{
Array<OneD, NekDouble> d22(nq,0.0);
LibUtilities::EquationSharedPtr d22func =
vSession->GetFunction("d22",0);
d22func->Evaluate(xc0, xc1, xc2, d22);
varcoeffs[StdRegions::eVarCoeffD22] = d22;
}
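// As a hedged illustration, a variable coefficient such as d00 could be
// supplied in the session file with a FUNCTION block along these lines
// (the expression is an arbitrary example, not part of this commit):
//
//   <FUNCTION NAME="d00">
//       <E VAR="u" VALUE="1 + 0.5*sin(PI*x)" />
//   </FUNCTION>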
//----------------------------------------------
//----------------------------------------------
// Define forcing function for first variable defined in file
fce = Array<OneD,NekDouble>(nq);
......@@ -164,7 +193,8 @@ int main(int argc, char *argv[])
//Helmholtz solution taking physical forcing after setting
//initial condition to zero
Vmath::Zero(Exp->GetNcoeffs(),Exp->UpdateCoeffs(),1);
Exp->HelmSolve(Fce->GetPhys(), Exp->UpdateCoeffs(), flags, factors);
Exp->HelmSolve(Fce->GetPhys(), Exp->UpdateCoeffs(), flags, factors,
varcoeffs);
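// With diagonal variable coefficients supplied, the problem being solved
// corresponds (roughly -- a sketch inferred from the coefficient names)
// to div(D grad u) - lambda*u = -f with D = diag(d00, d11, d22), rather
// than the constant-coefficient Helmholtz equation.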
//----------------------------------------------
Timing("Helmholtz Solve ..");
......
......@@ -7,93 +7,92 @@
#include <StdRegions/StdRegions.hpp>
using namespace Nektar;
using namespace StdRegions;
using namespace StdRegions;
using namespace std;
NekDouble Tri_sol(NekDouble x, NekDouble y, int order1, int order2);
int main(int argc, char *argv[])
{
int i,j;
int order, nq1,nq2;
LibUtilities::PointsType Qtype1,Qtype2;
LibUtilities::BasisType btype1,btype2;
StdExpansion *E;
if(argc != 4)
{
fprintf(stderr,"Usage: EquiToCoeff2D order nq1 nq2 \n");
exit(1);
}
btype1 = (LibUtilities::BasisType) LibUtilities::eModified_A;
btype2 = (LibUtilities::BasisType) LibUtilities::eModified_B;
order = atoi(argv[1]);
nq1 = atoi(argv[2]);
nq2 = atoi(argv[3]);
Qtype1 = LibUtilities::eGaussLobattoLegendre;
Qtype2 = LibUtilities::eGaussRadauMAlpha1Beta0;
//-----------------------------------------------
// Define a segment expansion based on basis definition
const LibUtilities::PointsKey Pkey1(nq1,Qtype1);
const LibUtilities::PointsKey Pkey2(nq2,Qtype2);
const LibUtilities::BasisKey b1(btype1,order,Pkey1);
const LibUtilities::BasisKey b2(btype2,order,Pkey2);
E = new StdTriExp (b1,b2);
Array<OneD,NekDouble> x = Array<OneD,NekDouble>(nq1*nq2);
Array<OneD,NekDouble> y = Array<OneD,NekDouble>(nq1*nq2);
E->GetCoords(x,y);
Array<OneD,NekDouble> sol = Array<OneD,NekDouble>(nq1*nq2);
//----------------------------------------------
// Define solution to be projected
for(i = 0; i < nq1*nq2; ++i)
{
sol[i] = Tri_sol(x[i],y[i],order,order);
}
//----------------------------------------------
Array<OneD, NekDouble> esol(order*(order+1)/2);
Array<OneD, NekDouble> coeffs(order*(order+1)/2);
//---------------------------------------------
// Interpolate to equispaced points
E->PhysInterpToSimplexEquiSpaced(sol,esol,order);
//---------------------------------------------
//--------------------------------------------
// Project from equispaced points back to coeffs
E->EquiSpacedToCoeffs(esol,coeffs);
//--------------------------------------------
Array<OneD,NekDouble> phys = Array<OneD,NekDouble>(nq1*nq2);
//--------------------------------------------
// Project from equispaced points back to coeffs
E->BwdTrans(coeffs,phys);
//--------------------------------------------
//--------------------------------------------
// Calculate L_inf error
cout << "L infinity error: " << E->Linf(phys,sol) << endl;
cout << "L 2 error: " << E->L2 (phys,sol) << endl;
//--------------------------------------------
int i, j;
int order, nq1, nq2;
LibUtilities::PointsType Qtype1, Qtype2;
LibUtilities::BasisType btype1, btype2;
StdExpansion *E;
if (argc != 4)
{
fprintf(stderr, "Usage: EquiToCoeff2D order nq1 nq2 \n");
exit(1);
}
btype1 = (LibUtilities::BasisType)LibUtilities::eModified_A;
btype2 = (LibUtilities::BasisType)LibUtilities::eModified_B;
order = atoi(argv[1]);
nq1 = atoi(argv[2]);
nq2 = atoi(argv[3]);
Qtype1 = LibUtilities::eGaussLobattoLegendre;
Qtype2 = LibUtilities::eGaussRadauMAlpha1Beta0;
//-----------------------------------------------
// Define a segment expansion based on basis definition
const LibUtilities::PointsKey Pkey1(nq1, Qtype1);
const LibUtilities::PointsKey Pkey2(nq2, Qtype2);
const LibUtilities::BasisKey b1(btype1, order, Pkey1);
const LibUtilities::BasisKey b2(btype2, order, Pkey2);
E = new StdTriExp(b1, b2);
Array<OneD, NekDouble> x = Array<OneD, NekDouble>(nq1 * nq2);
Array<OneD, NekDouble> y = Array<OneD, NekDouble>(nq1 * nq2);
E->GetCoords(x, y);
Array<OneD, NekDouble> sol = Array<OneD, NekDouble>(nq1 * nq2);
//----------------------------------------------
// Define solution to be projected
for (i = 0; i < nq1 * nq2; ++i)
{
sol[i] = Tri_sol(x[i], y[i], order, order);
}
//----------------------------------------------
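// Tri_sol returns sum_{k+l <= order-1} x^k y^l, which lies exactly in
// the order-'order' triangular expansion space, so the equispaced
// round trip below should recover it and both error norms printed at
// the end should be at machine precision.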
Array<OneD, NekDouble> esol(order * (order + 1) / 2);
Array<OneD, NekDouble> coeffs(order * (order + 1) / 2);
//---------------------------------------------
// Interpolate to equispaced points
E->PhysInterpToSimplexEquiSpaced(sol, esol, order);
//---------------------------------------------
//--------------------------------------------
// Project from equispaced points back to coeffs
E->EquiSpacedToCoeffs(esol, coeffs);
//--------------------------------------------
Array<OneD, NekDouble> phys = Array<OneD, NekDouble>(nq1 * nq2);
//--------------------------------------------
// Project from equispaced points back to coeffs
E->BwdTrans(coeffs, phys);
//--------------------------------------------
//--------------------------------------------
// Calculate L_inf error
cout << "L infinity error: " << E->Linf(phys, sol) << endl;
cout << "L 2 error: " << E->L2(phys, sol) << endl;
//--------------------------------------------
}
NekDouble Tri_sol(NekDouble x, NekDouble y, int order1, int order2){
int l,k;
NekDouble Tri_sol(NekDouble x, NekDouble y, int order1, int order2)
{
int l, k;
NekDouble sol = 0;
for(k = 0; k < order1; ++k)
for (k = 0; k < order1; ++k)
{
for(l = 0; l < order2-k; ++l)
for (l = 0; l < order2 - k; ++l)
{
sol += pow(x,k)*pow(y,l);
sol += pow(x, k) * pow(y, l);
}
}
......
......@@ -56,7 +56,6 @@
#include <LibUtilities/Foundations/Foundations.hpp>
#include <boost/algorithm/string.hpp>
#include <boost/graph/adjacency_list.hpp>
#include <boost/graph/adjacency_iterator.hpp>
......@@ -313,7 +312,6 @@ namespace Nektar
}
}
// check to see if compressed
// check to see if compressed
std::string IsCompressed;
vSubElement->QueryStringAttribute("COMPRESSED",&IsCompressed);
......@@ -1178,7 +1176,6 @@ namespace Nektar
int acnt = 0;
int vcnt = 0;
int nWeight = nGraphVerts;
BoostAdjacencyIterator adjvertit, adjvertit_end;
Array<OneD, int> xadj(nGraphVerts+1,0);
Array<OneD, int> adjncy(2*nGraphEdges);
Array<OneD, int> vwgt(nWeight, 1);
......@@ -1870,7 +1867,7 @@ namespace Nektar
{
// get index from full list;
int idx = m_meshCurvedPts[c.ptid]
.index[c.ptoffset+i];
.index[c.ptoffset+i];
// if index is not already in curved
// points add it or set index to location
......
......@@ -214,7 +214,7 @@ class PtsField
int m_dim;
/// Names of the field variables
vector<std::string> m_fieldNames;
/// Point data. For a n-dimensional field, the first n fields are the
/// Point data. For a n-dimensional field, the first m_dim fields are the
/// points spatial coordinates. Structure: m_pts[fieldIdx][ptIdx]
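/// (Illustrative example: for m_dim = 2 with a single field "u",
/// m_pts[0] and m_pts[1] hold the x and y coordinates and m_pts[2]
/// holds u, each indexed by point.)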
Array<OneD, Array<OneD, NekDouble> > m_pts;
/// Number of points per edge. Empty if the point data has no
......
......@@ -141,6 +141,14 @@ namespace Nektar
const PointsKey &tpoints1,
NekDouble *to)
{
// Fast path: when the 'from' and 'to' point distributions are
// identical, interpolation reduces to a straight copy of the
// physical values.
if ((fpoints0 == tpoints0) && (fpoints1 == tpoints1))
{
    Vmath::Vcopy(tpoints0.GetNumPoints() * tpoints1.GetNumPoints(),
                 from, 1, to, 1);
    return;
}
DNekMatSharedPtr I0,I1;
Array<OneD, NekDouble> wsp(tpoints1.GetNumPoints()*fpoints0.GetNumPoints()); // fnp0*tnp1
......
......@@ -102,6 +102,10 @@ namespace Nektar
const int nq0,
const int nq1,
Array<OneD, int> &idmap);
void v_NormVectorIProductWRTBase(
const Array<OneD, const Array<OneD, NekDouble> > &Fvec,
Array<OneD, NekDouble> &outarray);
protected:
virtual void v_DGDeriv(
const int dir,
......@@ -130,11 +134,11 @@ namespace Nektar
StdRegions::Orientation orient);
virtual void v_GetFacePhysVals(
const int face,
const StdRegions::StdExpansionSharedPtr &FaceExp,
const Array<OneD, const NekDouble> &inarray,
Array<OneD, NekDouble> &outarray,
StdRegions::Orientation orient);
const int face,
const StdRegions::StdExpansionSharedPtr &FaceExp,
const Array<OneD, const NekDouble> &inarray,
Array<OneD, NekDouble> &outarray,
StdRegions::Orientation orient);
//-----------------------------
// Low Energy Basis functions
......
......@@ -462,16 +462,6 @@ namespace Nektar
}
}
void PyrExp::v_GetTracePhysVals(
const int face,
const StdRegions::StdExpansionSharedPtr &FaceExp,
const Array<OneD, const NekDouble> &inarray,
Array<OneD, NekDouble> &outarray,
StdRegions::Orientation orient)
{
v_GetFacePhysVals(face,FaceExp,inarray,outarray,orient);
}
void PyrExp::v_ComputeFaceNormal(const int face)
{
const SpatialDomains::GeomFactorsSharedPtr &geomFactors =
......
......@@ -128,14 +128,6 @@ namespace Nektar
Array<OneD, int> &outarray);
LOCAL_REGIONS_EXPORT void v_ComputeFaceNormal(const int face);
LOCAL_REGIONS_EXPORT virtual void v_GetTracePhysVals(
const int face,
const StdRegions::StdExpansionSharedPtr &FaceExp,
const Array<OneD, const NekDouble> &inarray,
Array<OneD, NekDouble> &outarray,
StdRegions::Orientation orient);
//---------------------------------------
// Matrix creation functions
//---------------------------------------
......
......@@ -601,6 +601,13 @@ namespace Nektar
IProductWRTBase(Fn,outarray);
}
void QuadExp::v_NormVectorIProductWRTBase(
const Array<OneD, const Array<OneD, NekDouble> > &Fvec,
Array<OneD, NekDouble> &outarray)
{
NormVectorIProductWRTBase(Fvec[0], Fvec[1], Fvec[2], outarray);
}
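// Hypothetical usage sketch (variable names assumed): the vector-form
// overload lets callers pack the Cartesian flux components into one
// argument, which is then dispatched to the component-wise overload:
//
//   Array<OneD, Array<OneD, NekDouble> > Fvec(3);
//   Fvec[0] = Fx; Fvec[1] = Fy; Fvec[2] = Fz;
//   quadExp->NormVectorIProductWRTBase(Fvec, outarray);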
StdRegions::StdExpansionSharedPtr QuadExp::v_GetStdExp(void) const
{
return MemoryManager<StdRegions::StdQuadExp>
......
......@@ -129,12 +129,17 @@ namespace Nektar
const int dir,
const Array<OneD, const NekDouble> &inarray,
Array<OneD, NekDouble> &outarray);
LOCAL_REGIONS_EXPORT virtual void v_NormVectorIProductWRTBase(
const Array<OneD, const NekDouble> &Fx,
const Array<OneD, const NekDouble> &Fy,
const Array<OneD, const NekDouble> &Fz,
Array<OneD, NekDouble> &outarray);
LOCAL_REGIONS_EXPORT virtual void v_NormVectorIProductWRTBase(
const Array<OneD, const Array<OneD, NekDouble> > &Fvec,
Array<OneD, NekDouble> &outarray);
//---------------------------------------
// Evaluation functions
//---------------------------------------
......
......@@ -637,6 +637,13 @@ cout<<"deps/dx ="<<inarray_d0[i]<<" deps/dy="<<inarray_d1[i]<<endl;
v_IProductWRTBase(Fn,outarray);
}
void SegExp::v_NormVectorIProductWRTBase(
const Array<OneD, const Array<OneD, NekDouble> > &Fvec,
Array<OneD, NekDouble> &outarray)
{
NormVectorIProductWRTBase(Fvec[0], Fvec[1], outarray);
}
//-----------------------------
// Evaluation functions