Commit 07a0458e authored by Dave Moxey's avatar Dave Moxey

Merge branch 'master' into fix/mesh-partition

parents fa1f7400 952804d8
......@@ -334,8 +334,8 @@ year={2011}
publisher={Birkh{\"a}user}
}
@article{DoKa14,
title={A robust and accurate outflow boundary condition for incompressible flow
simulations on severely-truncated unbounded domains},
title={A robust and accurate outflow boundary condition for incompressible
flow simulations on severely-truncated unbounded domains},
author={S. Dong and G. E. Karniadakis and C. Chryssostomidis},
journal={Journal of Computational Physics},
volume={261},
......@@ -352,4 +352,4 @@ year={2011}
pages = {157--172},
year = 2007,
publisher = {IOS Press}
}
\ No newline at end of file
}
......@@ -1573,6 +1573,71 @@ It is possible to visualise the wall shear stress distribution by running the Fl
\includegraphics[width=12cm]{Figures/WSS.png}
\caption{Non-dimensional wall shear stress distribution.}
\end{center}
\end{figure}
\subsection{Finite-strip modeling of flow past flexible cables}
As a computationally efficient model, the strip theory-based modeling technique has been proposed previously to predict vortex-induced vibration (VIV) for higher Reynolds number flows. In the strip theory-based model, the fluid flow solution is obtained on a series of 2D computational planes (also called “strips”) along the riser’s axis direction. These strips are then coupled with each other through the structural dynamic model of the riser, and the VIV response prediction is achieved by the strip-structure interactions. In the 2D strip theory, it is assumed that the flow is purely two-dimensional without spanwise correlation, which allows the problem to be split into various 2D planes. A consequence of the 2D strip solution under this assumption is that it is unable to reflect the influence of spanwise wake turbulence on the structural dynamics. In order to overcome this shortcoming, we proposed a new module in the framework of Nektar++, in which a spanwise scale is locally allocated to each one of the strips, so that the spanwise velocity correlation is reconstructed in the flow field within each strip. In particular, this model allows the fluid domain to be divided into $N$ strips with thickness ratio of $L_{z}/D$, evenly distributed along the spanwise ($z$) direction. The gap between the neighboring strips, represented by $L_{g}$, satisfies the relation $L_{c}=N(L_{z}+L_{g})$. Since the strip in this model has a finite scale in the $z$-direction, we name it a finite strip to distinguish it from the traditional 2D strip plane. Next, the flow dynamics within each individual strip are modeled by the viscous incompressible Navier-Stokes equations, while a tensioned beam model is employed to govern the dynamics of the flexible structures. In this example, we will show how to use the finite-strip model to predict the vortex-induced vibration responses of flexible cables. 
Let us consider a vortex-induced vibration of a slender cable with an aspect ratio of $L_c/D$=4$\pi$, which is immersed in uniform flows at Re=100.
\subsubsection{Input File}
The cable with a mass ratio (defined as the ratio of the total oscillating mass to the mass of displaced fluid) of 1 has diameter $D=1$; the 2D mesh is composed of 284 quadrilateral elements. The spanwise direction is split in 16 strips with thickness ratio of $L_z/D$=$\pi$/8 and one pair of complex Fourier modes for each one of the strips. We will use a sixth order polynomial expansion for the spectral element and the input file for this example is \inlsh{CylFlow\_HomoStrip.xml}.
\begin{lstlisting}[style=XMLStyle]
<E COMPOSITE="C[73]" NUMMODES="6" TYPE="MODIFIED" FIELDS="u,v,w,p" />
\end{lstlisting}
To use the finite strip routines we just need to insert a flag of \inlsh{"HomoStrip"} in the solver information as below. In addition, we need to specify the types of vibration and support ends for the cables. In this case, the vibration type is specified as \inlsh{VALUE="CONSTRAINED"}, which means that the cable's vibration is constrained only in the crossflow direction. Other options include \inlsh{VALUE="FREE"} and \inlsh{"FORCED"}, respectively corresponding to free vibrations in both the streamwise and crossflow directions and forced vibration by specified functions given in the input file. For the support ends of the cable, another option of \inlsh{VALUE="PINNED-PINNED"} is available for the simulations, which satisfies the condition of zero values of displacements on the support ends.
\subsubsection{Solver information:~}
\begin{lstlisting}[style=XMLStyle]
<SOLVERINFO>
<I PROPERTY="HomoStrip" VALUE="True"/>
<I PROPERTY="VibrationType" VALUE="CONSTRAINED"/>
<I PROPERTY="SupportType" VALUE="FREE-FREE"/>
</SOLVERINFO>
\end{lstlisting}
\subsubsection{Parameters}
All the simulation parameters are specified in the section as follows.
\begin{lstlisting}[style=XMLStyle]
<PARAMETERS>
<P> LZ = PI/8 </P> <!--thickness ratio-->
<P> LC = 4*PI </P> <!--aspect ratio-->
<P> A = 0.025 </P>
<P> omega = 1.0 </P>
<P> PROC_Z = 16 </P>
<P> Strip_Z = 16 </P> <!--number of the strips-->
<P> DistStrip = PI/4 </P> <!--distance of the strips-->
<P> StructStiff = 0.02 </P>
<P> StructRho = 2.0 </P>
<P> CableTension = 8.82 </P>
<P> BendingStiff = 0.0 </P>
<P> FictDamp = 0.0 </P>
<P> FictMass = 3.0 </P>
</PARAMETERS>
\end{lstlisting}
\subsubsection{Running the solver}
In this example we will run the solver in parallel. We can specify the number of the strips by providing an additional flag to the solver, \inlsh{--nsz}. In this example, we will run 16 strips, therefore it would be specified as \inlsh{--nsz 16}. The solver can now be run as follows
\begin{lstlisting}[style=BashInputStyle]
mpirun -np 16 IncNavierStokesSolver CylFlow_HomoStrip.xml --npz 16 --nsz 16
\end{lstlisting}
The simulation results are illustrated in spanwise vorticity contours in Figure \ref{f:incns:finite-strip-modeling}. The wake response of the cable appears as a standing wave pattern in the earlier stage and then it transitions into a traveling wave response, as shown in this figure.
\begin{figure}
\begin{center}
\includegraphics[width=7cm]{Figures/strip-16-time-100.png}
\includegraphics[width=7cm]{Figures/strip-16-time-600.png}
\caption{Spanwise vorticity contours in standing wave and traveling wave patterns predicted in finite strip modeling.}
\label{f:incns:finite-strip-modeling}
\end{center}
\end{figure}
\subsection{2D direct stability analysis of the channel flow}
......
......@@ -93,9 +93,9 @@ stands for \inltt{m}odule)..
Specifically, FieldConvert has these additional functionalities
%
\begin{enumerate}
\item \inltt{AddFld}: Sum two .fld files;
\item \inltt{C0Projection}: Computes the C0 projection of a given output file;
\item \inltt{QCriterion}: Computes the Q-Criterion for a given output file;
\item \inltt{addFld}: Sum two .fld files;
\item \inltt{concatenate}: Concatenate a \nekpp binary output (.chk or .fld) field file into single file;
\item \inltt{equispacedoutput}: Write data as equi-spaced output using simplices to represent the data for connecting points;
\item \inltt{extract}: Extract a boundary field;
......@@ -103,8 +103,13 @@ Specifically, FieldConvert has these additional functionalities
\item \inltt{interppointdatatofld}: Interpolates given discrete data using a finite difference approximation to a fld file given an xml file;
\item \inltt{interppoints}: Interpolates a set of points to another, requires fromfld and fromxml to be defined, a line or plane of points can be defined;
\item \inltt{isocontour}: Extract an isocontour of ``fieldid'' variable and at value ``fieldvalue''. Optionally ``fieldstr'' can be specified for a string definition or ``smooth'' for smoothing;
\item \inltt{scaleinputfld}: Rescale input field by a constant factor.
\item \inltt{jacobianenergy}: Shows high frequency energy of Jacobian;
\item \inltt{printfldnorms}: Print L2 and LInf norms to stdout;
\item \inltt{scalargrad}: Computes scalar gradient field;
\item \inltt{scaleinputfld}: Rescale input field by a constant factor;
\item \inltt{shear}: Computes time-averaged shear stress metrics: TAWSS, OSI, transWSS, TAAFI, TACFI, WSSG;
\item \inltt{vorticity}: Computes the vorticity field;
\item \inltt{wss}: Computes wall shear stress field.
\end{enumerate}
The module list above can be seen by running the command
%
......@@ -117,25 +122,6 @@ In the following we will detail the usage of each module.
%
%
\subsubsection{Sum two .fld files: \textit{AddFld} module}
To sum two .fld files one can use the \inltt{AddFld} module of FieldConvert
%
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m addfld:fromfld=file1.fld:scale=-1 file1.xml file2.fld file3.fld
\end{lstlisting}
%
In this case we use it in conjunction with the command \inltt{scale}
which multiply the values of a given .fld file by a constant \inltt{value}.
\inltt{file1.fld} is the file multiplied by \inltt{value}, \inltt{file1.xml}
is the associated session file, \inltt{file2.fld} is the .fld file which
is summed to \inltt{file1.fld} and finally \inltt{file3.fld} is the output
which contain the sum of the two .fld files.
\inltt{file3.fld} can be processed in a similar way as described
in section \ref{s:utilities:fieldconvert:sub:convert} to visualise
it either in Tecplot or in Paraview the result.
%
%
%
\subsubsection{Smooth the data: \textit{C0Projection} module}
To smooth the data of a given .fld file one can
use the \inltt{C0Projection} module of FieldConvert
......@@ -162,6 +148,26 @@ to visualise either in Tecplot or in Paraview the result.
%
%
%
\subsubsection{Sum two .fld files: \textit{addFld} module}
To sum two .fld files one can use the \inltt{addFld} module of FieldConvert
%
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m addfld:fromfld=file1.fld:scale=-1 file1.xml file2.fld file3.fld
\end{lstlisting}
%
In this case we use it in conjunction with the command \inltt{scale}
which multiplies the values of a given .fld file by a constant \inltt{value}.
\inltt{file1.fld} is the file multiplied by \inltt{value}, \inltt{file1.xml}
is the associated session file, \inltt{file2.fld} is the .fld file which
is summed to \inltt{file1.fld} and finally \inltt{file3.fld} is the output
which contains the sum of the two .fld files.
\inltt{file3.fld} can be processed in a similar way as described
in section \ref{s:utilities:fieldconvert:sub:convert} to visualise
the result either in Tecplot or in Paraview.
%
%
%
\subsubsection{Concatenate two files: \textit{concatenate} module}
To concatenate \inltt{file1.fld} and \inltt{file2.fld} into \inltt{file-conc.fld}
one can run the following command
......@@ -233,6 +239,20 @@ a Paraview output.
%
%
%
\subsubsection{Compute the gradient of a field: \textit{gradient} module}
To compute the spatial gradients of all fields one can run the following command
%
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m gradient test.xml test.fld test-grad.fld
\end{lstlisting}
%
where the file \inltt{test-grad.fld} can be processed in a similar
way as described in section \ref{s:utilities:fieldconvert:sub:convert}
to visualise either in Tecplot or in Paraview the result.
%
%
%
%
\subsubsection{Interpolate one field to another: \textit{interpfield} module}
To interpolate one field to another, one can use the following command:
%
......@@ -259,7 +279,6 @@ faster.
%
%
\subsubsection{Interpolate scattered point data to a field: \textit{interppointdatatofld} module}
\label{s:utilities:fieldconvert:sub:interppointdatatofld}
To interpolate discrete point data to a field, use the interppointdatatofld module:
%
\begin{lstlisting}[style=BashInputStyle]
......@@ -288,6 +307,8 @@ Each line defines a point, while the first column contains its $x$-coordinate,
the second one contains the $a$-values, the third the $b$-values and so on.
In case of $n$-dimensional data, the $n$ coordinates are specified in the first $n$
columns accordingly.
Note that currently, the \textit{interppointdatatofld} module can only perform
interpolation in one dimension.
%
In order to interpolate 1D data to a $n$D field, specify the matching coordinate in
the output field using the \inltt{interpcoord} argument:
......@@ -300,23 +321,20 @@ FieldConvert -m interppointdatatofld:interppointdatatofld=1 3D-file1.xml \
This will interpolate the 1D scattered point data from \inltt{1D-file1.pts} to the
$y$-coordinate of the 3D mesh defined in \inltt{3D-file1.xml}. The resulting field
will have constant values along the $x$ and $z$ coordinates.
For 1D Interpolation, the module implements a quadratic scheme and automatically
falls back to a linear method if only two data points are given.
A modified inverse distance method is used for 2D and 3D interpolation.
Linear and quadratic interpolation require the data points in the \inlsh{.pts}-file to be
sorted by their location in ascending order.
The Inverse Distance implementation has no such requirement.
In the \inlsh{.pts}-file, all data points must be sorted by their location in ascending order.
The module implements a quadratic interpolation scheme and automatically falls back to a
linear scheme if only two data points are given.
%
%
%
\subsubsection{Interpolate a field to a series of points: \textit{interppoints} module}
You can interpolate one field to a series of given points using the following command:
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m interppoints:fromxml=from.xml:fromfld=file1.fld \
FieldConvert -m interppoints:fromxml=file1.xml:fromfld=file1.fld \
file2.pts file2.dat
\end{lstlisting}
This command will interpolate the field defined by \inltt{file1.xml} and
\inltt{file1.fld} to the points defined in \inltt{file2.xml} and output it to
\inltt{file1.fld} to the points defined in \inltt{file2.pts} and output it to
\inltt{file2.dat}.
The \inltt{fromxml} and \inltt{fromfld} must be specified in this module.
The format of the file \inltt{file2.pts} is of the same form as for the
......@@ -392,16 +410,64 @@ automatically calls a \inltt{globalcondense} option which remove
multiply defined vertices from the simplex definition which arise as
isocontours are generated element by element.
In addition to the \inltt{smooth} or \inltt{globalcondense} options
you can specify \inltt{removesmallcontour}=100 which will remove
separate isocontours of less than 100 triangles. This option requires
\inltt{smooth} or \inltt{globalcondense} to be specified.
\begin{notebox}
Currently this option is only set up for triangles, quadrilaterals,
tetrahedrons and prisms.
\end{notebox}
%
%
%
%
\subsubsection{Show high frequency energy of the Jacobian: \textit{jacobianenergy} module}
\subsubsection{Scale a given .fld: \textit{scaleinputfld} module}
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m jacobianenergy file.xml file.fld jacenergy.fld
\end{lstlisting}
The option \inltt{topmodes} can be used to specify the number of top modes to
keep.
The output file \inltt{jacenergy.fld} can be processed in a similar
way as described in section \ref{s:utilities:fieldconvert:sub:convert}
to visualise it either in Tecplot or in Paraview the result.
%
%
%
\subsubsection{Print L2 and LInf norms: \textit{printfldnorms} module}
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m printfldnorms test.xml test.fld
\end{lstlisting}
This module does not create an output file. The L2 and LInf norms for each field variable are printed to the stdout.
%
%
%
\subsubsection{Computes the scalar gradient: \textit{scalargrad} module}
The scalar gradient of a field is computed by running:
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m scalargrad:bnd=0 test.xml test.fld test-scalgrad.fld
\end{lstlisting}
The option \inltt{bnd} specifies which boundary region to extract. Note this is different to MeshConvert where the parameter \inltt{surf} is specified and corresponds to composites rather than boundaries. If \inltt{bnd} is not provided, all boundaries are extracted to different fields. To process this file you will need an xml file of the same region.
%
%
%
\subsubsection{Scale a given .fld: \textit{scaleinputfld} module}
To scale a .fld file by a given scalar quantity, the user can run:
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m sacleinputfld:scale=value test.fld test-scal.fld
FieldConvert -m scaleinputfld:scale=value test.xml test.fld test-scal.fld
\end{lstlisting}
The argument \inltt{scale=value} rescales the input field
\inltt{test.fld} by the constant factor \inltt{value}.
......@@ -409,6 +475,29 @@ The output file \inltt{file-conc.fld} can be processed in a similar
way as described in section \ref{s:utilities:fieldconvert:sub:convert}
to visualise it either in Tecplot or in Paraview the result.
%
%
%
\subsubsection{Time-averaged shear stress metrics: \textit{shear} module}
Time-dependent wall shear stress derived metrics relevant to cardiovascular fluid dynamics research can be computed using this module. They are
\begin{itemize}
\item TAWSS: time-averaged wall shear stress;
\item OSI: oscillatory shear index;
\item transWSS: transverse wall shear stress;
\item TACFI: time-averaged cross-flow index;
\item TAAFI: time-averaged aneurysm formation index;
\item |WSSG|: wall shear stress gradient.
\end{itemize}
To compute these, the user can run:
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m shear:N=value:fromfld=test_id_b0.fld test.xml test-multishear.fld
\end{lstlisting}
The arguments \inltt{N} and \inltt{fromfld} are compulsory arguments that respectively define the number of \inltt{fld} files corresponding to the number of discrete equispaced time-steps, and the first \inltt{fld} file which should have the form of \inltt{test\_id\_b0.fld} where the first underscore in the name marks the starting time-step file ID.
The input \inltt{.fld} files are the outputs of the \textit{wss} module. If they do not contain the surface normals (an optional output of the \textit{wss} module), then the \textit{shear} module will not compute the last metric, |WSSG|.
%
%
%
......@@ -423,6 +512,17 @@ way as described in section \ref{s:utilities:fieldconvert:sub:convert}.
%
%
%
\subsubsection{Computing the wall shear stress: \textit{wss} module}
To obtain the wall shear stress vector and magnitude, the user can run:
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m wss:bnd=0:addnormals=1 test.xml test.fld test-wss.fld
\end{lstlisting}
The option \inltt{bnd} specifies which boundary region to extract. Note this is different to MeshConvert where the parameter \inltt{surf} is specified and corresponds to composites rather than boundaries. If \inltt{bnd} is not provided, all boundaries are extracted to different fields. The \inltt{addnormals} is an optional command argument which, when turned on, outputs the normal vector of the extracted boundary region as well as the shear stress vector and magnitude. This option is off by default. To process the output file(s) you will need an xml file of the same region.
%
%
%
\subsubsection{Manipulating meshes with FieldConvert}
FieldConvert has support for two modules that can be used in conjunction with
the linear elastic solver, as shown in chapter~\ref{s:elasticity}. To do this,
......
......@@ -2,12 +2,4 @@
\input{meshconvert}
\input{fieldconvert}
\input{fldtovtk}
\input{fldtotecplot}
\input{xmltovtk}
\input{probefld}
\input{fieldconvert}
\ No newline at end of file
......@@ -298,7 +298,7 @@ the Incompressible Navier-Stokes solver,
Finally, multi-variable functions such as initial conditions and analytic
solutions may be specified for use in, or comparison with, simulations. These
may be specified using expressions (\inltt{<E>}) or imported from a file
(\inltt{<F>}) using the Nektar++ .fld or .pts file formats
(\inltt{<F>}) using the Nektar++ FLD file format
\begin{lstlisting}[style=XMLStyle]
<FUNCTION NAME="ExactSolution">
......@@ -309,31 +309,11 @@ may be specified using expressions (\inltt{<E>}) or imported from a file
</FUNCTION>
\end{lstlisting}
An .fld file is a solution file (in other words an .fld renamed as .rst) where
A restart file is a solution file (in other words an .fld renamed as .rst) where
the field data is specified. The expansion order used to generate the .rst file
must be the same as that for the simulation.
.pts files contain scattered point data which needs to be interpolated to the field.
For further information on the file format and the different interpolation schemes, see
section~\ref{s:utilities:fieldconvert:sub:interppointdatatofld}.
All filenames must be specified relative to the location of the .xml file.
must be the same as that for the simulation. The filename must be specified
relative to the location of the .xml file.
With the additional argument \inltt{TIMEDEPENDENT="1"}, different files can be
loaded for each timestep. The filenames are defined using
\href{http://www.boost.org/doc/libs/1_56_0/libs/format/doc/format.html#syntax}{boost::format syntax}
where the step time is used as variable. For example, the function
\inltt{Baseflow} would load the files \inltt{U0V0\_1.00000000E-05.fld},
\inltt{U0V0\_2.00000000E-05.fld} and so on.
\begin{lstlisting}[style=XMLStyle]
<FUNCTION NAME="Baseflow">
<F VAR="U0,V0" TIMEDEPENDENT="1" FILE="U0V0_%14.8E.fld"/>
</FUNCTION>
\end{lstlisting}
For .pts files, the time-consuming computation of interpolation weights is only
performed for the first timestep. The weights are stored and reused in all subsequent steps,
which is why all consecutive .pts files must use the same ordering, number and location of
data points.
Other examples of this input feature can be the insertion of a forcing term,
\begin{lstlisting}[style=XMLStyle]
......
......@@ -875,6 +875,10 @@ namespace Nektar
}
}
/** \brief Get the normals along the specified face
 * Get the face normals interpolated to a points0 x points0
 * type distribution
 **/
void PrismExp::v_ComputeFaceNormal(const int face)
{
const SpatialDomains::GeomFactorsSharedPtr &geomFactors =
......
......@@ -197,7 +197,7 @@ namespace Nektar
}
}
/**
/**
*
*/
ExpansionType ExpList::GetExpType(void)
......@@ -560,7 +560,7 @@ namespace Nektar
* array of size \f$N_{\mathrm{eof}}\f$.
*/
void ExpList::v_FwdTrans_IterPerExp(const Array<OneD, const NekDouble> &inarray,
Array<OneD, NekDouble> &outarray)
Array<OneD, NekDouble> &outarray)
{
Array<OneD,NekDouble> f(m_ncoeffs);
......@@ -1187,7 +1187,7 @@ namespace Nektar
* \f$Q_{\mathrm{tot}}\f$.
*/
void ExpList::v_BwdTrans_IterPerExp(const Array<OneD, const NekDouble> &inarray,
Array<OneD, NekDouble> &outarray)
Array<OneD, NekDouble> &outarray)
{
Array<OneD, NekDouble> tmp;
for (int i = 0; i < m_collections.size(); ++i)
......@@ -1235,7 +1235,7 @@ namespace Nektar
NekDouble tol,
bool returnNearestElmt)
{
NekDouble resid;
NekDouble nearpt = 1e6;
if (GetNumElmts() == 0)
{
......@@ -1255,7 +1255,7 @@ namespace Nektar
{
if ((*m_exp)[i]->GetGeom()->ContainsPoint(gloCoords,
locCoords,
tol, resid))
tol, nearpt))
{
w.SetX(gloCoords[0]);
w.SetY(gloCoords[1]);
......@@ -1291,7 +1291,7 @@ namespace Nektar
// retrieve local coordinate of point
(*m_exp)[min_id]->GetGeom()->GetLocCoords(gloCoords,
locCoords);
locCoords);
return min_id;
}
else
......@@ -1304,56 +1304,64 @@ namespace Nektar
{
static int start = 0;
int min_id = 0;
NekDouble resid_min = 1e6;
NekDouble nearpt_min = 1e6;
Array<OneD, NekDouble> savLocCoords(locCoords.num_elements());
// restart search from last found value
for (int i = start; i < (*m_exp).size(); ++i)
{
if ((*m_exp)[i]->GetGeom()->ContainsPoint(gloCoords, locCoords,
tol, resid))
if ((*m_exp)[i]->GetGeom()->ContainsPoint(gloCoords,
locCoords,
tol, nearpt))
{
start = i;
return i;
}
else
{
if(resid < resid_min)
if(nearpt < nearpt_min)
{
min_id = i;
resid_min = resid;
Vmath::Vcopy(locCoords.num_elements(),savLocCoords,1,locCoords,1);
nearpt_min = nearpt;
Vmath::Vcopy(locCoords.num_elements(),locCoords,1,savLocCoords,1);
}
}
}
for (int i = 0; i < start; ++i)
{
if ((*m_exp)[i]->GetGeom()->ContainsPoint(gloCoords, locCoords,
tol, resid))
if ((*m_exp)[i]->GetGeom()->ContainsPoint(gloCoords,
locCoords,
tol, nearpt))
{
start = i;
return i;
}
else
{
if(resid < resid_min)
if(nearpt < nearpt_min)
{
min_id = i;
resid_min = resid;
Vmath::Vcopy(locCoords.num_elements(),savLocCoords,1,locCoords,1);
nearpt_min = nearpt;
Vmath::Vcopy(locCoords.num_elements(),
locCoords,1,savLocCoords,1);
}
}
}
std::string msg = "Failed to find point in element to tolerance of "
+ boost::lexical_cast<std::string>(resid)
+ " using nearest point found";
WARNINGL0(true,msg.c_str());
std::string msg = "Failed to find point within element to tolerance of "
+ boost::lexical_cast<std::string>(tol)
+ " using local point ("
+ boost::lexical_cast<std::string>(locCoords[0]) +","
+ boost::lexical_cast<std::string>(locCoords[1]) +","
+ boost::lexical_cast<std::string>(locCoords[1])
+ ") in element: "
+ boost::lexical_cast<std::string>(min_id);
WARNINGL1(false,msg.c_str());
if(returnNearestElmt)
{
Vmath::Vcopy(locCoords.num_elements(),locCoords,1,savLocCoords,1);
Vmath::Vcopy(locCoords.num_elements(),savLocCoords,1,locCoords,1);
return min_id;
}
else
......@@ -1817,7 +1825,6 @@ namespace Nektar
ASSERTL0(false,
"This method is not defined or valid for this class type");
LibUtilities::TranspositionSharedPtr trans;
return trans;
}
......@@ -1834,7 +1841,6 @@ namespace Nektar
ASSERTL0(false,
"This method is not defined or valid for this class type");
Array<OneD, unsigned int> NoModes(1);
return NoModes;
}
......@@ -1843,7 +1849,6 @@ namespace Nektar
ASSERTL0(false,
"This method is not defined or valid for this class type");
Array<OneD, unsigned int> NoModes(1);
return NoModes;
}
......@@ -1936,6 +1941,7 @@ namespace Nektar