Commit b0dc56a3 authored by Dave Moxey's avatar Dave Moxey
Browse files

Merge branch 'feature/madrid-tutorial' into 'master'

meshing update for madrid

This updates a number of the 2D meshing routines to be more useful for the tutorial in Madrid, i.e. removing unhelpful I/O.
Also has some minor updates to the documentation.

See merge request !726
parents 689f5dd8 4ce0d8f5
......@@ -24,16 +24,16 @@ if(NOT DEFINED OCE_DIR)
# Check for OSX needs to come first because UNIX evaluates to true on OSX
if(${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
if(DEFINED MACPORTS_PREFIX)
find_package(OCE 0.17 QUIET HINTS ${MACPORTS_PREFIX}/Library/Frameworks)
find_package(OCE 0.15 QUIET HINTS ${MACPORTS_PREFIX}/Library/Frameworks)
elseif(DEFINED HOMEBREW_PREFIX)
find_package(OCE 0.17 QUIET HINTS ${HOMEBREW_PREFIX}/Cellar/oce/*)
find_package(OCE 0.15 QUIET HINTS ${HOMEBREW_PREFIX}/Cellar/oce/*)
endif()
elseif(UNIX)
set(OCE_DIR "/usr/local/share/cmake/")
endif()
endif()
find_package(OCE 0.17 QUIET)
find_package(OCE 0.15 QUIET)
if(OCE_FOUND)
message(STATUS "-- OpenCASCADE Community Edition has been found.")
set(OCC_INCLUDE_DIR ${OCE_INCLUDE_DIRS})
......
......@@ -83,7 +83,7 @@
pages = {293-301},
year = {1996},
}
@article{CoRaNa98,
author = {M. Courtemanche and R. J. Ramirez and S. Nattel},
title = {Ionic mechanisms underlying human atrial action potential properties: insights from a mathematical model},
......@@ -103,7 +103,7 @@
pages = {1501-1526},
year = {1991},
}
@article{TuPa06,
author = {K. H. W. J. ten Tusscher and A. V. Panfilov},
title = {Alternans and spiral breakup in a human ventricular tissue model},
......@@ -123,7 +123,7 @@
pages = {4331-51},
year = {2011},
}
@article{ShKa96,
title={Tetrahedral \emph{hp} Finite Elements: Algorithms and Flow Simulations},
author={Sherwin, SJ and Karniadakis, G Em},
......@@ -378,9 +378,9 @@ year={2011}
}
@article{GuSh03,
Author="J.L. Guermond and J. Shen",
title="Velocity-correction projection methods for incompressible flows",
journal="SIAM J. Numer.\ Anal.",
Author="J.L. Guermond and J. Shen",
title="Velocity-correction projection methods for incompressible flows",
journal="SIAM J. Numer.\ Anal.",
volume=41,
pages = "112--134",
year=2003
......@@ -460,3 +460,18 @@ year={2011}
pages = {1079-1097},
}
@inproceedings{TuPeMo16,
abstract = {The generation of sufficiently high quality unstructured high-order meshes remains a significant obstacle in the adoption of high-order methods. However, there is little consensus on which approach is the most robust, fastest and produces the 'best' meshes. We aim to provide a route to investigate this question, by examining popular high-order mesh generation methods in the context of an efficient variational framework for the generation of curvilinear meshes. By considering previous works in a variational form, we are able to compare their characteristics and study their robustness. Alongside a description of the theory and practical implementation details, including an efficient multi-threading parallelisation strategy, we demonstrate the effectiveness of the framework, showing how it can be used for both mesh quality optimisation and untangling of invalid meshes.},
author = {Turner, M and Peir{\'{o}}, J and Moxey, D},
booktitle = {25th International Meshing Roundtable},
doi = {10.1016/j.proeng.2016.11.069},
file = {:Users/mike/Downloads/1-s2.0-S1877705816333781-main.pdf:pdf},
issn = {18777058},
keywords = {energy functional,high-order mesh generation,numerical optimization,variational mesh generation},
pages = {340--352},
title = {{A Variational Framework for High-Order Mesh Generation}},
url = {www.elsevier.com/locate/procedia%5Cnhttp://linkinghub.elsevier.com/retrieve/pii/S1877705816333781},
volume = {163},
year = {2016}
}
......@@ -69,7 +69,7 @@ format by issuing the command
\begin{lstlisting}[style=BashInputStyle]
FieldConvert in.fld out.fld:fld:format=Hdf5
\end{lstlisting}
%
%
\section{Range option \textit{-r}}
The FieldConvert range option \inltt{-r} allows the user to specify
a sub-range of the mesh (computational domain) by using an
......@@ -121,7 +121,7 @@ possibly also Reynolds stresses) into single file;
\item \inltt{extract}: Extract a boundary field;
\item \inltt{homplane}: Extract a plane from 3DH1D expansions;
\item \inltt{homstretch}: Stretch a 3DH1D expansion by an integer factor;
\item \inltt{innerproduct}: take the inner product between one or a series of fields with another field (or series of fields).
\item \inltt{innerproduct}: take the inner product between one or a series of fields with another field (or series of fields).
\item \inltt{interpfield}: Interpolates one field to another, requires fromxml, fromfld to be defined;
\item \inltt{interppointdatatofld}: Interpolates given discrete data using a finite difference approximation to a fld file given an xml file;
\item \inltt{interppoints}: Interpolates a set of points to another, requires fromfld and fromxml to be defined, a line or plane of points can be defined;
......@@ -130,7 +130,7 @@ possibly also Reynolds stresses) into single file;
\item \inltt{qualitymetric}: Evaluate a quality metric of the underlying mesh to show mesh quality;
\item \inltt{meanmode}: Extract mean mode (plane zero) of 3DH1D expansions;
\item \inltt{pointdatatofld}: Given discrete data at quadrature points
project them onto an expansion basis and output fld file;
project them onto an expansion basis and output fld file;
\item \inltt{printfldnorms}: Print L2 and LInf norms to stdout;
\item \inltt{scalargrad}: Computes scalar gradient field;
\item \inltt{scaleinputfld}: Rescale input field by a constant factor;
......@@ -217,7 +217,7 @@ new field. To use this we simply run
In this case, we have produced a Tecplot file which contains the mesh and a
variable that contains the composite ID. To assist in boundary identification,
the input file \inlsh{mesh.xml} should be a surface XML file that can be
obtained through the \mc \inltt{extract} module (see section
obtained through the \nm \inltt{extract} module (see section
\ref{s:utilities:nekmesh:extract}).
\subsection{Sum two .fld files: \textit{addFld} module}
......@@ -234,8 +234,8 @@ which multiply the values of a given .fld file by a constant \inltt{value}.
is the associated session file, \inltt{file2.fld} is the .fld file which
is summed to \inltt{file1.fld} and finally \inltt{file3.fld} is the output
which contain the sum of the two .fld files.
\inltt{file3.fld} can be processed in a similar way as described
in section \ref{s:utilities:fieldconvert:sub:convert} to visualise
\inltt{file3.fld} can be processed in a similar way as described
in section \ref{s:utilities:fieldconvert:sub:convert} to visualise
the result either in Tecplot, Paraview or VisIt.
%
%
......@@ -249,8 +249,8 @@ use the \inltt{combineAvg} module of FieldConvert
file3.fld
\end{lstlisting}
%
\inltt{file3.fld} can be processed in a similar way as described
in section \ref{s:utilities:fieldconvert:sub:convert} to visualise
\inltt{file3.fld} can be processed in a similar way as described
in section \ref{s:utilities:fieldconvert:sub:convert} to visualise
the result either in Tecplot, Paraview or VisIt.
%
%
......@@ -325,8 +325,8 @@ of interest. Finally to process the surface file one can use
FieldConvert test-b0.xml test-b0.fld test-b0.dat
\end{lstlisting}
%
This will obviously generate a Tecplot output if a .dat file
is specified as last argument. A .vtu extension will produce
This will obviously generate a Tecplot output if a .dat file
is specified as last argument. A .vtu extension will produce
a Paraview or VisIt output.
%
%
......@@ -348,25 +348,25 @@ to visualise the result either in Tecplot, Paraview or VisIt.
To obtain a 2D expansion containing one of the planes of a
3DH1D field file, use the command:
\begin{lstlisting}[style=BashInputStyle]
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m homplane:planeid=value file.xml file.fld file-plane.fld
\end{lstlisting}
If the option \inltt{wavespace} is used, the Fourier coefficients
corresponding to \inltt{planeid} are obtained. The command in this case is:
\begin{lstlisting}[style=BashInputStyle]
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m homplane:wavespace:planeid=value file.xml \
file.fld file-plane.fld
\end{lstlisting}
The output file \inltt{file-plane.fld} can be processed in a similar
The output file \inltt{file-plane.fld} can be processed in a similar
way as described in section \ref{s:utilities:fieldconvert:sub:convert}
to visualise it either in Tecplot or in Paraview.
\subsection{Stretch a 3DH1D expansion: \textit{homstretch} module}
To stretch a 3DH1D expansion in the z-direction, use the command:
\begin{lstlisting}[style=BashInputStyle]
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m homstretch:factor=value file.xml file.fld file-stretch.fld
\end{lstlisting}
The number of modes in the resulting field can be chosen using the command-line
......@@ -374,7 +374,7 @@ parameter \inltt{output-points-hom-z}. Note that the output for
this module should always be a \inltt{.fld} file and this should not
be used in combination with other modules using a single command.
The output file \inltt{file-stretch.fld} can be processed in a similar
The output file \inltt{file-stretch.fld} can be processed in a similar
way as described in section \ref{s:utilities:fieldconvert:sub:convert}
to visualise it either in Tecplot or in Paraview.
......@@ -392,7 +392,7 @@ determine the inner product of these fields. The input option
\inltt{fromfld} must therefore be specified in this module.
Optional arguments for this module are \inltt{fields} which allow you to specify
the fields that you wish to use for the inner product, i.e.
the fields that you wish to use for the inner product, i.e.
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m innerproduct:fromfld=file1.fld:fields=''0,1,2'' file2.xml \
file2.fld out.stdout
......@@ -412,7 +412,7 @@ will take the inner product between a file names
field1\_0.fld, field1\_1.fld, field1\_2.fld and field1\_3.fld with
respect to field2.fld.
Analogously including the options \inltt{allfromflds}, i.e.
Analogously including the options \inltt{allfromflds}, i.e.
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m innerproduct:fromfld=file1.fld:multifldids=''0-3'':\
allfromflds file2.xml file2.fld out.stdout
......@@ -424,7 +424,7 @@ the unique inner products are evaluated so if four from fields are
given only the related triangular number $4\times5/2=10$ of inner
products are evaluated.
This option can be run in parallel.
This option can be run in parallel.
%
%
......@@ -544,7 +544,7 @@ $(x0,y0)$ to $(x1,y1)$ which can also be used in 3D by specifying $(x0,y0,z0)$
to $(x1,y1,z1)$.
An extraction of a plane of points can also be specified by
\begin{lstlisting}[style=BashInputStyle]
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m interppoints:fromxml=file1.xml:fromfld=file1.fld:\
plane=npts1,npts2,x0,y0,z0,x1,y1,z1,x2,y2,z2,x3,y3,z3
\end{lstlisting}
......@@ -553,13 +553,13 @@ direction and $(x0,y0,z0)$, $(x1,y1,z1)$, $(x2,y2,z2)$ and $(x3,y3,z3)$
define the plane of points specified in a clockwise or anticlockwise direction.
In addition an extraction of a box of points can also be specified by
\begin{lstlisting}[style=BashInputStyle]
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m interppoints:fromxml=file1.xml:fromfld=file1.fld:\
box=npts1,npts2,npts3,xmin,xmax,ymin,ymax,zmin,zmax
\end{lstlisting}
where \inltt{npts1,npts2,npts3} is the number of equispaced points in each
direction and $(xmin,ymin,zmin)$ and $(xmax,ymax,zmax)$
define the limits of the box of points.
where \inltt{npts1,npts2,npts3} is the number of equispaced points in each
direction and $(xmin,ymin,zmin)$ and $(xmax,ymax,zmax)$
define the limits of the box of points.
For the plane and box interpolation there is an additional optional
argument \inltt{cp=p0,q} which adds to the interpolated fields the value of
......@@ -568,7 +568,7 @@ pressure and $q$ is the free stream dynamics pressure. If the input
does not contain a field ``p'' or a velocity field ``u,v,w'' then $cp$
and $cp0$ are not evaluated accordingly
%
\begin{notebox}
\begin{notebox}
This module runs in parallel for the plane and box extraction of points. In this case a series of .dat files are generated that can be concatenated together. Other options do not run in parallel.
\end{notebox}
%
......@@ -611,7 +611,7 @@ have these as separate options.
In addition to the \inltt{smooth} or \inltt{globalcondense} options
you can specify \inltt{removesmallcontour}=100 which will remove
separate isocontours of less than 100 triangles.
separate isocontours of less than 100 triangles.
\begin{notebox}
Currently this option is only set up for triangles, quadrilaterals,
......@@ -633,7 +633,7 @@ keep.
The output file \inltt{jacenergy.fld} can be processed in a similar
way as described in section \ref{s:utilities:fieldconvert:sub:convert}
to visualise the result either in Tecplot, Paraview or VisIt.
to visualise the result either in Tecplot, Paraview or VisIt.
\subsection{Calculate mesh quality: \textit{qualitymetric} module}
......@@ -675,11 +675,11 @@ Two quality metrics are implemented that produce scalar fields $Q$:
To obtain a 2D expansion containing the mean mode (plane zero in Fourier space) of a
3DH1D field file, use the command:
\begin{lstlisting}[style=BashInputStyle]
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m meanmode file.xml file.fld file-mean.fld
\end{lstlisting}
The output file \inltt{file-mean.fld} can be processed in a similar
The output file \inltt{file-mean.fld} can be processed in a similar
way as described in section \ref{s:utilities:fieldconvert:sub:convert}
to visualise the result either in Tecplot or in Paraview or VisIt.
%
......@@ -697,7 +697,7 @@ FieldConvert --noequispaced -m pointdatatofld file.pts file.xml file.fld
This command will read in the points provided in the \inltt{file.pts}
and assume these are given at the same quadrature distribution as the
mesh and expansions defined in \inltt{file.xml} and output the field
to \inltt{file.fld}. If the points do not match an error will be dumped.
to \inltt{file.fld}. If the points do not match an error will be dumped.
The file \inltt{file.pts} which is assumed to be given by an interpolation from another source is of the form:
......@@ -720,7 +720,7 @@ point, the first, second, and third columns contains the
$x,y,z$-coordinate and subsequent columns contain the field values, in
this case the $p$-value So in the general case of $n$-dimensional
data, the $n$ coordinates are specified in the first $n$ columns
accordingly followed by the field data.
accordingly followed by the field data.
The default argument is to use the equispaced (but potentially
collapsed) coordinates which can be obtained from the command.
......@@ -755,7 +755,7 @@ this option.
\subsection{Print L2 and LInf norms: \textit{printfldnorms} module}
\begin{lstlisting}[style=BashInputStyle]
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m printfldnorms test.xml test.fld out.stdout
\end{lstlisting}
......@@ -920,7 +920,7 @@ replacing \inltt{<nprocs>} with the number of processors. For the
\inltt{.dat} and \inltt{.plt} outputs the current version will produce
a single output file. However it is also sometimes useful to produce
multiple output files, one for each partition, and this
can be done by using the \inltt{writemultiplefiles} option, i.e.
can be done by using the \inltt{writemultiplefiles} option, i.e.
\begin{lstlisting}[style=BashInputStyle]
mpirun -np <nprocs> FieldConvert test.xml test.fld \
test.dat:dat:writemultiplefiles
......@@ -962,7 +962,7 @@ FieldConvert --nprocs 10 --procid 2 \
This call will only therefore consider the interpolation process across one
partition (namely, partition 2). To create the full interpolated field requires
a loop over each of the partitions, which, in a bash shell can be run as
\begin{lstlisting}[style=BashInputStyle]
\begin{lstlisting}[style=BashInputStyle]
for n in `seq 0 9`; do
FieldConvert --nprocs 10 --procid $n \
-m interpfield:fromxml=file1.xml:fromfld=file1.fld \
......@@ -975,7 +975,7 @@ of the different parallel partitions in files with names \inltt{P0000000.fld},
parallel field file. However, the \inltt{Info.xml} file, which contains the
information about which elements lie in each partition, is missing. This can be
generated by using the command
\begin{lstlisting}[style=BashInputStyle]
\begin{lstlisting}[style=BashInputStyle]
FieldConvert --nprocs 10 file2.xml file2.fld/Info.xml:info
\end{lstlisting}
Note the final \inltt{:info} extension on the last argument is necessary to tell
......@@ -988,7 +988,7 @@ input/output XML files.
Another approach to serially processing a large file is to initially process the
file into multiple partitions. This can be done with the \inltt{--part-only}
option. So the command
\begin{lstlisting}[style=BashInputStyle]
\begin{lstlisting}[style=BashInputStyle]
FieldConvert --part-only 10 file.xml file.fld
\end{lstlisting}
will partition the mesh into 10 partitions and write each partition into a
......@@ -998,7 +998,7 @@ partitioned XML files \inltt{P0000000.xml}, \inltt{P0000001.xml}, \dots,
There is also a \inltt{--part-only-overlapping} option, which can be run in the
same fashion.
\begin{lstlisting}[style=BashInputStyle]
\begin{lstlisting}[style=BashInputStyle]
FieldConvert --part-only-overlapping 10 file.xml file.fld
\end{lstlisting}
In this mode, the mesh is partitioned into 10 partitions in a similar manner,
......
This diff is collapsed.
......@@ -121,7 +121,7 @@ void Generator2D::Process()
for (int i = 1; i <= m_mesh->m_cad->GetNumSurf(); i++)
{
m_facemeshes[i] = MemoryManager<FaceMesh>::AllocateSharedPtr(
i, m_mesh, m_curvemeshes, m_mesh->m_cad->GetNumSurf() > 100);
i, m_mesh, m_curvemeshes, 100);
m_facemeshes[i]->OrientateCurves();
MakeBL(i, m_facemeshes[i]->GetEdges());
......@@ -145,7 +145,7 @@ void Generator2D::Process()
}
m_facemeshes[i] = MemoryManager<FaceMesh>::AllocateSharedPtr(
i, m_mesh, m_curvemeshes, m_mesh->m_cad->GetNumSurf() > 100);
i, m_mesh, m_curvemeshes, 100);
m_facemeshes[i]->Mesh();
}
......@@ -282,7 +282,7 @@ void Generator2D::MakeBL(int faceid, vector<EdgeLoop> e)
ElmtConfig conf(LibUtilities::eQuadrilateral, 1, false, false);
vector<int> tags;
tags.push_back(102);
tags.push_back(101);
ElementSharedPtr E = GetElementFactory().CreateInstance(
LibUtilities::eQuadrilateral, conf, qns, tags);
......
......@@ -111,6 +111,11 @@ public:
return m_2d;
}
void SetNACA(std::string i)
{
m_naca = i;
}
/**
* @brief Initialises CAD and makes surface, curve and vertex maps.
*
......@@ -203,6 +208,7 @@ protected:
std::map<int, CADVertSharedPtr> m_verts;
bool m_2d;
std::string m_naca;
};
typedef boost::shared_ptr<CADSystem> CADSystemSharedPtr;
......
......@@ -32,6 +32,7 @@
// Description: cad object methods.
//
////////////////////////////////////////////////////////////////////////////////
#include <LibUtilities/BasicUtils/ParseUtils.hpp>
#include <NekMeshUtils/CADSystem/OCE/CADSystemOCE.h>
#include <NekMeshUtils/CADSystem/OCE/CADVertOCE.h>
......@@ -51,8 +52,9 @@ std::string CADSystemOCE::key = GetEngineFactory().RegisterCreatorFunction(
bool CADSystemOCE::LoadCAD()
{
if (m_name.find('.') != std::string::npos)
if (m_naca.size() == 0)
{
//not a naca profile behave normally
// Takes step file and makes OpenCascade shape
STEPControl_Reader reader;
reader = STEPControl_Reader();
......@@ -67,8 +69,6 @@ bool CADSystemOCE::LoadCAD()
}
else
{
cout << m_name << " is not a STEP file, assuming it is "
<< "a 4 digit NACA code" << endl;
shape = BuildNACA(m_name);
}
......@@ -361,6 +361,9 @@ Array<OneD, NekDouble> CADSystemOCE::GetBoundingBox()
TopoDS_Shape CADSystemOCE::BuildNACA(string naca)
{
ASSERTL0(naca.length() == 4, "not a 4 digit code");
vector<NekDouble> data;
ParseUtils::GenerateUnOrderedVector(m_naca.c_str(), data);
ASSERTL0(data.size() == 5, "not a vaild domain");
int n = boost::lexical_cast<int>(naca);
NekDouble T = (n%100) / 100.0;
......@@ -442,17 +445,26 @@ TopoDS_Shape CADSystemOCE::BuildNACA(string naca)
BRepBuilderAPI_MakeWire aeroWireBuilder(aeroEdge, TeEdge);
TopoDS_Wire aeroWire = aeroWireBuilder.Wire();
BRepBuilderAPI_MakeEdge domInlBuilder(gp_Pnt(-2000.0,-2000.0,0.0),
gp_Pnt(-2000.0,2000.0,0.0));
gp_Trsf transform;
gp_Ax1 rotAx(gp_Pnt(500.0,0.0,0.0),gp_Dir(gp_Vec(0.0,0.0,-1.0)));
transform.SetRotation(rotAx, data[4]/180.0*M_PI);
TopLoc_Location mv(transform);
aeroWire.Move(mv);
BRepBuilderAPI_MakeEdge domInlBuilder(gp_Pnt(data[0]*1000.0,data[1]*1000.0,0.0),
gp_Pnt(data[0]*1000.0,data[3]*1000.0,0.0));
TopoDS_Edge inlEdge = domInlBuilder.Edge();
BRepBuilderAPI_MakeEdge domTopBuilder(gp_Pnt(-2000.0,2000.0,0.0),
gp_Pnt(5000.0,2000.0,0.0));
BRepBuilderAPI_MakeEdge domTopBuilder(gp_Pnt(data[0]*1000.0,data[3]*1000.0,0.0),
gp_Pnt(data[2]*1000.0,data[3]*1000.0,0.0));
TopoDS_Edge topEdge = domTopBuilder.Edge();
BRepBuilderAPI_MakeEdge domOutBuilder(gp_Pnt(5000.0,2000.0,0.0),
gp_Pnt(5000.0,-2000.0,0.0));
BRepBuilderAPI_MakeEdge domOutBuilder(gp_Pnt(data[2]*1000.0,data[3]*1000.0,0.0),
gp_Pnt(data[2]*1000.0,data[1]*1000.0,0.0));
TopoDS_Edge outEdge = domOutBuilder.Edge();
BRepBuilderAPI_MakeEdge domBotBuilder(gp_Pnt(5000.0,-2000.0,0.0),
gp_Pnt(-2000.0,-2000.0,0.0));
BRepBuilderAPI_MakeEdge domBotBuilder(gp_Pnt(data[2]*1000.0,data[1]*1000.0,0.0),
gp_Pnt(data[0]*1000.0,data[1]*1000.0,0.0));
TopoDS_Edge botEdge = domBotBuilder.Edge();
BRepBuilderAPI_MakeWire domWireBuilder(inlEdge, topEdge, outEdge, botEdge);
......
......@@ -78,5 +78,6 @@
#include <BRepBuilderAPI_MakeWire.hxx>
#include <BRepBuilderAPI_MakeFace.hxx>
#include <STEPControl_Writer.hxx>
#include <gp_Ax1.hxx>
#endif
......@@ -53,6 +53,8 @@ ProcessLoadCAD::ProcessLoadCAD(MeshSharedPtr m) : ProcessModule(m)
ConfigOption(false, "", "Generate prisms on these surfs");
m_config["2D"] =
ConfigOption(true, "", "allow 2d loading");
m_config["NACA"] =
ConfigOption(false, "", "naca domain");
}
ProcessLoadCAD::~ProcessLoadCAD()
......@@ -75,6 +77,11 @@ void ProcessLoadCAD::Process()
m_mesh->m_cad->Set2D();
}
if(m_config["NACA"].beenSet)
{
m_mesh->m_cad->SetNACA(m_config["NACA"].as<string>());
}
ASSERTL0(m_mesh->m_cad->LoadCAD(), "Failed to load CAD");
if (m_mesh->m_verbose)
......
......@@ -66,16 +66,14 @@ Line::Line(ElmtConfig pConf,
{
m_vertex.push_back(pNodeList[i]);
}
vector<NodeSharedPtr> edgeNodes;
if (m_conf.m_order > 1)
{
for (int j = 0; j < n; ++j)
{
edgeNodes.push_back(pNodeList[2 + j]);
m_volumeNodes.push_back(pNodeList[2 + j]);
}
}
m_edge.push_back(boost::shared_ptr<Edge>(new Edge(
pNodeList[0], pNodeList[1], edgeNodes, m_conf.m_edgeCurveType)));
}
SpatialDomains::GeometrySharedPtr Line::GetGeom(int coordDim)
......
......@@ -104,6 +104,18 @@ NekDouble Octree::Query(Array<OneD, NekDouble> loc)
{
// starting at master octant 0 move through succsesive m_octants which
// contain the point loc until a leaf is found
//first search through sourcepoints
NekDouble tmp = numeric_limits<double>::max();
for(int i = 0; i < m_lsources.size(); i++)
{
if(m_lsources[i].withinRange(loc))
{
tmp = min(m_lsources[i].delta,tmp);
}
}
OctantSharedPtr n = m_masteroct;
int quad;
......@@ -173,7 +185,7 @@ NekDouble Octree::Query(Array<OneD, NekDouble> loc)
found = true;
}
}
return n->GetDelta();
return min(n->GetDelta(),tmp);
}
void Octree::WriteOctree(string nm)
......@@ -468,7 +480,7 @@ void Octree::SmoothSurfaceOctants()
{
if (it->second[j]->IsDeltaKnown() &&
it->second[j]->GetDelta() < oct->GetDelta() &&
ddx(oct, it->second[j]) > 0.1)
ddx(oct, it->second[j]) > 0.2)
{
check.push_back(it->second[j]);
}
......@@ -485,9 +497,9 @@ void Octree::SmoothSurfaceOctants()
{
NekDouble r = oct->Distance(check[j]);
if (0.099 * r + check[j]->GetDelta() < deltaSM)
if (0.199 * r + check[j]->GetDelta() < deltaSM)
{
deltaSM = 0.099 * r + check[j]->GetDelta();
deltaSM = 0.199 * r + check[j]->GetDelta();
}
}
oct->SetDelta(deltaSM);
......@@ -538,9 +550,9 @@ void Octree::PropagateDomain()
{
NekDouble r = oct->Distance(known[j]);
if (0.14 * r + known[j]->GetDelta() < m_maxDelta)
if (0.199 * r + known[j]->GetDelta() < m_maxDelta)
{
deltaPrime.push_back(0.14 * r +
deltaPrime.push_back(0.199 * r +
known[j]->GetDelta());
}
else
......@@ -817,59 +829,6 @@ int Octree::CountElemt()
return int(total);
}
//struct to assist in the creation of linesources in the code
struct linesource
{
Array<OneD, NekDouble> x1, x2;
NekDouble R, delta;
linesource(Array<OneD, NekDouble> p1,
Array<OneD, NekDouble> p2,
NekDouble r,
NekDouble d)
: x1(p1), x2(p2), R(r), delta(d)
{
}
bool withinRange(Array<OneD, NekDouble> p)
{
Array<OneD, NekDouble> Le(3), Re(3), s(3);
for (int i = 0; i < 3; i++)
{
Le[i] = p[i] - x1[i];
Re[i] = p[i] - x2[i];
s[i] = x2[i] - x1[i];
}
Array<OneD, NekDouble> dev(3);
dev[0] = Le[1] * Re[2] - Re[1] * Le[2];
dev[1] = Le[0] * Re[2] - Re[0] * Le[2];
dev[2] = Le[0] * Re[1] - Re[0] * Le[1];
NekDouble dist =
sqrt(dev[0] * dev[0] + dev[1] * dev[1] + dev[2] * dev[2]) /
sqrt(s[0] * s[0] + s[1] * s[1] + s[2] * s[2]);
NekDouble t = -1.0 * ((x1[0] - p[0]) * s[0] + (x1[1] - p[1]) * s[1] +
(x1[1] - p[1]) * s[1]) /
Length() / Length();
if (dist < R && !(t > 1) && !(t < 0))
{
return true;