Commit 3e3f1e50 authored by David Moxey's avatar David Moxey

Merge branch 'master' into fix/error-output

parents 9567b2aa f9ebc406
......@@ -3,8 +3,3 @@
path = docs/tutorial
url = git@gitlab.nektar.info:nektar/tutorial
ignore = all
[submodule "docs/developer-guide"]
branch = master
path = docs/developer-guide
url = git@gitlab.nektar.info:nektar/developer-guide
ignore = all
......@@ -54,7 +54,7 @@ v5.0.0
- Fix ability to have periodic boundary conditions that are aligned by a
rotation rather than just a translation (!933)
- Added a coupling interface to exchange data between solvers at run time
and a DummySolver to test the implementations (!853, !931 !973)
and a DummySolver to test the implementations (!853, !931, !950, !973)
- Fix compilation issue with newer Boost versions and clang (!940)
- If only `NEKTAR_BUILD_LIBRARY` is enabled, only libraries up to and including
`MultiRegions` will be built by default (!945)
......@@ -69,6 +69,8 @@ v5.0.0
- Add HDF5 geometry format (!977)
- Combine and generalise demo code in StdRegions and LocalRegions (!993)
- Fix for error output to allow for custom error streams (!944)
- Fixed bug in ReOrientQuadFacePhysMap (!1003)
- Add NekPy Python interface (!962, !990, !989, !1004)
**NekMesh**:
- Add feature to read basic 2D geo files as CAD (!731)
......@@ -100,8 +102,10 @@ v5.0.0
- Fix manifold face curvature nodes (!913)
- Fix writing 1D surfaces (!930)
- Fix surface string parsing in BL splitting (!937)
- Enable use of distributed packages for triangle and TetGen (!953)
- Fix issue with MLSC after Scotch conversion (!943)
- Add support for Gmsh 4.0 mesh file format (!964)
- Fix issue with extracting 1D curved surface from 2D file (!984)
- Fix surface extraction, added regression test (!994)
**FieldConvert**:
......@@ -120,7 +124,9 @@ v5.0.0
- Add module for evaluating the mean of variables on the domain (!894)
- Add module for counting the total number of DOF (!948)
- Fixed wss module for compressible flows (!958)
- Made Sutherland's law non-dimensional (!972)
- Add module for removing fields from .fld files (!978)
- Fixed nparts option in FieldConvert and automated Info.xml generation (!995)
- Added if statement to fix case of 1D/2D manifold interpolation in 1D/2D space,
added check on dimensions for interpolation, fixed seg interp (!999)
......@@ -139,6 +145,7 @@ v5.0.0
seg, quad and hex elements (!771, !862)
- Fix compressible solver with NUMMODES=1 (!868)
- Introduce equations of state to account for real gas effects (!880)
- Made Sutherland's law non-dimensional (!972)
- Modified pressure outlet BCs to allow for the reference static pressure to be
set from the VALUE fields (!981)
......
......@@ -279,8 +279,7 @@ MACRO(ADD_NEKPY_LIBRARY name)
# Install __init__.py files.
CONFIGURE_FILE(${CMAKE_SOURCE_DIR}/cmake/python/init.py.in
${CMAKE_BINARY_DIR}/NekPy/${name}/__init__.py)
SET_TARGET_PROPERTIES(_${name} PROPERTIES
LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/NekPy/${name})
INSTALL(TARGETS _${name} DESTINATION ${CMAKE_BINARY_DIR}/NekPy/${name})
ENDMACRO()
MACRO(ADD_NEKPY_EXECUTABLE name source)
......@@ -288,4 +287,4 @@ MACRO(ADD_NEKPY_EXECUTABLE name source)
INSTALL(FILES ${source} DESTINATION ${CMAKE_INSTALL_PREFIX}/bin)
FILE(COPY ${source} DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
ADD_CUSTOM_TARGET(${name} SOURCES ${source})
ENDMACRO()
\ No newline at end of file
ENDMACRO()
......@@ -7,31 +7,50 @@
########################################################################
IF(NEKTAR_USE_MESHGEN)
INCLUDE(ExternalProject)
EXTERNALPROJECT_ADD(
tetgen-1.5
PREFIX ${TPSRC}
URL ${TPURL}/tetgen.zip
URL_MD5 6d62e63f9b1e7a8ce53d5bc87e6a0a09
STAMP_DIR ${TPBUILD}/stamp
DOWNLOAD_DIR ${TPSRC}
SOURCE_DIR ${TPSRC}/tetgen-1.5
BINARY_DIR ${TPBUILD}/tetgen-1.5
TMP_DIR ${TPBUILD}/tetgen-1.5-tmp
INSTALL_DIR ${TPDIST}
CONFIGURE_COMMAND ${CMAKE_COMMAND}
# Search for system-installed Triangle installation
FIND_LIBRARY(TETGEN_LIBRARY NAMES tet)
FIND_PATH(TETGEN_INCLUDE_DIR tetgen.h)
IF(TETGEN_LIBRARY AND TETGEN_INCLUDE_DIR)
SET(BUILD_TETGEN OFF)
ELSE()
SET(BUILD_TETGEN ON)
ENDIF()
OPTION(THIRDPARTY_BUILD_TETGEN
"Build TetGen library from ThirdParty." ${BUILD_TETGEN})
IF (THIRDPARTY_BUILD_TETGEN)
INCLUDE(ExternalProject)
EXTERNALPROJECT_ADD(
tetgen-1.5
PREFIX ${TPSRC}
URL ${TPURL}/tetgen.zip
URL_MD5 6d62e63f9b1e7a8ce53d5bc87e6a0a09
STAMP_DIR ${TPBUILD}/stamp
DOWNLOAD_DIR ${TPSRC}
SOURCE_DIR ${TPSRC}/tetgen-1.5
BINARY_DIR ${TPBUILD}/tetgen-1.5
TMP_DIR ${TPBUILD}/tetgen-1.5-tmp
INSTALL_DIR ${TPDIST}
CONFIGURE_COMMAND ${CMAKE_COMMAND}
-G ${CMAKE_GENERATOR}
-DCMAKE_C_COMPILER:FILEPATH=${CMAKE_C_COMPILER}
-DCMAKE_CXX_COMPILER:FILEPATH=${CMAKE_CXX_COMPILER}
-DCMAKE_INSTALL_PREFIX:PATH=${TPDIST}
${TPSRC}/tetgen-1.5
)
THIRDPARTY_LIBRARY(TETGEN_LIBRARY STATIC tetgen
DESCRIPTION "Tetgen library")
SET(TETGEN_INCLUDE_DIR ${TPDIST}/include CACHE FILEPATH
"TetGen include" FORCE)
MESSAGE(STATUS "Build TetGen: ${TETGEN_LIBRARY}")
SET(TETGEN_CONFIG_INCLUDE_DIR ${TPINC})
)
THIRDPARTY_LIBRARY(TETGEN_LIBRARY STATIC tetgen
DESCRIPTION "Tetgen library")
SET(TETGEN_INCLUDE_DIR ${TPDIST}/include CACHE FILEPATH
"TetGen include" FORCE)
MESSAGE(STATUS "Build TetGen: ${TETGEN_LIBRARY}")
SET(TETGEN_CONFIG_INCLUDE_DIR ${TPINC})
ELSE()
ADD_CUSTOM_TARGET(tetgen-1.5 ALL)
MESSAGE(STATUS "Found TetGen: ${TETGEN_LIBRARY}")
SET(TETGEN_CONFIG_INCLUDE_DIR ${TETGEN_INCLUDE_DIR})
ENDIF()
MARK_AS_ADVANCED(TETGEN_LIBRARY)
MARK_AS_ADVANCED(TETGEN_INCLUDE_DIR)
......
......@@ -7,7 +7,15 @@
########################################################################
IF(NEKTAR_USE_MESHGEN)
SET(BUILD_TRIANGLE ON)
# Search for system-installed Triangle installation
FIND_LIBRARY(TRIANGLE_LIBRARY NAMES triangle)
FIND_PATH(TRIANGLE_INCLUDE_DIR triangle.h)
IF(TRIANGLE_LIBRARY AND TRIANGLE_INCLUDE_DIR)
SET(BUILD_TRIANGLE OFF)
ELSE()
SET(BUILD_TRIANGLE ON)
ENDIF()
OPTION(THIRDPARTY_BUILD_TRIANGLE
"Build Triangle library from ThirdParty." ${BUILD_TRIANGLE})
......
Subproject commit 0724faa50ed893acb7ca256b1582d288396bb5d4
Subproject commit 7c88d86e78a8908720b971d71ce8b5aafdd5eb92
Subproject commit 8f8ed6f96bad562bedd8c242a4dc247e605ca20d
......@@ -1153,63 +1153,13 @@ process each parallel partition in serial, for example when interpolating a
solution field from one mesh to another or creating an output file for
visualization.
\subsection{Using the \textit{nparts} options}
One option is to use the \inltt{nparts} command line
option. For example, the following command will create a
\inltt{.vtu} file using 10 partitions of \inltt{file1.xml}:
\begin{lstlisting}[style=BashInputStyle]
FieldConvert --nparts 10 file1.xml file1.fld file1.vtu
\end{lstlisting}
Note this will create a parallel vtu file as it processes each partition.
Another example is to interpolate \inltt{file1.fld} from one mesh
\inltt{file1.xml} to another \inltt{file2.xml}. If the mesh files are
large we can do this by partitioning \inltt{file2.xml} into 10 (or more)
partitions and interpolating each partition one by one using the
command:
\begin{lstlisting}[style=BashInputStyle]
FieldConvert --nparts 10 -m interpfield:fromxml=file1.xml:fromfld=file1.fld \
file2.xml file2.fld
\end{lstlisting}
Note that internally the routine uses the range option so that it
only has to load the part of \inltt{file1.xml} that overlaps with each
partition of \inltt{file2.xml}.
The resulting output will lie in a directory called \inltt{file2.fld}, with each
of the different parallel partitions in files with names \inltt{P0000000.fld},
\inltt{P0000001.fld}, \dots, \inltt{P0000009.fld}. This is nearly a complete
parallel field file. However, when the output file is in the .fld format,
the \inltt{Info.xml} file, which contains the information about which elements
lie in each partition, is not correct since it will only contain the information for one of the partitions. The correct \inltt{Info.xml} file can be generated by using the command
\begin{lstlisting}[style=BashInputStyle]
FieldConvert file2.xml file2.fld/Info.xml:info:nparts=10
\end{lstlisting}
Note the \inltt{:info} extension on the last argument is necessary to tell
FieldConvert that you wish to generate an info file, but with the extension
\inltt{.xml}. This syntax allows the routine not to get confused with the
input/output XML files.
\subsection{Running in parallel with the \textit{ nparts} option}
The examples above will process each partition serially which may now
take a while for many partitions. You can however run this option in
parallel using a smaller number of cores than the nparts.
For the example of creating a vtu file above you can use 4 processors concurrently with the command line:
\begin{lstlisting}[style=BashInputStyle]
mpirun -n 4 FieldConvert --nparts 10 file1.xml file1.fld file1.vtu
\end{lstlisting}
Obviously the executable will have to have been compiled with the MPI option for this to work.
\subsection{Using the \textit{ part-only} and \textit{ part-only-overlapping} options}
The options above will all load in the full \inltt{file1.xml}, partition
it into \inltt{nparts} files in a directory called \inltt{file1\_xml}.
This can be expensive if the \inltt{file1.xml} is already large. So instead you can
pre-partition the file using the \inltt{--part-only}
option. So the command
Loading full \inltt{file1.xml} can be expensive if the
\inltt{file1.xml} is already large. So instead you can pre-partition
the file using the \inltt{--part-only} option. So the
command
\begin{lstlisting}[style=BashInputStyle]
FieldConvert --part-only 10 file.xml file.fld
\end{lstlisting}
......@@ -1218,13 +1168,6 @@ directory called \inltt{file\_xml}. If you enter this directory you will find
partitioned XML files \inltt{P0000000.xml}, \inltt{P0000001.xml}, \dots,
\inltt{P0000009.xml} which can then be processed individually as outlined above.
If you have a partitioned directory either from a parallel run or using the \inltt{--part-only} option you can now run the \inltt{FieldConvert} option using the command
\begin{lstlisting}[style=BashInputStyle]
mpirun -n 4 FieldConvert --nparts 10 file1\_xml:xml file1.fld file1.vtu
\end{lstlisting}
Note the form \inltt{file1\_xml:xml} option tells the code it is a parallel partition which should be treated as an \inltt{xml} type file.
There is also a \inltt{--part-only-overlapping} option, which can be run in the
same fashion.
\begin{lstlisting}[style=BashInputStyle]
......@@ -1241,6 +1184,58 @@ within a partition. using the \inltt{--part-only-overlapping} option will still
yield a shrinking isocontour, but the overlapping partitions help to overlap the
partition boundaries.
\subsection{Using the \textit{nparts} options}
If you have a partitioned directory either from a parallel run or
using the \inltt{--part-only} option you can now run the
\inltt{FieldConvert} option using the \inltt{nparts} command line
option, that is
\begin{lstlisting}[style=BashInputStyle]
FieldConvert --nparts 10 file1\_xml:xml file1.fld file1.vtu
\end{lstlisting}
Note the form \inltt{file1\_xml:xml} option tells the code it is a
parallel partition which should be treated as an \inltt{xml} type
file. The argument of \inltt{nparts} should correspond to the number
of partitions used in generating the file1\_xml directory. This will
create a parallel vtu file as it processes each partition.
Another example is to interpolate \inltt{file1.fld} from one mesh
\inltt{file1.xml} to another \inltt{file2.xml}. If the mesh files are
large we can do this by partitioning \inltt{file2.xml} into 10 (or
more) partitions to generate the \inltt{file2\_xml} directory and
interpolating each partition one by one using the command:
\begin{lstlisting}[style=BashInputStyle]
FieldConvert --nparts 10 -m interpfield:fromxml=file1.xml:fromfld=file1.fld \
file2_xml:xml file2.fld
\end{lstlisting}
Note that internally the routine uses the range option so that it only
has to load the part of \inltt{file1.xml} that overlaps with each
partition of \inltt{file2.xml}. The resulting output will lie in a
directory called \inltt{file2.fld}, with each of the different
parallel partitions in files with names \inltt{P0000000.fld},
\inltt{P0000001.fld}, \dots, \inltt{P0000009.fld}. In previous
versions of FieldConvert it was necessary to generate an updated
\inltt{Info.xml} file but in the current version it should
automatically update this file.
\subsection{Running in parallel with the \textit{ nparts} option}
The examples above will process each partition serially which may now
take a while for many partitions. You can however run this option in
parallel using a smaller number of cores than the nparts.
For the example of creating a vtu file above you can use 4 processors
concurrently with the command line:
\begin{lstlisting}[style=BashInputStyle]
mpirun -n 4 FieldConvert --nparts 10 file1\_xml:xml file1.fld file1.vtu
\end{lstlisting}
Obviously the executable will have to have been compiled with the MPI
option for this to work.
%%% Local Variables:
%%% mode: latex
%%% TeX-master: "../user-guide"
......
......@@ -220,6 +220,15 @@ void InputXml::Process(po::variables_map &vm)
m_f->m_session = LibUtilities::SessionReader::CreateInstance(
argc, (char **)argv, files, m_f->m_comm);
if (vm.count("nparts"))
{
// make sure have pre-partitioned mesh for nparts option
ASSERTL0(boost::icontains(files[0],"_xml"),
"Expect the mesh to have been pre-partitioned when "
" using the\"--nparts\" option. Please use \"--part-only\" "
"option to prepartition xml file.");
}
// Free up memory.
delete[] argv;
......
......@@ -317,6 +317,11 @@ protected:
return true;
}
bool v_IsSerial(void)
{
return true;
}
bool v_RemoveExistingFiles(void)
{
return false;
......
......@@ -218,6 +218,9 @@ void OutputFileBase::Process(po::variables_map &vm)
PrintErrorFromExp();
}
}
// put outfile back to filename in case of nparts option
RegisterConfig("outfile", filename);
}
// Restore m_exp
exp.swap(m_f->m_exp);
......
......@@ -107,7 +107,8 @@ void OutputFld::OutputFromExp(po::variables_map &vm)
}
}
}
fld->Write(filename, FieldDef, FieldData, m_f->m_fieldMetaDataMap);
fld->Write(filename, FieldDef, FieldData, m_f->m_fieldMetaDataMap,
false);
}
else
{
......
......@@ -68,75 +68,58 @@ void OutputInfo::Process(po::variables_map &vm)
{
// Extract the output filename and extension
string filename = m_config["outfile"].as<string>();
int i = 0;
// partition mesh
ASSERTL0(m_config["nparts"].as<string>().compare("NotSet") != 0,
"Need to specify nparts for info output");
const int nparts = m_config["nparts"].as<int>();
int nparts = m_config["nparts"].as<int>();
std::vector<std::string> files;
// load .xml ending
for (auto &x : m_f->m_inputfiles["xml"]) {
files.push_back(x);
}
// load any .xml.gz endings
for (auto &x: m_f->m_inputfiles["xml.gz"])
{
files.push_back(x);
}
ASSERTL0(m_f->m_comm->GetSize() == 1,
"OutputInfo module should be run in serial.");
// Default partitioner to use is Scotch. Override default with
// command-line flags if they are set.
string vPartitionerName = "Scotch";
if (m_f->m_session->DefinesCmdLineArgument("use-metis"))
{
vPartitionerName = "Metis";
}
if (m_f->m_session->DefinesCmdLineArgument("use-scotch"))
{
vPartitionerName = "Scotch";
}
// Construct mesh partitioning.
SpatialDomains::MeshPartitionSharedPtr meshPartition =
SpatialDomains::GetMeshPartitionFactory().CreateInstance(
vPartitionerName, m_f->m_session, m_f->m_graph->GetMeshDimension(),
m_f->m_graph->CreateMeshEntities(),
m_f->m_graph->CreateCompositeDescriptor());
meshPartition->PartitionMesh(nparts, true);
// get hold of local partition ids
std::vector<std::vector<unsigned int> > ElementIDs(nparts);
// Populate the list of element ID lists from all processes
for (i = 0; i < nparts; ++i)
{
std::vector<unsigned int> tmp;
meshPartition->GetElementIDs(i, tmp);
ElementIDs[i] = tmp;
}
// Input/output file
LibUtilities::CommSharedPtr c = m_f->m_comm;
std::shared_ptr<LibUtilities::FieldIOXml> fldXml =
std::static_pointer_cast<LibUtilities::FieldIOXml>(
LibUtilities::GetFieldIOFactory().CreateInstance("Xml", c, true));
// Set up output names
// open file and setup meta data.
fs::path pinfilename(filename);
std::vector<std::string> filenames;
for (int i = 0; i < nparts; ++i)
std::vector<std::vector<unsigned int> > ElementIDs;
for (int p = 0; p < nparts; ++p)
{
boost::format pad("P%1$07d.fld");
pad % i;
boost::format pad("P%1$07d.%2$s");
pad % p % "fld";
fs::path fullpath = pinfilename / pad.str();
string fname = LibUtilities::PortablePath(fullpath);
LibUtilities::DataSourceSharedPtr dataSource =
LibUtilities::XmlDataSource::create(fname);
std::vector<LibUtilities::FieldDefinitionsSharedPtr> fielddefs;
std::vector<unsigned int> PartElmtIDs;
// read in header of partition if it exists
fldXml->ImportFieldDefs(dataSource,fielddefs,false);
// create ElmenetIDs list then use
for(int i = 0; i < fielddefs.size(); ++i)
{
for(int j = 0; j < fielddefs[i]->m_elementIDs.size(); ++j)
{
PartElmtIDs.push_back(fielddefs[i]->m_elementIDs[j]);
}
}
ElementIDs.push_back(PartElmtIDs);
filenames.push_back(pad.str());
}
// Write the output file
LibUtilities::CommSharedPtr c = m_f->m_comm;
std::shared_ptr<LibUtilities::FieldIOXml> fldXml =
std::static_pointer_cast<LibUtilities::FieldIOXml>(
LibUtilities::GetFieldIOFactory().CreateInstance("Xml", c, true));
fldXml->WriteMultiFldFileIDs(filename, filenames, ElementIDs);
// Write the Info.xml file
string infofile =
LibUtilities::PortablePath(pinfilename / fs::path("Info.xml"));
fldXml->WriteMultiFldFileIDs(infofile,filenames, ElementIDs);
}
}
}
......@@ -75,6 +75,10 @@ OutputTecplot::OutputTecplot(FieldSharedPtr f) : OutputFileBase(f),
m_oneOutputFile(false)
{
m_requireEquiSpaced = true;
m_config["double"] =
ConfigOption(true, "0", "Write double-precision (binary) or scientific "
"format data: more accurate but more disk space"
" required");
}
OutputTecplot::~OutputTecplot()
......@@ -92,7 +96,7 @@ void OutputTecplot::Process(po::variables_map &vm)
{
m_oneOutputFile = (m_f->m_comm->GetSize()> 1);
}
OutputFileBase::Process(vm);
}
......@@ -453,6 +457,13 @@ void OutputTecplotBinary::WriteTecplotHeader(std::ofstream &outfile,
*/
void OutputTecplot::WriteTecplotZone(std::ofstream &outfile)
{
bool useDoubles = m_config["double"].as<bool>();
if (useDoubles)
{
outfile << std::scientific;
}
// Write either points or finite element block
if (m_zoneType != eOrdered)
{
......
......@@ -148,9 +148,6 @@ public:
OutputTecplotBinary(FieldSharedPtr f) : OutputTecplot(f)
{
m_binary = true;
m_config["double"] =
ConfigOption(true, "0", "Write double-precision data: more "
"accurate but more disk space required");
}
virtual ~OutputTecplotBinary()
......
......@@ -70,19 +70,6 @@ void ProcessWSS::Process(po::variables_map &vm)
int expdim = m_f->m_graph->GetSpaceDimension();
m_spacedim = expdim + m_f->m_numHomogeneousDir;
if (m_spacedim == 2)
{
m_f->m_variables.push_back("Shear_x");
m_f->m_variables.push_back("Shear_y");
m_f->m_variables.push_back("Shear_mag");
}
else
{
m_f->m_variables.push_back("Shear_x");
m_f->m_variables.push_back("Shear_y");
m_f->m_variables.push_back("Shear_z");
m_f->m_variables.push_back("Shear_mag");
}
if (m_f->m_exp[0]->GetNumElmts() == 0)
{
......@@ -108,11 +95,14 @@ void ProcessWSS::Process(po::variables_map &vm)
Array<OneD, MultiRegions::ExpListSharedPtr> BndExp(nshear);
Array<OneD, MultiRegions::ExpListSharedPtr> BndElmtExp(nfields);
// Resize m_exp
m_f->m_exp.resize(nfields + nshear);
for (i = 0; i < nshear; ++i)
// will resuse nfields expansions to write shear components.
if(nshear > nfields)
{
m_f->m_exp[nfields + i] = m_f->AppendExpList(m_f->m_numHomogeneousDir);
m_f->m_exp.resize(nshear);
for (i = nfields; i < nshear; ++i)
{
m_f->m_exp[nfields + i] = m_f->AppendExpList(m_f->m_numHomogeneousDir);
}
}
// Create map of boundary ids for partitioned domains
......@@ -137,8 +127,7 @@ void ProcessWSS::Process(po::variables_map &vm)
// bnd
for (i = 0; i < nshear; i++)
{
BndExp[i] =
m_f->m_exp[nfields + i]->UpdateBndCondExpansion(bnd);
BndExp[i] = m_f->m_exp[i]->UpdateBndCondExpansion(bnd);
}
for (i = 0; i < nfields; i++)
{
......@@ -294,6 +283,20 @@ void ProcessWSS::Process(po::variables_map &vm)
BndExp[nshear - 1]->UpdateCoeffs());
}
}
if (m_spacedim == 2)
{
m_f->m_variables[0] = "Shear_x";
m_f->m_variables[1] = "Shear_y";
m_f->m_variables[2] = "Shear_mag";
}
else
{
m_f->m_variables[0] = "Shear_x";
m_f->m_variables[1] = "Shear_y";
m_f->m_variables[2] = "Shear_z";
m_f->m_variables[3] = "Shear_mag";
}
}
void ProcessWSS::GetViscosity(
......@@ -369,14 +372,19 @@ void ProcessWSS::GetViscosity(
Vmath::Smul(npoints, cv_inv, energy, 1, temperature, 1 );
// Variable viscosity through the Sutherland's law
//
// WARNING, if this routine is modified the same must be done in the
// CompressibleFlowSolver function in VariableConverter.cpp
// (this class should be restructured).
const NekDouble C = .38175;
NekDouble mu_star = m_mu;
NekDouble T_star = m_pInf / (m_rhoInf * m_gasConstant);
NekDouble ratio;
for (int i = 0; i < npoints; ++i)
{
ratio = temperature[i] / T_star;