Commit 9f589b22 authored by Douglas Serson's avatar Douglas Serson

Merge branch 'master' into feature/FilterFieldConvert

Conflicts:
	CHANGELOG.md
parents 0e2476b6 790e7416
......@@ -4,6 +4,14 @@ Changelog
v4.4.0
------
**Library:**
- Add support for variable polynomial order for 3D simulations with continuous
Galerkin discretisation (!604)
- Add support for variable polynomial order with periodic boundary conditions
(!658)
- Statistics are now printed for lowest level of multi-level static condensation
(!656)
- Sped up interpolation from pts files and fixed parallel pts import (!584)
- Increased required boost version to 1.56.0 (!584)
- New FieldUtils library allows support for most `FieldConvert` post-processing
operations during simulation using a new filter (!589)
......@@ -21,19 +29,29 @@ v4.4.0
v4.3.3
------
**Library**:
- Auto-detect a shared filesystem and removed --shared-filesystem option (!654)
- Fix filters when using adaptive driver to avoid output being overwritten after
each adaptive update (!588)
- Minor fix to suppress Xxt output unless `--verbose` is specified (!642)
- Fix of DirectFull solver in case where only Neumann boundary conditions
- Fix of DirectFull solver in case where only Neumann boundary conditions
are imposed. (!655)
**FieldConvert**:
- Fix to avoid repeated import of field file (!649)
- Fix issue with C^0 projection (!644)
- Fix verbose output when using --procid (!648)
**NekMesh:**
- Fix namespace issue in Star-CCM+ input header in NekMesh (!661)
**CompressibleFlowSolver**:
- Fix issue with residual output (!647)
- Issues with 1D Euler solver fixed (!565)
- Fix deadlocking issue with boundary conditions (!657)
**Packaging**:
- Fix NekMesh dependencies for DEB package (!650)
- Fix PETSc build on newer linux distributions (!646)
v4.3.2
------
......@@ -46,6 +64,7 @@ v4.3.2
output is produced in physical space (!621).
- Fix minor performance issue with time integration schemes (!632)
- Fix FilterCheckpoint filter to be consistent with `IO_CheckSteps` (!633)
- Fix CMake configuration for building on Windows 10 with VS 2015 (!641)
- Fix `IO_CheckSteps` to avoid missing first checkpoint (!639)
- Fix bug in iterative solver where only root process would ASSERT when
exceeding the maximum number of iterations (!636)
......@@ -115,7 +134,7 @@ v4.3.0
(!537)
- Fix bug with initial conditions of CG simulations using variable P (!543)
- Fix bug in 3DH2D with non-zero Dirichlet boundary conditions (!545)
- Added in a method to convert equispaced interpolated points back to
- Added in a method to convert equispaced interpolated points back to
coefficients which requires the introduction of a new StdRegions matrix.(!561)
- Empty XML tags which would override non-empty XML tags are now ignored (!581)
- Add contribution guide (!551)
......
......@@ -51,6 +51,7 @@ project. It's a pretty simple process:
new features?
- Have you run your branch through buildbot and do all the tests pass?
- Is there documentation in the user guide and/or developer guide?
- Have you added a CHANGELOG entry, including the MR number?
- Are there any massive files you might have added in the commit history? We try
to keep test files as small as possible. If so you'll need to rebase or
filter-branch to remove those from the commit history.
......
......@@ -2,7 +2,7 @@ FIND_LIBRARY(WIN32_BLAS NAMES libblas PATHS ${TPSRC})
FIND_LIBRARY(WIN32_LAPACK NAMES liblapack PATHS ${TPSRC})
IF (NOT WIN32_BLAS OR NOT WIN32_LAPACK)
IF (CMAKE_CL_64)
IF (CMAKE_CL_64 OR CMAKE_GENERATOR MATCHES Win64)
SET(WIN_ZIP_FILE "win64-blas-lapack.zip")
SET(WIN_ZIP_MD5_VALUE "b5ad4f7335ca964bbdafab129e44a046")
SET(WIN_ZIP_SHA1_VALUE "adb331fa195db264e95e46b902887f12971dbf48")
......
......@@ -8,9 +8,9 @@
#If the user has not set BOOST_ROOT, look in a couple common places first.
MESSAGE(STATUS "Searching for Boost:")
SET(MIN_VER "1.52.0")
SET(MIN_VER "1.56.0")
SET(NEEDED_BOOST_LIBS thread iostreams date_time filesystem system
program_options regex timer)
program_options regex timer chrono)
SET(Boost_DEBUG 0)
SET(Boost_NO_BOOST_CMAKE ON)
IF( BOOST_ROOT )
......@@ -67,7 +67,7 @@ IF (THIRDPARTY_BUILD_BOOST)
# Only build the libraries we need
SET(BOOST_LIB_LIST --with-system --with-iostreams --with-filesystem
--with-program_options --with-date_time --with-thread
--with-regex --with-timer)
--with-regex --with-timer --with-chrono)
IF (NOT WIN32)
# We need -fPIC for 64-bit builds
......@@ -86,6 +86,8 @@ IF (THIRDPARTY_BUILD_BOOST)
SET(TOOLSET msvc-11.0)
ELSEIF (MSVC12)
SET(TOOLSET msvc-12.0)
ELSEIF (MSVC14)
SET(TOOLSET msvc-14.0)
ENDIF()
ELSE(APPLE)
SET(TOOLSET gcc)
......@@ -160,6 +162,9 @@ IF (THIRDPARTY_BUILD_BOOST)
ENDIF(THIRDPARTY_BUILD_ZLIB)
# Set up CMake variables
SET(Boost_CHRONO_LIBRARY boost_chrono)
SET(Boost_CHRONO_LIBRARY_DEBUG boost_chrono)
SET(Boost_CHRONO_LIBRARY_RELEASE boost_chrono)
SET(Boost_DATE_TIME_LIBRARY boost_date_time)
SET(Boost_DATE_TIME_LIBRARY_DEBUG boost_date_time)
SET(Boost_DATE_TIME_LIBRARY_RELEASE boost_date_time)
......@@ -189,7 +194,7 @@ IF (THIRDPARTY_BUILD_BOOST)
SET(Boost_CONFIG_INCLUDE_DIR ${TPINC})
SET(Boost_LIBRARY_DIRS ${TPSRC}/dist/lib)
SET(Boost_CONFIG_LIBRARY_DIR ${TPLIB})
SET(Boost_LIBRARIES boost_date_time boost_filesystem boost_iostreams boost_program_options boost_regex boost_system boost_thread boost_timer)
SET(Boost_LIBRARIES boost_chrono boost_date_time boost_filesystem boost_iostreams boost_program_options boost_regex boost_system boost_thread boost_timer)
LINK_DIRECTORIES(${Boost_LIBRARY_DIRS})
STRING(REPLACE ";" ", " NEEDED_BOOST_LIBS_STRING "${NEEDED_BOOST_LIBS}")
......
......@@ -26,6 +26,8 @@ IF (NEKTAR_USE_PETSC)
IF (THIRDPARTY_BUILD_PETSC)
INCLUDE(ExternalProject)
FIND_PACKAGE(PythonInterp 2 REQUIRED)
SET(PETSC_C_COMPILER "${CMAKE_C_COMPILER}")
SET(PETSC_CXX_COMPILER "${CMAKE_CXX_COMPILER}")
......@@ -39,24 +41,23 @@ IF (NEKTAR_USE_PETSC)
ENDIF (NEKTAR_USE_MPI)
EXTERNALPROJECT_ADD(
petsc-3.5.2
petsc-3.7.2
PREFIX ${TPSRC}
STAMP_DIR ${TPBUILD}/stamp
DOWNLOAD_DIR ${TPSRC}
SOURCE_DIR ${TPBUILD}/petsc-3.5.2
TMP_DIR ${TPBUILD}/petsc-3.5.2-tmp
SOURCE_DIR ${TPBUILD}/petsc-3.7.2
TMP_DIR ${TPBUILD}/petsc-3.7.2-tmp
INSTALL_DIR ${TPDIST}
BINARY_DIR ${TPBUILD}/petsc-3.5.2
URL http://www.nektar.info/thirdparty/petsc-lite-3.5.2.tar.gz
URL_MD5 "d707336a98d7cb31d843804d020edc94"
BINARY_DIR ${TPBUILD}/petsc-3.7.2
URL http://www.nektar.info/thirdparty/petsc-lite-3.7.2.tar.gz
URL_MD5 "26c2ff8eaaa9e49aea063f839f5daa7e"
CONFIGURE_COMMAND
OMPI_CC=${CMAKE_C_COMPILER}
OMPI_CXX=${CMAKE_CXX_COMPILER}
./configure
${PYTHON_EXECUTABLE} ./configure
--with-cc=${PETSC_C_COMPILER}
--with-cxx=${PETSC_CXX_COMPILER}
--with-shared-libraries=0
--with-pic=1
--with-shared-libraries=1
--with-x=0
--with-ssl=0
--prefix=${TPDIST}
......@@ -71,7 +72,7 @@ IF (NEKTAR_USE_PETSC)
"PETSc includes" FORCE)
LINK_DIRECTORIES(${TPDIST}/lib)
MESSAGE(STATUS "Build PETSc: ${TPDIST}/${LIB_DIR}/lib${PETSC_LIBRARIES}.a")
MESSAGE(STATUS "Build PETSc: ${TPDIST}/${LIB_DIR}/lib${PETSC_LIBRARIES}.so")
SET(PETSC_CONFIG_INCLUDE_DIR ${TPINC})
ELSE (THIRDPARTY_BUILD_PETSC)
INCLUDE(FindPETSc)
......@@ -82,13 +83,13 @@ IF (NEKTAR_USE_PETSC)
ENDIF (NOT PETSC_FOUND)
SET(PETSC_CONFIG_INCLUDE_DIR ${PETSC_INCLUDES})
INCLUDE_DIRECTORIES(${PETSC_INCLUDES})
ADD_CUSTOM_TARGET(petsc-3.5.2 ALL)
ADD_CUSTOM_TARGET(petsc-3.7.2 ALL)
ENDIF (THIRDPARTY_BUILD_PETSC)
ADD_DEFINITIONS(-DNEKTAR_USING_PETSC)
INCLUDE_DIRECTORIES(SYSTEM ${PETSC_INCLUDES})
IF (NOT NEKTAR_USE_MPI)
INCLUDE_DIRECTORIES(SYSTEM ${PETSC_INCLUDES}/mpiuni)
INCLUDE_DIRECTORIES(SYSTEM ${PETSC_INCLUDES}/petsc/mpiuni)
ENDIF (NOT NEKTAR_USE_MPI)
MARK_AS_ADVANCED(PETSC_CURRENT PETSC_DIR PETSC_LIBRARIES PETSC_INCLUDES)
......
......@@ -23,10 +23,6 @@ Override a parameter (or define a new one) specified in the XML file.
\hangindent=1.5cm
Override a solverinfo (or define a new one) specified in the XML file.
\lstinline[style=BashInputStyle]{--shared-filesystem}\\
\hangindent=1.5cm
By default when running in parallel the complete mesh is loaded by all processes, although partitioning is done uniquely on the root process only and communicated to the other processes. Each process then writes out its own partition to the local working directory. This is the most robust approach in accounting for systems where the distributed nodes do not share a common filesystem. In the case that there is a common filesystem, this option forces only the root process to load the complete mesh, perform partitioning and write out the session files for all partitions. This avoids potential memory issues when multiple processes attempt to load the complete mesh on a single node.
\lstinline[style=BashInputStyle]{--npx [int]}\\
\hangindent=1.5cm
When using a fully-Fourier expansion, specifies the number of processes to use in the x-coordinate direction.
......
......@@ -2,7 +2,7 @@
<test>
<description>Helmholtz 2D CG with P=7, all BCs, iterative ML, par(3)</description>
<executable>Helmholtz2D</executable>
<parameters>--use-metis -I GlobalSysSoln=IterativeMultiLevelStaticCond Helmholtz2D_P7_AllBCs.xml</parameters>
<parameters>--use-metis --verbose -I GlobalSysSoln=IterativeMultiLevelStaticCond Helmholtz2D_P7_AllBCs.xml</parameters>
<processes>3</processes>
<files>
<file description="Session File">Helmholtz2D_P7_AllBCs.xml</file>
......
......@@ -2,7 +2,7 @@
<test>
<description>Helmholtz 3D CG, hexes, mixed BCs, iterative ML</description>
<executable>Helmholtz3D</executable>
<parameters>--use-metis -I GlobalSysSoln=IterativeMultiLevelStaticCond Helmholtz3D_Hex_AllBCs_P6.xml</parameters>
<parameters>--use-metis --verbose -I GlobalSysSoln=IterativeMultiLevelStaticCond Helmholtz3D_Hex_AllBCs_P6.xml</parameters>
<processes>3</processes>
<files>
<file description="Session File">Helmholtz3D_Hex_AllBCs_P6.xml</file>
......
......@@ -2,7 +2,7 @@
<test>
<description>Helmholtz 3D CG, hexes, mixed BCs, iterative ML, Scotch</description>
<executable>Helmholtz3D</executable>
<parameters>--use-scotch -I GlobalSysSoln=IterativeMultiLevelStaticCond Helmholtz3D_Hex_AllBCs_P6.xml</parameters>
<parameters>--use-scotch --verbose -I GlobalSysSoln=IterativeMultiLevelStaticCond Helmholtz3D_Hex_AllBCs_P6.xml</parameters>
<processes>3</processes>
<files>
<file description="Session File">Helmholtz3D_Hex_AllBCs_P6.xml</file>
......
......@@ -2,7 +2,7 @@
<test>
<description>Helmholtz 3D CG, prisms, Neumann BCs, iterative ML, Par(3), Scotch</description>
<executable>Helmholtz3D</executable>
<parameters>--use-scotch Helmholtz3D_Prism.xml</parameters>
<parameters>--use-scotch --verbose Helmholtz3D_Prism.xml</parameters>
<processes>3</processes>
<files>
<file description="Session File">Helmholtz3D_Prism.xml</file>
......
......@@ -82,7 +82,10 @@ void InputDat::Process(po::variables_map &vm)
if(m_f->m_verbose)
{
cout << "Processing input dat file" << endl;
if(m_f->m_comm->TreatAsRankZero())
{
cout << "Processing input dat file" << endl;
}
}
string line, word, tag;
......
......@@ -93,7 +93,7 @@ void InputFld::Process(po::variables_map &vm)
if(m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
cout << "Processing input fld file" << endl;
}
......
......@@ -82,7 +82,10 @@ void InputPts::Process(po::variables_map &vm)
{
if (m_f->m_verbose)
{
cout << "Processing input pts file" << endl;
if(m_f->m_comm->TreatAsRankZero())
{
cout << "Processing input pts file" << endl;
}
}
string inFile = (m_f->m_inputfiles["pts"][0]).c_str();
......
......@@ -102,7 +102,7 @@ void InputXml::Process(po::variables_map &vm)
if(m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
cout << "Processing input xml file" << endl;
timerpart.Start();
......@@ -234,11 +234,6 @@ void InputXml::Process(po::variables_map &vm)
cmdArgs.push_back("--verbose");
}
if(vm.count("shared-filesystem"))
{
cmdArgs.push_back("--shared-filesystem");
}
if(vm.count("part-only"))
{
cmdArgs.push_back("--part-only");
......@@ -275,7 +270,7 @@ void InputXml::Process(po::variables_map &vm)
if(m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
timerpart.Stop();
NekDouble cpuTime = timerpart.TimePerTest(1);
......@@ -294,7 +289,7 @@ void InputXml::Process(po::variables_map &vm)
if(m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
timerpart.Stop();
NekDouble cpuTime = timerpart.TimePerTest(1);
......@@ -395,7 +390,7 @@ void InputXml::Process(po::variables_map &vm)
if(m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
timerpart.Stop();
NekDouble cpuTime = timerpart.TimePerTest(1);
......@@ -419,7 +414,7 @@ void InputXml::Process(po::variables_map &vm)
if(m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
timerpart.Stop();
NekDouble cpuTime = timerpart.TimePerTest(1);
......
......@@ -73,7 +73,7 @@ void OutputFld::Process(po::variables_map &vm)
if (m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
cout << "OutputFld: Writing boundary file(s): ";
for(int i = 0; i < m_f->m_bndRegionsToWrite.size(); ++i)
......@@ -228,7 +228,7 @@ void OutputFld::Process(po::variables_map &vm)
{
if (m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
cout << "OutputFld: Writing file..." << endl;
}
......
......@@ -64,7 +64,10 @@ void OutputPts::Process(po::variables_map &vm)
if (m_f->m_verbose)
{
cout << "OutputPts: Writing file..." << endl;
if(m_f->m_comm->TreatAsRankZero())
{
cout << "OutputPts: Writing file..." << endl;
}
}
fs::path writefile(filename);
......
......@@ -86,7 +86,10 @@ void OutputTecplot::Process(po::variables_map &vm)
if (m_f->m_verbose)
{
cout << "OutputTecplot: Writing file..." << endl;
if(m_f->m_comm->TreatAsRankZero())
{
cout << "OutputTecplot: Writing file..." << endl;
}
}
......
......@@ -74,7 +74,10 @@ void OutputVtk::Process(po::variables_map &vm)
int i, j;
if (m_f->m_verbose)
{
cout << "OutputVtk: Writing file..." << endl;
if(m_f->m_comm->TreatAsRankZero())
{
cout << "OutputVtk: Writing file..." << endl;
}
}
// Extract the output filename and extension
......@@ -96,7 +99,7 @@ void OutputVtk::Process(po::variables_map &vm)
fs::path poutfile(filename.c_str());
fs::path specPath(path.c_str());
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
try
{
......
......@@ -67,7 +67,10 @@ void OutputXml::Process(po::variables_map &vm)
if (m_f->m_verbose)
{
cout << "OutputXml: Writing file..." << endl;
if(m_f->m_comm->TreatAsRankZero())
{
cout << "OutputXml: Writing file..." << endl;
}
}
// Extract the output filename and extension
......
......@@ -72,7 +72,7 @@ void ProcessAddFld::Process(po::variables_map &vm)
{
if (m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
cout << "ProcessAddFld: Adding new fld to input fld..." << endl;
}
......
......@@ -74,7 +74,7 @@ void ProcessBoundaryExtract::Process(po::variables_map &vm)
{
if (m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
cout << "ProcessBoundaryExtract: Setting up boundary extraction..."
<< endl;
......
......@@ -70,7 +70,7 @@ void ProcessC0Projection::Process(po::variables_map &vm)
{
if (m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
cout << "ProcessC0Projection: Projecting field into C0 space..."
<< endl;
......
......@@ -70,7 +70,7 @@ void ProcessCombineAvg::Process(po::variables_map &vm)
{
if (m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
cout << "ProcessCombineAvg: Combining new fld into input avg fld..."
<< endl;
......
......@@ -85,7 +85,7 @@ void ProcessConcatenateFld::Process(po::variables_map &vm)
{
if (m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
cout << "ProcessConcatenateFld: Concatenating field file..."
<< endl;
......
......@@ -64,7 +64,7 @@ namespace Nektar
{
if (m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
cout << "ProcessDeform: Deforming grid..." << endl;
}
......
......@@ -126,7 +126,7 @@ namespace FieldUtils
{
if (m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
cout << "ProcessDisplacement: Calculating displacement..."
<< endl;
......
......@@ -83,7 +83,7 @@ void ProcessEquiSpacedOutput::SetupEquiSpacedField(void)
{
if(m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
cout << "Interpolating fields to equispaced..." << endl;
}
......
......@@ -66,7 +66,7 @@ void ProcessGrad::Process(po::variables_map &vm)
{
if (m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
cout << "ProcessGrad: Calculating gradients..." << endl;
}
......
......@@ -70,7 +70,7 @@ void ProcessHomogeneousPlane::Process(po::variables_map &vm)
{
if (m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
cout << "ProcessHomogeneousPlane: Extracting plane..." << endl;
}
......
......@@ -79,7 +79,7 @@ void ProcessInnerProduct::Process(po::variables_map &vm)
{
if (m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
cout << "ProcessInnerProduct: Evaluating inner product..." << endl;
}
......
......@@ -40,6 +40,8 @@ using namespace std;
#include <LibUtilities/BasicUtils/SharedArray.hpp>
#include <LibUtilities/BasicUtils/ParseUtils.hpp>
#include <LibUtilities/BasicUtils/Progressbar.hpp>
#include <SolverUtils/Interpolator.h>
#include <boost/math/special_functions/fpclassify.hpp>
namespace Nektar
{
......@@ -78,7 +80,7 @@ void ProcessInterpField::Process(po::variables_map &vm)
{
if(m_f->m_verbose)
{
if(m_f->m_comm->GetRank() == 0)
if(m_f->m_comm->TreatAsRankZero())
{
cout << "ProcessInterpField: Interpolating field..." << endl;
}
......@@ -217,23 +219,44 @@ void ProcessInterpField::Process(po::variables_map &vm)
m_f->m_exp[0]->GetCoords(x1, y1, z1);
}
if(m_f->m_session->GetComm()->TreatAsRankZero())
{
cout << "Interpolating [" << flush;
}
NekDouble clamp_low = m_config["clamptolowervalue"].as<NekDouble>();
NekDouble clamp_up = m_config["clamptouppervalue"].as<NekDouble>();
NekDouble def_value = m_config["defaultvalue"].as<NekDouble>();
InterpolateField(m_fromField->m_exp, m_f->m_exp,
x1, y1, z1, clamp_low, clamp_up,def_value);
for (int i = 0; i < nfields; i++)
{
for (int j = 0; j < nq1; ++j)
{
m_f->m_exp[i]->UpdatePhys()[j] = def_value;
}
}
if(m_f->m_session->GetComm()->TreatAsRankZero())
SolverUtils::Interpolator interp;
if (m_f->m_comm->GetRank() == 0)
{
cout << "]" << endl;
interp.SetProgressCallback(&ProcessInterpField::PrintProgressbar,
this);
}
interp.Interpolate(m_fromField->m_exp, m_f->m_exp);
if (m_f->m_comm->GetRank() == 0)
{
cout << endl;
}
for (int i = 0; i < nfields; ++i)
{
for (int j = 0; j < nq1; ++j)
{
if (m_f->m_exp[i]->GetPhys()[j] > clamp_up)
{
m_f->m_exp[i]->UpdatePhys()[j] = clamp_up;
}
else if (m_f->m_exp[i]->GetPhys()[j] < clamp_low)
{
m_f->m_exp[i]->UpdatePhys()[j] = clamp_low;
}
}
}
// put field into field data for output
std::vector<LibUtilities::FieldDefinitionsSharedPtr> FieldDef
......@@ -255,85 +278,13 @@ void ProcessInterpField::Process(po::variables_map &vm)
m_f->m_data = FieldData;
}
void ProcessInterpField::InterpolateField(
vector<MultiRegions::ExpListSharedPtr> &field0,
vector<MultiRegions::ExpListSharedPtr> &field1,
Array<OneD, NekDouble> x,
Array<OneD, NekDouble> y,
Array<OneD, NekDouble> z,
NekDouble clamp_low,
NekDouble clamp_up,
NekDouble def_value)
void ProcessInterpField::PrintProgressbar(const int position,
const int goal) const
{
int expdim = field0[0]->GetCoordim(0);
Array<OneD, NekDouble> coords(expdim), Lcoords(expdim);
int nq1 = field1[0]->GetTotPoints();
int elmtid, offset;
int r, f;
static int intpts = 0;
ASSERTL0(field0.size() == field1.size(),
"Input field dimension must be same as output dimension");
for (r = 0; r < nq1; r++)
{
coords[0] = x[r];
coords[1] = y[r];
if (expdim == 3)
{
coords[2] = z[r];