Commit b06c39ef authored by Dave Moxey's avatar Dave Moxey

Merge remote-tracking branch 'upstream/master' into feature/stats-sc-level

parents 0ba9aee2 bd7e75cb
......@@ -4,8 +4,14 @@ Changelog
v4.4.0
------
**Library:**
- Add support for variable polynomial order for 3D simulations with continuous
Galerkin discretisation (!604)
- Add support for variable polynomial order with periodic boundary conditions
(!658)
- Statistics are now printed for the lowest level of multi-level static
condensation (!656)
- Sped up interpolation from pts files and fixed parallel pts import (!584)
- Increased required boost version to 1.56.0 (!584)
**IncNavierStokesSolver:**
- Add ability to simulate additional scalar fields (!624)
......@@ -13,19 +19,35 @@ v4.4.0
**NekMesh:**
- Modify curve module to allow for spline input (!628)
**FieldConvert:**
- Add module to stretch homogeneous direction (!609)
v4.3.3
------
**Library**:
- Auto-detect a shared filesystem and remove the `--shared-filesystem` option (!654)
- Fix filters when using adaptive driver to avoid output being overwritten after
each adaptive update (!588)
- Minor fix to suppress Xxt output unless `--verbose` is specified (!642)
- Fix DirectFull solver in the case where only Neumann boundary conditions
are imposed (!655)
**FieldConvert**:
- Fix to avoid repeated import of field file (!649)
- Fix issue with C^0 projection (!644)
- Fix verbose output when using `--procid` (!648)
**NekMesh:**
- Fix namespace issue in Star-CCM+ input header in NekMesh (!661)
**CompressibleFlowSolver**:
- Fix issue with residual output (!647)
- Fix issues with 1D Euler solver (!565)
- Fix deadlocking issue with boundary conditions (!657)
**Packaging**:
- Fix NekMesh dependencies for DEB package (!650)
- Fix PETSc build on newer linux distributions (!646)
v4.3.2
------
......@@ -38,6 +60,7 @@ v4.3.2
output is produced in physical space (!621).
- Fix minor performance issue with time integration schemes (!632)
- Fix FilterCheckpoint filter to be consistent with `IO_CheckSteps` (!633)
- Fix CMake configuration for building on Windows 10 with VS 2015 (!641)
- Fix `IO_CheckSteps` to avoid missing first checkpoint (!639)
- Fix bug in iterative solver where only root process would ASSERT when
exceeding the maximum number of iterations (!636)
......@@ -107,7 +130,7 @@ v4.3.0
(!537)
- Fix bug with initial conditions of CG simulations using variable P (!543)
- Fix bug in 3DH2D with non-zero Dirichlet boundary conditions (!545)
- Added a method to convert equispaced interpolated points back to
coefficients, which requires the introduction of a new StdRegions matrix (!561)
- Empty XML tags which would override non-empty XML tags are now ignored (!581)
- Add contribution guide (!551)
......
......@@ -51,6 +51,7 @@ project. It's a pretty simple process:
new features?
- Have you run your branch through buildbot and do all the tests pass?
- Is there documentation in the user guide and/or developer guide?
- Have you added a CHANGELOG entry, including the MR number?
- Are there any massive files you might have added in the commit history? We try
to keep test files as small as possible. If so you'll need to rebase or
filter-branch to remove those from the commit history.
......
......@@ -2,7 +2,7 @@ FIND_LIBRARY(WIN32_BLAS NAMES libblas PATHS ${TPSRC})
FIND_LIBRARY(WIN32_LAPACK NAMES liblapack PATHS ${TPSRC})
IF (NOT WIN32_BLAS OR NOT WIN32_LAPACK)
IF (CMAKE_CL_64)
IF (CMAKE_CL_64 OR CMAKE_GENERATOR MATCHES Win64)
SET(WIN_ZIP_FILE "win64-blas-lapack.zip")
SET(WIN_ZIP_MD5_VALUE "b5ad4f7335ca964bbdafab129e44a046")
SET(WIN_ZIP_SHA1_VALUE "adb331fa195db264e95e46b902887f12971dbf48")
......
......@@ -8,9 +8,9 @@
#If the user has not set BOOST_ROOT, look in a couple common places first.
MESSAGE(STATUS "Searching for Boost:")
SET(MIN_VER "1.52.0")
SET(MIN_VER "1.56.0")
SET(NEEDED_BOOST_LIBS thread iostreams date_time filesystem system
program_options regex timer)
program_options regex timer chrono)
SET(Boost_DEBUG 0)
SET(Boost_NO_BOOST_CMAKE ON)
IF( BOOST_ROOT )
......@@ -67,7 +67,7 @@ IF (THIRDPARTY_BUILD_BOOST)
# Only build the libraries we need
SET(BOOST_LIB_LIST --with-system --with-iostreams --with-filesystem
--with-program_options --with-date_time --with-thread
--with-regex --with-timer)
--with-regex --with-timer --with-chrono)
IF (NOT WIN32)
# We need -fPIC for 64-bit builds
......@@ -86,6 +86,8 @@ IF (THIRDPARTY_BUILD_BOOST)
SET(TOOLSET msvc-11.0)
ELSEIF (MSVC12)
SET(TOOLSET msvc-12.0)
ELSEIF (MSVC14)
SET(TOOLSET msvc-14.0)
ENDIF()
ELSE(APPLE)
SET(TOOLSET gcc)
......@@ -160,6 +162,9 @@ IF (THIRDPARTY_BUILD_BOOST)
ENDIF(THIRDPARTY_BUILD_ZLIB)
# Set up CMake variables
SET(Boost_CHRONO_LIBRARY boost_chrono)
SET(Boost_CHRONO_LIBRARY_DEBUG boost_chrono)
SET(Boost_CHRONO_LIBRARY_RELEASE boost_chrono)
SET(Boost_DATE_TIME_LIBRARY boost_date_time)
SET(Boost_DATE_TIME_LIBRARY_DEBUG boost_date_time)
SET(Boost_DATE_TIME_LIBRARY_RELEASE boost_date_time)
......@@ -189,7 +194,7 @@ IF (THIRDPARTY_BUILD_BOOST)
SET(Boost_CONFIG_INCLUDE_DIR ${TPINC})
SET(Boost_LIBRARY_DIRS ${TPSRC}/dist/lib)
SET(Boost_CONFIG_LIBRARY_DIR ${TPLIB})
SET(Boost_LIBRARIES boost_date_time boost_filesystem boost_iostreams boost_program_options boost_regex boost_system boost_thread boost_timer)
SET(Boost_LIBRARIES boost_chrono boost_date_time boost_filesystem boost_iostreams boost_program_options boost_regex boost_system boost_thread boost_timer)
LINK_DIRECTORIES(${Boost_LIBRARY_DIRS})
STRING(REPLACE ";" ", " NEEDED_BOOST_LIBS_STRING "${NEEDED_BOOST_LIBS}")
......
......@@ -26,6 +26,8 @@ IF (NEKTAR_USE_PETSC)
IF (THIRDPARTY_BUILD_PETSC)
INCLUDE(ExternalProject)
FIND_PACKAGE(PythonInterp 2 REQUIRED)
SET(PETSC_C_COMPILER "${CMAKE_C_COMPILER}")
SET(PETSC_CXX_COMPILER "${CMAKE_CXX_COMPILER}")
......@@ -39,24 +41,23 @@ IF (NEKTAR_USE_PETSC)
ENDIF (NEKTAR_USE_MPI)
EXTERNALPROJECT_ADD(
petsc-3.5.2
petsc-3.7.2
PREFIX ${TPSRC}
STAMP_DIR ${TPBUILD}/stamp
DOWNLOAD_DIR ${TPSRC}
SOURCE_DIR ${TPBUILD}/petsc-3.5.2
TMP_DIR ${TPBUILD}/petsc-3.5.2-tmp
SOURCE_DIR ${TPBUILD}/petsc-3.7.2
TMP_DIR ${TPBUILD}/petsc-3.7.2-tmp
INSTALL_DIR ${TPDIST}
BINARY_DIR ${TPBUILD}/petsc-3.5.2
URL http://www.nektar.info/thirdparty/petsc-lite-3.5.2.tar.gz
URL_MD5 "d707336a98d7cb31d843804d020edc94"
BINARY_DIR ${TPBUILD}/petsc-3.7.2
URL http://www.nektar.info/thirdparty/petsc-lite-3.7.2.tar.gz
URL_MD5 "26c2ff8eaaa9e49aea063f839f5daa7e"
CONFIGURE_COMMAND
OMPI_CC=${CMAKE_C_COMPILER}
OMPI_CXX=${CMAKE_CXX_COMPILER}
./configure
${PYTHON_EXECUTABLE} ./configure
--with-cc=${PETSC_C_COMPILER}
--with-cxx=${PETSC_CXX_COMPILER}
--with-shared-libraries=0
--with-pic=1
--with-shared-libraries=1
--with-x=0
--with-ssl=0
--prefix=${TPDIST}
......@@ -71,7 +72,7 @@ IF (NEKTAR_USE_PETSC)
"PETSc includes" FORCE)
LINK_DIRECTORIES(${TPDIST}/lib)
MESSAGE(STATUS "Build PETSc: ${TPDIST}/${LIB_DIR}/lib${PETSC_LIBRARIES}.a")
MESSAGE(STATUS "Build PETSc: ${TPDIST}/${LIB_DIR}/lib${PETSC_LIBRARIES}.so")
SET(PETSC_CONFIG_INCLUDE_DIR ${TPINC})
ELSE (THIRDPARTY_BUILD_PETSC)
INCLUDE(FindPETSc)
......@@ -82,13 +83,13 @@ IF (NEKTAR_USE_PETSC)
ENDIF (NOT PETSC_FOUND)
SET(PETSC_CONFIG_INCLUDE_DIR ${PETSC_INCLUDES})
INCLUDE_DIRECTORIES(${PETSC_INCLUDES})
ADD_CUSTOM_TARGET(petsc-3.5.2 ALL)
ADD_CUSTOM_TARGET(petsc-3.7.2 ALL)
ENDIF (THIRDPARTY_BUILD_PETSC)
ADD_DEFINITIONS(-DNEKTAR_USING_PETSC)
INCLUDE_DIRECTORIES(SYSTEM ${PETSC_INCLUDES})
IF (NOT NEKTAR_USE_MPI)
INCLUDE_DIRECTORIES(SYSTEM ${PETSC_INCLUDES}/mpiuni)
INCLUDE_DIRECTORIES(SYSTEM ${PETSC_INCLUDES}/petsc/mpiuni)
ENDIF (NOT NEKTAR_USE_MPI)
MARK_AS_ADVANCED(PETSC_CURRENT PETSC_DIR PETSC_LIBRARIES PETSC_INCLUDES)
......
......@@ -23,10 +23,6 @@ Override a parameter (or define a new one) specified in the XML file.
\hangindent=1.5cm
Override a solverinfo (or define a new one) specified in the XML file.
\lstinline[style=BashInputStyle]{--shared-filesystem}\\
\hangindent=1.5cm
By default when running in parallel the complete mesh is loaded by all processes, although partitioning is done uniquely on the root process only and communicated to the other processes. Each process then writes out its own partition to the local working directory. This is the most robust approach in accounting for systems where the distributed nodes do not share a common filesystem. In the case that there is a common filesystem, this option forces only the root process to load the complete mesh, perform partitioning and write out the session files for all partitions. This avoids potential memory issues when multiple processes attempt to load the complete mesh on a single node.
\lstinline[style=BashInputStyle]{--npx [int]}\\
\hangindent=1.5cm
When using a fully-Fourier expansion, specifies the number of processes to use in the x-coordinate direction.
......
......@@ -102,6 +102,7 @@ possibly also Reynolds stresses) into single file;
\item \inltt{equispacedoutput}: Write data as equi-spaced output using simplices to represent the data for connecting points;
\item \inltt{extract}: Extract a boundary field;
\item \inltt{homplane}: Extract a plane from 3DH1D expansions;
\item \inltt{homstretch}: Stretch a 3DH1D expansion by an integer factor;
\item \inltt{innerproduct}: Take the inner product between one or a series of fields with another field (or series of fields);
\item \inltt{interpfield}: Interpolates one field to another, requires fromxml, fromfld to be defined;
\item \inltt{interppointdatatofld}: Interpolates given discrete data using a finite difference approximation to a fld file given an xml file;
......@@ -327,6 +328,21 @@ The output file \inltt{file-plane.fld} can be processed in a similar
way as described in section \ref{s:utilities:fieldconvert:sub:convert}
to visualise it either in Tecplot or in Paraview.
\subsection{Stretch a 3DH1D expansion: \textit{homstretch} module}
To stretch a 3DH1D expansion in the z-direction, use the command:
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m homstretch:factor=value file.xml file.fld file-stretch.fld
\end{lstlisting}
The number of modes in the resulting field can be chosen using the command-line
parameter \inltt{output-points-hom-z}. Note that the output of this
module should always be a \inltt{.fld} file and that it should not be
combined with other modules in a single command.
The output file \inltt{file-stretch.fld} can be processed in a similar
way as described in section \ref{s:utilities:fieldconvert:sub:convert}
to visualise it either in Tecplot or in Paraview.
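For example, a sketch of stretching the domain by a factor of two while
requesting 16 output modes (the factor and mode count here are purely
illustrative) might be:
\begin{lstlisting}[style=BashInputStyle]
FieldConvert -m homstretch:factor=2 --output-points-hom-z 16 \
        file.xml file.fld file-stretch.fld
\end{lstlisting}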
\subsection{Inner Product of a single or series of fields with respect to a single or series of fields: \textit{innerproduct} module}
You can take the inner product of one field with another field using
......
......@@ -36,169 +36,16 @@
#include <LibUtilities/BasicUtils/PtsField.h>
using namespace std;
namespace Nektar
{
namespace LibUtilities
{
/**
* @brief Compute the weights for an interpolation of the field values to physical points
*
* @param physCoords coordinates of the physical points
* @param coordId id of the coordinate to use for interpolation.
*
* Set coordId to -1 to use n-D interpolation for an n-dimensional field.
* The most suitable algorithm is chosen automatically.
*/
void PtsField::CalcWeights(
const Array<OneD, Array<OneD, NekDouble> > &physCoords,
short coordId)
{
ASSERTL1(physCoords.num_elements() >= m_dim,
"physCoords is smaller than number of dimesnions");
int nPhysPts = physCoords[0].num_elements();
int lastProg = 0;
m_weights = Array<OneD, Array<OneD, float> >(nPhysPts);
m_neighInds = Array<OneD, Array<OneD, unsigned int> >(nPhysPts);
// Compute the interpolation weights for each physical point
for (int i = 0; i < nPhysPts; ++i)
{
Array<OneD, NekDouble> physPt(m_dim);
for (int j = 0; j < m_dim; ++j)
{
physPt[j] = physCoords[j][i];
}
if (m_dim == 1 || coordId >= 0)
{
if (m_dim == 1)
{
coordId = 0;
}
if (m_pts[0].num_elements() <= 2)
{
CalcW_Linear(i, physPt[coordId]);
}
else
{
CalcW_Quadratic(i, physPt[coordId]);
}
}
else
{
CalcW_Shepard(i, physPt);
}
int progress = int(100 * i / nPhysPts);
if (m_progressCallback && progress > lastProg)
{
m_progressCallback(i, nPhysPts);
lastProg = progress;
}
}
}
/**
* @brief Compute weights and perform the interpolation of field values to physical points
*
* @param physCoords coordinates of the physical points
* @param intFields interpolated field at the physical points
* @param coordId id of the coordinate to use for interpolation.
*
* Set coordId to -1 to use n-D interpolation for an n-dimensional field.
* The most suitable algorithm is chosen automatically.
*/
void PtsField::Interpolate(
const Array< OneD, Array< OneD, NekDouble > > &physCoords,
Array<OneD, Array<OneD, NekDouble> > &intFields,
short int coordId)
{
CalcWeights(physCoords, coordId);
Interpolate(intFields);
}
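/*
 * Illustrative usage sketch (an assumption, not part of the original file):
 * interpolate an already-populated 2D PtsField onto a set of target physical
 * points. The helper name and all variables here are hypothetical.
 */
static void ExampleInterpolatePts(PtsFieldSharedPtr ptsField, int nPts)
{
    // Coordinates of the target physical points.
    Array<OneD, Array<OneD, NekDouble> > physCoords(2);
    physCoords[0] = Array<OneD, NekDouble>(nPts, 0.0); // x-coordinates
    physCoords[1] = Array<OneD, NekDouble>(nPts, 0.0); // y-coordinates

    // coordId = -1 selects n-D interpolation (modified Shepard for n > 1);
    // the weights are computed internally before the fields are evaluated.
    Array<OneD, Array<OneD, NekDouble> > intFields;
    ptsField->Interpolate(physCoords, intFields, -1);
}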
/**
* @brief Perform the interpolation of field values to physical points
*
* @param intFields interpolated field at the physical points
*
* The weights must have already been computed by CalcWeights() or set by
* SetWeights().
*/
void PtsField::Interpolate(Array<OneD, Array<OneD, NekDouble> > &intFields)
{
ASSERTL1(m_weights[0].num_elements() == m_neighInds[0].num_elements(),
"weights / neighInds mismatch");
int nFields = m_fieldNames.size();
int nPhysPts = m_weights.num_elements();
// Apply the precomputed weights to every field at each physical point
intFields = Array<OneD, Array<OneD, NekDouble> >(nFields);
for (int i = 0; i < nFields; ++i)
{
intFields[i] = Array<OneD, NekDouble>(nPhysPts);
for (int j = 0; j < nPhysPts; ++j)
{
intFields[i][j] = 0.0;
int nPts = m_weights[j].num_elements();
for (int k = 0; k < nPts; ++k)
{
unsigned int nIdx = m_neighInds[j][k];
intFields[i][j] += m_weights[j][k] * m_pts[m_dim + i][nIdx];
}
}
}
}
/**
* @brief Set the interpolation weights for an interpolation
*
* @param weights Interpolation weights for each neighbour.
* Structure: m_weights[physPtIdx][neighbourIdx]
* @param neighbourInds Indices of the relevant neighbours for each physical point.
* Structure: m_neighInds[ptIdx][neighbourIdx]
*/
void PtsField::SetWeights(
const Array< OneD, Array< OneD, float > > &weights,
const Array< OneD, Array< OneD, unsigned int > > &neighbourInds)
{
ASSERTL0(weights.num_elements() == neighbourInds.num_elements(),
"weights and neighbourInds do not have the same number of physical points");
m_weights = weights;
m_neighInds = neighbourInds;
}
/**
* @brief Get the interpolation weights and corresponding neighbour indices
*
* @param weights Interpolation weights for each neighbour.
* Structure: m_weights[physPtIdx][neighbourIdx]
* @param neighbourInds Indices of the relevant neighbours for each physical point.
* Structure: m_neighInds[ptIdx][neighbourIdx]
*/
void PtsField::GetWeights(
Array< OneD, Array< OneD, float > > &weights,
Array< OneD, Array< OneD, unsigned int > > &neighbourInds) const
{
weights = m_weights;
neighbourInds = m_neighInds;
}
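/*
 * Illustrative note (an assumption, not part of the original file): the
 * weights and neighbour indices can be cached via GetWeights and restored
 * via SetWeights to skip the costly weight computation when interpolating
 * further fields onto the same target points, e.g.:
 *
 *     Array<OneD, Array<OneD, float> >        w;
 *     Array<OneD, Array<OneD, unsigned int> > nIdx;
 *     ptsField->CalcWeights(physCoords, -1);
 *     ptsField->GetWeights(w, nIdx);
 *     // ... later, for the same target points ...
 *     ptsField->SetWeights(w, nIdx);
 *     ptsField->Interpolate(intFields);
 */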
PtsField::PtsField(const int dim, const Array< OneD, Array< OneD, NekDouble > > &pts):
m_dim(dim),
m_pts(pts),
m_ptsType(ePtsFile)
PtsField::PtsField(const int dim,
const Array<OneD, Array<OneD, NekDouble> > &pts)
: m_dim(dim), m_pts(pts), m_ptsType(ePtsFile)
{
for (int i = 0; i < GetNFields(); ++i)
{
......@@ -206,8 +53,6 @@ PtsField::PtsField(const int dim, const Array< OneD, Array< OneD, NekDouble > >
}
}
/**
* @brief Set the connectivity data for ePtsTetBlock and ePtsTriBlock
*
......@@ -237,46 +82,40 @@ void PtsField::SetConnectivity(const vector< Array< OneD, int > > &conn)
m_ptsConn = conn;
}
void PtsField::SetDim(const int ptsDim)
{
m_dim = ptsDim;
}
int PtsField::GetDim() const
{
return m_dim;
}
int PtsField::GetNFields() const
{
return m_fieldNames.size();
return m_pts.num_elements() - m_dim;
}
vector<std::string> PtsField::GetFieldNames() const
{
return m_fieldNames;
}
std::string PtsField::GetFieldName(const int i) const
{
return m_fieldNames[i];
}
void PtsField::SetFieldNames(const vector<std::string> fieldNames)
{
ASSERTL0(fieldNames.size() == m_pts.num_elements() - m_dim,
"Number of given fieldNames does not match the number of stored fields");
"Number of given fieldNames does not match the number of stored "
"fields");
m_fieldNames = fieldNames;
}
void PtsField::AddField(const Array< OneD, NekDouble > &pts,
const string fieldName)
{
......@@ -298,31 +137,54 @@ void PtsField::AddField(const Array< OneD, NekDouble > &pts,
m_fieldNames.push_back(fieldName);
}
void PtsField::AddPoints(const Array<OneD, const Array<OneD, NekDouble> > &pts)
{
ASSERTL1(pts.num_elements() == m_pts.num_elements(),
"number of variables mismatch");
// TODO: don't copy, don't iterate
for (int i = 0; i < m_pts.num_elements(); ++i)
{
Array<OneD, NekDouble> tmp(m_pts[i].num_elements() + pts[i].num_elements());
for (int j = 0; j < m_pts[i].num_elements(); ++j)
{
tmp[j] = m_pts[i][j];
}
for (int j = 0; j < pts[i].num_elements(); ++j)
{
tmp[m_pts[i].num_elements() + j] = pts[i][j];
}
m_pts[i] = tmp;
}
}
int PtsField::GetNpoints() const
{
return m_pts[0].num_elements();
}
NekDouble PtsField::GetPointVal(const int fieldInd, const int ptInd) const
{
return m_pts[fieldInd][ptInd];
}
void PtsField::SetPointVal(const int fieldInd,
const int ptInd,
const NekDouble val)
{
m_pts[fieldInd][ptInd] = val;
}
void PtsField::GetPts(Array< OneD, Array< OneD, NekDouble > > &pts) const
{
pts = m_pts;
}
Array< OneD, NekDouble > PtsField::GetPts(const int fieldInd) const
{
return m_pts[fieldInd];
}
void PtsField::SetPts(Array< OneD, Array< OneD, NekDouble > > &pts)
{
ASSERTL1(pts.num_elements() == m_pts.num_elements(),
......@@ -331,13 +193,11 @@ void PtsField::SetPts(Array< OneD, Array< OneD, NekDouble > > &pts)
m_pts = pts;
}
vector<int> PtsField::GetPointsPerEdge() const
{
return m_nPtsPerEdge;
}
int PtsField::GetPointsPerEdge(const int i) const
{
return m_nPtsPerEdge[i];
......@@ -352,20 +212,18 @@ int PtsField::GetPointsPerEdge(const int i) const
*/
void PtsField::SetPointsPerEdge(const vector< int > nPtsPerEdge)
{
ASSERTL0(m_ptsType == ePtsLine || m_ptsType == ePtsPlane ||
m_ptsType == ePtsBox,
ASSERTL0(
m_ptsType == ePtsLine || m_ptsType == ePtsPlane || m_ptsType == ePtsBox,
"SetPointsPerEdge only supported for ePtsLine, ePtsPlane and ePtsBox.");
m_nPtsPerEdge = nPtsPerEdge;
}
PtsType PtsField::GetPtsType() const
{
return m_ptsType;
}
void PtsField::SetPtsType(const PtsType type)
{
m_ptsType = type;
......@@ -380,255 +238,5 @@ void PtsField::SetBoxSize(const vector< NekDouble> boxSize)
{
m_boxSize = boxSize;
}
/**
* @brief Compute interpolation weights for a 1D physical point using linear
* interpolation.
*
* @param physPtIdx The index of the physical point in its storage array
* @param coord The coordinate of the physical point
*/
void PtsField::CalcW_Linear(const int physPtIdx, const NekDouble coord)
{
int npts = m_pts[0].num_elements();
int i;
int numPts = 2;
m_neighInds[physPtIdx] = Array<OneD, unsigned int> (numPts);
m_weights[physPtIdx] = Array<OneD, float> (numPts, 0.0);
for (i = 0; i < npts - 1; ++i)
{
if ((m_pts[0][i] <= (coord + NekConstants::kNekZeroTol)) &&
(coord <= (m_pts[0][i + 1] + NekConstants::kNekZeroTol)))
{
NekDouble pdiff = m_pts[0][i + 1] - m_pts[0][i];
m_neighInds[physPtIdx][0] = i;
m_neighInds[physPtIdx][1] = i + 1;
m_weights[physPtIdx][0] = (m_pts[0][i + 1] - coord) / pdiff;
m_weights[physPtIdx][1] = (coord - m_pts[0][i]) / pdiff;
break;
}
}
ASSERTL0(i != npts - 1, "Failed to find coordinate " +
boost::lexical_cast<string>(coord) +
" within provided input points");
}
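/*
 * Worked example (illustrative): for source points x = {0.0, 0.5, 1.0} and
 * target coordinate 0.375, the enclosing interval is [0.0, 0.5], so
 * neighInds = {0, 1}, pdiff = 0.5 and the weights are
 *     w0 = (0.5 - 0.375) / 0.5 = 0.25,   w1 = (0.375 - 0.0) / 0.5 = 0.75,
 * giving the interpolated value 0.25 * f(x0) + 0.75 * f(x1).
 */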
/**
* @brief Compute interpolation weights for a physical point using a modified
* Shepard algorithm.
*