////////////////////////////////////////////////////////////////////////////////
//
// File: FieldIOHdf5.cpp
//
// For more information, please see: http://www.nektar.info/
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// License for the specific language governing rights and limitations under
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: I/O routines relating to Fields into HDF
//
////////////////////////////////////////////////////////////////////////////////
#include <LibUtilities/BasicUtils/FieldIOHdf5.h>
#include <LibUtilities/BasicUtils/ParseUtils.h>
#include <unordered_set>
#include <functional>
namespace berrc = boost::system::errc;
namespace Nektar
{
namespace LibUtilities
{
namespace H5
{
// Map the BasisType enumeration onto HDF5's native int type, so that basis
// specifications can be written to / read from file as plain integers.
template <> inline DataTypeSharedPtr DataTypeTraits<BasisType>::GetType()
{
    return PredefinedDataType::Native<int>();
}
}
std::string FieldIOHdf5::className =
    GetFieldIOFactory().RegisterCreatorFunction(
        "Hdf5", FieldIOHdf5::create, "HDF5-based output of field data.");

/// Version of the Nektar++ HDF5 format, which is embedded into the main NEKTAR
/// group as an attribute.
const unsigned int FieldIOHdf5::FORMAT_VERSION = 1;

// The following definitions allow us to consistently refer to indexes pulled
// out of the various datasets.

/// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
/// position of the number of elements in decomposition (i.e. field definition).
const unsigned int FieldIOHdf5::ELEM_DCMP_IDX = 0;
/// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
/// position of the number of data points in decomposition (i.e. field
/// definition).
const unsigned int FieldIOHdf5::VAL_DCMP_IDX = 1;
/// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
/// position of the number of elements multiplied by the dimension of the
/// element, giving number of modes when variable polynomial order is defined.
const unsigned int FieldIOHdf5::ORDER_DCMP_IDX = 2;
/// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
/// position of the number of y-planes for homogeneous
/// simulations.
const unsigned int FieldIOHdf5::HOMY_DCMP_IDX = 3;
/// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
/// position of the number of z-planes for homogeneous
/// simulations.
const unsigned int FieldIOHdf5::HOMZ_DCMP_IDX = 4;
/// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
/// position of the number of strips for homogeneous simulations.
const unsigned int FieldIOHdf5::HOMS_DCMP_IDX = 5;
/// The hash of the field definition information, which defines the name of the
/// attribute containing the field definition itself.
const unsigned int FieldIOHdf5::HASH_DCMP_IDX = 6;
/// A helper for FieldIOHdf5::v_Write. Describes the maximum number of items in
/// the decomposition per field definition.
const unsigned int FieldIOHdf5::MAX_DCMPS = FieldIOHdf5::HASH_DCMP_IDX + 1;

/// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
/// elements in the cnt array.
const unsigned int FieldIOHdf5::ELEM_CNT_IDX = 0;
/// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
/// data points in the cnt array.
const unsigned int FieldIOHdf5::VAL_CNT_IDX = 1;
/// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
/// order points in the cnt array.
const unsigned int FieldIOHdf5::ORDER_CNT_IDX = 2;
/// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
/// homogeneous y-planes in the cnt array.
const unsigned int FieldIOHdf5::HOMY_CNT_IDX = 3;
/// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
/// homogeneous z-planes in the cnt array.
const unsigned int FieldIOHdf5::HOMZ_CNT_IDX = 4;
/// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
/// homogeneous strips in the cnt array.
const unsigned int FieldIOHdf5::HOMS_CNT_IDX = 5;
/// A helper for FieldIOHdf5::v_Write. Describes the maximum number of items in
/// the cnt array per field definition.
const unsigned int FieldIOHdf5::MAX_CNTS = FieldIOHdf5::HOMS_CNT_IDX + 1;

/// A helper for FieldIOHdf5::v_Write. Describes the position of the element IDs
/// within the indexing set.
const unsigned int FieldIOHdf5::IDS_IDX_IDX = 0;
/// A helper for FieldIOHdf5::v_Write. Describes the position of the data size
/// within the indexing set.
const unsigned int FieldIOHdf5::DATA_IDX_IDX = 1;
/// A helper for FieldIOHdf5::v_Write. Describes the position of the element
/// order within the indexing set.
const unsigned int FieldIOHdf5::ORDER_IDX_IDX = 2;
/// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
/// y-planes within the indexing set.
const unsigned int FieldIOHdf5::HOMY_IDX_IDX = 3;
/// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
/// z-planes within the indexing set.
const unsigned int FieldIOHdf5::HOMZ_IDX_IDX = 4;
/// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
/// homogeneous strips within the indexing set.
const unsigned int FieldIOHdf5::HOMS_IDX_IDX = 5;
/// A helper for FieldIOHdf5::v_Write. Describes the maximum number of items in
/// the indexing set.
const unsigned int FieldIOHdf5::MAX_IDXS = FieldIOHdf5::HOMS_IDX_IDX + 1;
/**
* @brief Construct the FieldIO object for HDF5 output.
*
* @param pComm Communicator object.
* @param sharedFilesystem True if this system has a shared filesystem.
*/
FieldIOHdf5::FieldIOHdf5(LibUtilities::CommSharedPtr pComm,
                         bool sharedFilesystem)
    : FieldIO(pComm, sharedFilesystem)
{
    // All state lives in the FieldIO base class; nothing extra to initialise.
}
/**
* @brief Write a HDF5 file to @p outFile given the field definitions @p
* fielddefs, field data @p fielddata and metadata @p fieldmetadatamap.
*
* The writing strategy for HDF5 output is as follows:
*
* - Each rank determines the amount of data needed to be written into each
* dataset.
* - Each rank communicates its decomposition information to the root process.
* - The root processor initialises the output structure, writes the
* decomposition dataset and all the field definition information.
* - Other ranks may have field definitions that do not belong to the root
* process, in which case they open the file and append this (since
* attributes cannot be written in parallel).
* - Each of the other ranks writes their data contributions to the rest of
* the set.
*
* @param outFile Output filename.
* @param fielddefs Input field definitions.
* @param fielddata Input field data.
* @param fieldmetadatamap Field metadata.
 * @return The number of bytes written.
 */
uint64_t FieldIOHdf5::v_Write(const std::string &outFile,
std::vector<FieldDefinitionsSharedPtr> &fielddefs,
std::vector<std::vector<NekDouble> > &fielddata,
const FieldMetaDataMap &fieldmetadatamap,
const bool backup)
Dave Moxey
committed
{
std::stringstream prfx;
prfx << m_comm->GetRank() << ": FieldIOHdf5::v_Write(): ";
uint64_t nWritten = 0;
Dave Moxey
committed
double tm0 = 0.0, tm1 = 0.0;
Dave Moxey
committed
tm0 = m_comm->Wtime();
}
SetUpOutput(outFile, false, backup);
// We make a number of assumptions in this code:
// 1. All element ids have the same type: unsigned int
// 2. All elements within a given field have the same number of values
// 3. All element values have the same type, NekDouble
// Determine the root MPI process, i.e., the lowest ranked process handling
// nMaxFields fields, that will be responsible for writing our file.
Dave Moxey
committed
ASSERTL1(fielddefs.size() == fielddata.size(),
prfx.str() + "fielddefs and fielddata have incompatible lengths.");
std::size_t nFields = fielddefs.size();
std::size_t nMaxFields = nFields;
std::size_t nMinFields = nFields;
Dave Moxey
committed
m_comm->AllReduce(nMaxFields, LibUtilities::ReduceMax);
m_comm->AllReduce(nMinFields, LibUtilities::ReduceMin);
//cout << prfx.str() << "nFields " << nFields << endl;
Dave Moxey
committed
int root_rank = -1;
bool amRoot = false;
LibUtilities::CommSharedPtr max_fields_comm;
if (m_comm->GetSize() > 1)
{
max_fields_comm = m_comm->CommCreateIf((nFields == nMaxFields) ? 1 : 0);
}
else
{
max_fields_comm = m_comm;
}
Dave Moxey
committed
if (max_fields_comm)
Dave Moxey
committed
int rank = m_comm->GetRank();
Dave Moxey
committed
root_rank = rank;
max_fields_comm->AllReduce(root_rank, LibUtilities::ReduceMin);
amRoot = (rank == root_rank);
if (!amRoot)
Dave Moxey
committed
{
Dave Moxey
committed
root_rank = -1;
Dave Moxey
committed
}
Dave Moxey
committed
Dave Moxey
committed
m_comm->AllReduce(root_rank, LibUtilities::ReduceMax);
Dave Moxey
committed
ASSERTL1(root_rank >= 0 && root_rank < m_comm->GetSize(),
prfx.str() + "invalid root rank.");
std::vector<uint64_t> decomps(nMaxFields * MAX_DCMPS, 0);
std::vector<uint64_t> all_hashes(nMaxFields * m_comm->GetSize(), 0);
std::vector<uint64_t> cnts(MAX_CNTS, 0);
Dave Moxey
committed
std::vector<std::string> fieldNames(nFields);
std::vector<std::string> shapeStrings(nFields);
std::vector<std::vector<NekDouble> > homoLengths(nFields);
std::vector<std::vector<unsigned int> > homoSIDs(nFields),
homoYIDs(nFields), homoZIDs(nFields);
Dave Moxey
committed
std::vector<std::vector<unsigned int> > numModesPerDirVar(nFields);
std::vector<std::string> numModesPerDirUni(nFields);
int homDim = -1;
int varOrder = 0;
for (std::size_t f = 0; f < nFields; ++f)
Dave Moxey
committed
{
if (!fielddefs[f]->m_uniOrder)
{
varOrder = 1;
break;
}
}
m_comm->AllReduce(varOrder, LibUtilities::ReduceMax);
Dave Moxey
committed
// Calculate the total number of elements handled by this MPI process and
// the total number of bytes required to store the elements. Base the name
for (std::size_t f = 0; f < nFields; ++f)
Dave Moxey
committed
{
ASSERTL1(fielddata[f].size() > 0,
prfx.str() +
"fielddata vector must contain at least one value.");
ASSERTL1(fielddata[f].size() ==
fielddefs[f]->m_fields.size() *
CheckFieldDefinition(fielddefs[f]),
prfx.str() + "fielddata vector has invalid size.");
Dave Moxey
committed
std::size_t nFieldElems = fielddefs[f]->m_elementIDs.size();
Dave Moxey
committed
std::size_t nElemVals = fielddata[f].size();
Dave Moxey
committed
decomps[f * MAX_DCMPS + ELEM_DCMP_IDX] = nFieldElems;
decomps[f * MAX_DCMPS + VAL_DCMP_IDX] = nElemVals;
cnts[ELEM_CNT_IDX] += nFieldElems;
Dave Moxey
committed
cnts[VAL_CNT_IDX] += nElemVals;
Dave Moxey
committed
Dave Moxey
committed
std::stringstream hashStream;
std::size_t nSubFields = fielddefs[f]->m_fields.size();
for (std::size_t sf = 0; sf < nSubFields; ++sf)
Dave Moxey
committed
{
Dave Moxey
committed
hashStream << fielddefs[f]->m_fields[sf];
Dave Moxey
committed
}
Dave Moxey
committed
nSubFields = fielddefs[f]->m_basis.size();
for (std::size_t sf = 0; sf < nSubFields; ++sf)
Dave Moxey
committed
{
Dave Moxey
committed
hashStream << fielddefs[f]->m_basis[sf];
Dave Moxey
committed
}
Dave Moxey
committed
Dave Moxey
committed
std::stringstream shapeStringStream;
shapeStringStream << ShapeTypeMap[fielddefs[f]->m_shapeType];
Dave Moxey
committed
if (fielddefs[f]->m_numHomogeneousDir > 0)
Dave Moxey
committed
{
Dave Moxey
committed
if (homDim == -1)
{
homDim = fielddefs[f]->m_numHomogeneousDir;
}
ASSERTL1(homDim == fielddefs[f]->m_numHomogeneousDir,
"HDF5 does not support variable homogeneous directions in "
"the same file.");
shapeStringStream << "-HomogenousExp"
<< fielddefs[f]->m_numHomogeneousDir << "D";
Dave Moxey
committed
}
Dave Moxey
committed
Dave Moxey
committed
if (fielddefs[f]->m_homoStrips)
{
shapeStringStream << "-Strips";
}
Dave Moxey
committed
Dave Moxey
committed
shapeStrings[f] = shapeStringStream.str();
hashStream << shapeStringStream.str();
Dave Moxey
committed
if (fielddefs[f]->m_numHomogeneousDir)
{
nSubFields = fielddefs[f]->m_homogeneousLengths.size();
homoLengths[f].resize(nSubFields);
for (std::size_t sf = 0; sf < nSubFields; ++sf)
NekDouble len = fielddefs[f]->m_homogeneousLengths[sf];
Dave Moxey
committed
hashStream << len;
homoLengths[f][sf] = len;
Dave Moxey
committed
nSubFields = fielddefs[f]->m_homogeneousYIDs.size();
if (nSubFields > 0)
Dave Moxey
committed
homoYIDs[f].resize(nSubFields);
decomps[f * MAX_DCMPS + HOMY_DCMP_IDX] = nSubFields;
Dave Moxey
committed
cnts[HOMY_CNT_IDX] += nSubFields;
for (std::size_t sf = 0; sf < nSubFields; ++sf)
Dave Moxey
committed
homoYIDs[f][sf] = fielddefs[f]->m_homogeneousYIDs[sf];
Dave Moxey
committed
nSubFields = fielddefs[f]->m_homogeneousZIDs.size();
if (nSubFields > 0)
Dave Moxey
committed
homoZIDs[f].resize(nSubFields);
decomps[f * MAX_DCMPS + HOMZ_DCMP_IDX] = nSubFields;
Dave Moxey
committed
cnts[HOMZ_CNT_IDX] += nSubFields;
for (std::size_t sf = 0; sf < nSubFields; ++sf)
{
Dave Moxey
committed
homoZIDs[f][sf] = fielddefs[f]->m_homogeneousZIDs[sf];
}
Dave Moxey
committed
nSubFields = fielddefs[f]->m_homogeneousSIDs.size();
if (nSubFields > 0)
Dave Moxey
committed
homoSIDs[f].resize(nSubFields);
decomps[f * MAX_DCMPS + HOMS_DCMP_IDX] = nSubFields;
cnts[HOMS_CNT_IDX] += nSubFields;
for (std::size_t sf = 0; sf < nSubFields; ++sf)
Dave Moxey
committed
homoSIDs[f][sf] = fielddefs[f]->m_homogeneousSIDs[sf];
Dave Moxey
committed
}
if (fielddefs[f]->m_uniOrder)
Dave Moxey
committed
std::vector<unsigned int> elemModes(fielddefs[f]->m_basis.size());
Dave Moxey
committed
for (std::vector<int>::size_type i = 0;
Dave Moxey
committed
i < fielddefs[f]->m_basis.size(); ++i)
Dave Moxey
committed
elemModes[i] = fielddefs[f]->m_numModes[i];
}
if (varOrder)
{
for (std::vector<int>::size_type i = 0; i < nFieldElems; ++i)
{
std::copy(elemModes.begin(), elemModes.end(),
std::back_inserter(numModesPerDirVar[f]));
}
decomps[f * MAX_DCMPS + ORDER_DCMP_IDX] =
nFieldElems * elemModes.size();
Dave Moxey
committed
cnts[ORDER_CNT_IDX] += nFieldElems * elemModes.size();
}
else
{
stringstream numModesStringStream;
numModesStringStream << "UNIORDER:";
for (std::vector<int>::size_type i = 0;
i < elemModes.size(); i++)
{
if (i > 0)
{
numModesStringStream << ",";
}
numModesStringStream << elemModes[i];
}
numModesPerDirUni[f] = numModesStringStream.str();
hashStream << numModesPerDirUni[f];
Dave Moxey
committed
}
else
{
Dave Moxey
committed
numModesPerDirVar[f] = fielddefs[f]->m_numModes;
decomps[f * MAX_DCMPS + ORDER_DCMP_IDX] =
fielddefs[f]->m_numModes.size();
Dave Moxey
committed
cnts[ORDER_CNT_IDX] += fielddefs[f]->m_numModes.size();
Dave Moxey
committed
}
std::hash<std::string> string_hasher;
Dave Moxey
committed
std::stringstream fieldNameStream;
uint64_t fieldDefHash = string_hasher(hashStream.str());
Dave Moxey
committed
decomps[f * MAX_DCMPS + HASH_DCMP_IDX] = fieldDefHash;
Dave Moxey
committed
all_hashes[m_comm->GetRank() * nMaxFields + f] = fieldDefHash;
Dave Moxey
committed
Dave Moxey
committed
fieldNameStream << fieldDefHash;
fieldNames[f] = fieldNameStream.str();
Dave Moxey
committed
std::vector<uint64_t> all_cnts = m_comm->Gather(root_rank, cnts);
std::vector<uint64_t> all_idxs(m_comm->GetSize() * MAX_IDXS, 0);
std::vector<uint64_t> all_decomps = m_comm->Gather(root_rank, decomps);
std::vector<uint64_t> all_dsetsize(MAX_CNTS, 0);
Dave Moxey
committed
Dave Moxey
committed
if (amRoot)
{
H5::FileSharedPtr outfile = H5::File::Create(outFile, H5F_ACC_TRUNC);
ASSERTL1(outfile, prfx.str() + "cannot create HDF5 file.");
H5::GroupSharedPtr root = outfile->CreateGroup("NEKTAR");
ASSERTL1(root, prfx.str() + "cannot create root group.");
TagWriterSharedPtr info_writer(new H5TagWriter(root));
AddInfoTag(info_writer, fieldmetadatamap);
// Record file format version as attribute in main group.
root->SetAttribute("FORMAT_VERSION", FORMAT_VERSION);
nWritten += sizeof(FORMAT_VERSION);
// Calculate the indexes to be used by each MPI process when reading the
Dave Moxey
committed
// IDS and DATA datasets
Dave Moxey
committed
std::size_t nTotElems = 0, nTotVals = 0, nTotOrder = 0;
std::size_t nTotHomY = 0, nTotHomZ = 0, nTotHomS = 0;
Dave Moxey
committed
int nRanks = m_comm->GetSize();
for (int r = 0; r < nRanks; ++r)
{
Dave Moxey
committed
all_idxs[r * MAX_IDXS + IDS_IDX_IDX] = nTotElems;
all_idxs[r * MAX_IDXS + DATA_IDX_IDX] = nTotVals;
all_idxs[r * MAX_IDXS + ORDER_IDX_IDX] = nTotOrder;
all_idxs[r * MAX_IDXS + HOMY_IDX_IDX] = nTotHomY;
all_idxs[r * MAX_IDXS + HOMZ_IDX_IDX] = nTotHomZ;
all_idxs[r * MAX_IDXS + HOMS_IDX_IDX] = nTotHomS;
Dave Moxey
committed
nTotElems += all_cnts[r * MAX_CNTS + ELEM_CNT_IDX];
Dave Moxey
committed
nTotVals += all_cnts[r * MAX_CNTS + VAL_CNT_IDX];
nTotOrder += all_cnts[r * MAX_CNTS + ORDER_CNT_IDX];
nTotHomY += all_cnts[r * MAX_CNTS + HOMY_CNT_IDX];
nTotHomZ += all_cnts[r * MAX_CNTS + HOMZ_CNT_IDX];
nTotHomS += all_cnts[r * MAX_CNTS + HOMS_CNT_IDX];
Dave Moxey
committed
}
Dave Moxey
committed
all_dsetsize[ELEM_CNT_IDX ] = nTotElems;
all_dsetsize[VAL_CNT_IDX ] = nTotVals;
all_dsetsize[ORDER_CNT_IDX] = nTotOrder;
all_dsetsize[HOMY_CNT_IDX ] = nTotHomY;
all_dsetsize[HOMZ_CNT_IDX ] = nTotHomZ;
all_dsetsize[HOMS_CNT_IDX ] = nTotHomS;
Dave Moxey
committed
// Create DECOMPOSITION dataset: basic field info for each MPI process
Dave Moxey
committed
H5::DataTypeSharedPtr decomps_type =
H5::DataType::OfObject(all_decomps[0]);
H5::DataSpaceSharedPtr decomps_space =
H5::DataSpace::OneD(all_decomps.size());
H5::DataSetSharedPtr decomps_dset =
root->CreateDataSet("DECOMPOSITION", decomps_type, decomps_space);
ASSERTL1(decomps_dset,
prfx.str() + "cannot create DECOMPOSITION dataset.");
Dave Moxey
committed
H5::DataTypeSharedPtr ids_type =
H5::DataType::OfObject(fielddefs[0]->m_elementIDs[0]);
H5::DataSpaceSharedPtr ids_space = H5::DataSpace::OneD(nTotElems);
H5::DataSetSharedPtr ids_dset =
Dave Moxey
committed
root->CreateDataSet("ELEMENTIDS", ids_type, ids_space);
ASSERTL1(ids_dset, prfx.str() + "cannot create ELEMENTIDS dataset.");
Dave Moxey
committed
Dave Moxey
committed
H5::DataTypeSharedPtr data_type =
H5::DataType::OfObject(fielddata[0][0]);
H5::DataSpaceSharedPtr data_space = H5::DataSpace::OneD(nTotVals);
H5::DataSetSharedPtr data_dset =
root->CreateDataSet("DATA", data_type, data_space);
ASSERTL1(data_dset, prfx.str() + "cannot create DATA dataset.");
Dave Moxey
committed
// Create HOMOGENEOUSYIDS dataset: homogeneous y-plane IDs
if (nTotHomY > 0)
{
H5::DataTypeSharedPtr homy_type =
H5::DataType::OfObject(homoYIDs[0][0]);
H5::DataSpaceSharedPtr homy_space = H5::DataSpace::OneD(nTotHomY);
H5::DataSetSharedPtr homy_dset =
root->CreateDataSet("HOMOGENEOUSYIDS", homy_type, homy_space);
ASSERTL1(homy_dset,
prfx.str() + "cannot create HOMOGENEOUSYIDS dataset.");
}
// Create HOMOGENEOUSYIDS dataset: homogeneous z-plane IDs
if (nTotHomZ > 0)
{
H5::DataTypeSharedPtr homz_type =
H5::DataType::OfObject(homoZIDs[0][0]);
H5::DataSpaceSharedPtr homz_space = H5::DataSpace::OneD(nTotHomZ);
H5::DataSetSharedPtr homz_dset =
root->CreateDataSet("HOMOGENEOUSZIDS", homz_type, homz_space);
Dave Moxey
committed
prfx.str() + "cannot create HOMOGENEOUSZIDS dataset.");
}
// Create HOMOGENEOUSSIDS dataset: homogeneous strip IDs
if (nTotHomS > 0)
Dave Moxey
committed
{
H5::DataTypeSharedPtr homs_type =
Dave Moxey
committed
H5::DataType::OfObject(homoSIDs[0][0]);
H5::DataSpaceSharedPtr homs_space = H5::DataSpace::OneD(nTotHomS);
H5::DataSetSharedPtr homs_dset =
root->CreateDataSet("HOMOGENEOUSSIDS", homs_type, homs_space);
ASSERTL1(homs_dset,
Dave Moxey
committed
prfx.str() + "cannot create HOMOGENEOUSSIDS dataset.");
}
// Create POLYORDERS dataset: elemental polynomial orders
if (varOrder)
{
H5::DataTypeSharedPtr order_type =
H5::DataType::OfObject(numModesPerDirVar[0][0]);
H5::DataSpaceSharedPtr order_space = H5::DataSpace::OneD(nTotOrder);
H5::DataSetSharedPtr order_dset =
root->CreateDataSet("POLYORDERS", order_type, order_space);
ASSERTL1(order_dset,
prfx.str() + "cannot create POLYORDERS dataset.");
}
Dave Moxey
committed
Dave Moxey
committed
m_comm->Bcast(all_dsetsize, root_rank);
// Datasets, root group and HDF5 file are all closed automatically since
// they are now out of scope. Now we need to determine which process will
// write the group representing the field description in the HDF5 file. This
// next block of code performs this by finding all unique hashes and then
// determining one process that will create (possibly more than one) group
// for that hash. An alternative would be to communicate the field
// information to the root processor, but this is a bit convoluted.
// This set stores the unique hashes.
std::set<uint64_t> hashToProc;
std::map<int, std::vector<uint64_t> > writingProcs;
Dave Moxey
committed
// Gather all field hashes to every processor.
m_comm->AllReduce(all_hashes, LibUtilities::ReduceMax);
for (int n = 0; n < m_comm->GetSize(); ++n)
{
for (std::size_t i = 0; i < nMaxFields; ++i)
Dave Moxey
committed
{
uint64_t hash = all_hashes[n*nMaxFields + i];
// Note hash can be zero if, on this process, nFields < nMaxFields.
Dave Moxey
committed
if (hashToProc.find(hash) != hashToProc.end() || hash == 0)
{
continue;
}
Dave Moxey
committed
writingProcs[n].push_back(hash);
}
}
Dave Moxey
committed
// Having constructed the map, go ahead and write the attributes out.
map<int, std::vector<uint64_t> >::iterator sIt;
for (sIt = writingProcs.begin(); sIt != writingProcs.end(); sIt++)
Dave Moxey
committed
{
int rank = sIt->first;
Dave Moxey
committed
if (m_comm->GetRank() == rank)
{
H5::PListSharedPtr serialProps = H5::PList::Default();
H5::PListSharedPtr writeSR = H5::PList::Default();
Dave Moxey
committed
H5::FileSharedPtr outfile =
H5::File::Open(outFile, H5F_ACC_RDWR, serialProps);
ASSERTL1(outfile, prfx.str() + "cannot open HDF5 file.");
H5::GroupSharedPtr root = outfile->OpenGroup("NEKTAR");
ASSERTL1(root, prfx.str() + "cannot open root group.");
for (std::size_t i = 0; i < sIt->second.size(); ++i)
Dave Moxey
committed
{
for (std::size_t f = 0; f < nFields; ++f)
Dave Moxey
committed
{
if (sIt->second[i] !=
all_hashes[m_comm->GetRank() * nMaxFields + f] ||
hashToProc.find(sIt->second[i]) != hashToProc.end())
Dave Moxey
committed
{
continue;
}
hashToProc.insert(sIt->second[i]);
// Just in case we've already written this
H5::GroupSharedPtr field_group =
root->CreateGroup(fieldNames[f]);
ASSERTL1(field_group,
prfx.str() + "cannot create field group.");
Dave Moxey
committed
field_group->SetAttribute("FIELDS", fielddefs[f]->m_fields);
field_group->SetAttribute("BASIS", fielddefs[f]->m_basis);
field_group->SetAttribute("SHAPE", shapeStrings[f]);
Dave Moxey
committed
for (std::size_t j = 0; j < fielddefs[f]->m_fields.size(); ++j)
{
nWritten += fielddefs[f]->m_fields[j].size();
}
nWritten += fielddefs[f]->m_basis.size()*sizeof(LibUtilities::BasisType);
nWritten += shapeStrings[f].size();
Dave Moxey
committed
if (homoLengths[f].size() > 0)
Dave Moxey
committed
{
field_group->SetAttribute("HOMOGENEOUSLENGTHS",
homoLengths[f]);
nWritten += homoLengths[f].size()*sizeof(NekDouble);
Dave Moxey
committed
}
// If the field has only uniform order, we write the order
// into the NUMMODESPERDIR attribute. Otherwise, we'll go
// ahead and assume everything is mixed and fix this in the
// read later if required.
if (!varOrder)
{
field_group->SetAttribute("NUMMODESPERDIR",
numModesPerDirUni[f]);
nWritten += numModesPerDirUni[f].size();
Dave Moxey
committed
}
else
{
std::string numModesPerDir = "MIXORDER";
field_group->SetAttribute("NUMMODESPERDIR",
numModesPerDir);
nWritten += numModesPerDir.size();
Dave Moxey
committed
}
Dave Moxey
committed
}
}
}
// We block to avoid more than one processor opening the file at a time.
Dave Moxey
committed
m_comm->Block();
}
// Write the DECOMPOSITION dataset
Dave Moxey
committed
if (amRoot)
{
H5::PListSharedPtr serialProps = H5::PList::Default();
H5::PListSharedPtr writeSR = H5::PList::Default();
Dave Moxey
committed
H5::FileSharedPtr outfile =
H5::File::Open(outFile, H5F_ACC_RDWR, serialProps);
ASSERTL1(outfile, prfx.str() + "cannot open HDF5 file.");
H5::GroupSharedPtr root = outfile->OpenGroup("NEKTAR");
Dave Moxey
committed
ASSERTL1(root, prfx.str() + "cannot open root group.");
Dave Moxey
committed
H5::DataSetSharedPtr decomps_dset = root->OpenDataSet("DECOMPOSITION");
ASSERTL1(decomps_dset,
prfx.str() + "cannot open DECOMPOSITION dataset.");
H5::DataSpaceSharedPtr decomps_fspace = decomps_dset->GetSpace();
ASSERTL1(decomps_fspace,
prfx.str() + "cannot open DECOMPOSITION filespace.");
decomps_fspace->SelectRange(0, all_decomps.size());
decomps_dset->Write(all_decomps, decomps_fspace, writeSR);
nWritten += all_decomps.size()*sizeof(uint64_t);
Dave Moxey
committed
// Initialise the dataset indexes for all MPI processes
std::vector<uint64_t> idx = m_comm->Scatter(root_rank, all_idxs);
uint64_t ids_i = idx[IDS_IDX_IDX];
uint64_t data_i = idx[DATA_IDX_IDX];
uint64_t order_i = idx[ORDER_IDX_IDX];
uint64_t homy_i = idx[HOMY_IDX_IDX];
uint64_t homz_i = idx[HOMZ_IDX_IDX];
uint64_t homs_i = idx[HOMS_IDX_IDX];
Dave Moxey
committed
// Set properties for parallel file access (if we're in parallel)
Dave Moxey
committed
H5::PListSharedPtr parallelProps = H5::PList::Default();
if (m_comm->GetSize() > 1)
{
Dave Moxey
committed
parallelProps = H5::PList::FileAccess();
parallelProps->SetMpio(m_comm);
Dave Moxey
committed
Dave Moxey
committed
H5::FileSharedPtr outfile =
H5::File::Open(outFile, H5F_ACC_RDWR, parallelProps);
ASSERTL1(outfile, prfx.str() + "cannot open HDF5 file.");
H5::GroupSharedPtr root = outfile->OpenGroup("NEKTAR");
ASSERTL1(root, prfx.str() + "cannot open root group.");
m_comm->Block();
// all HDF5 groups have now been created. Open the IDS dataset and
// associated data space
Dave Moxey
committed
H5::DataSetSharedPtr ids_dset = root->OpenDataSet("ELEMENTIDS");
ASSERTL1(ids_dset, prfx.str() + "cannot open ELEMENTIDS dataset.");
Dave Moxey
committed
H5::DataSpaceSharedPtr ids_fspace = ids_dset->GetSpace();
Dave Moxey
committed
ASSERTL1(ids_fspace, prfx.str() + "cannot open ELEMENTIDS filespace.");
Dave Moxey
committed
// Open the DATA dataset and associated data space
Dave Moxey
committed
H5::DataSetSharedPtr data_dset = root->OpenDataSet("DATA");
ASSERTL1(data_dset, prfx.str() + "cannot open DATA dataset.");
H5::DataSpaceSharedPtr data_fspace = data_dset->GetSpace();
ASSERTL1(data_fspace, prfx.str() + "cannot open DATA filespace.");
Dave Moxey
committed
// Open the optional datasets and data spaces.
H5::DataSetSharedPtr order_dset, homy_dset, homz_dset, homs_dset;
H5::DataSpaceSharedPtr order_fspace , homy_fspace, homz_fspace, homs_fspace;
Dave Moxey
committed
if (all_dsetsize[ORDER_CNT_IDX])
{
order_dset = root->OpenDataSet("POLYORDERS");
ASSERTL1(order_dset, prfx.str() + "cannot open POLYORDERS dataset.");
order_fspace = order_dset->GetSpace();
ASSERTL1(order_fspace, prfx.str() + "cannot open POLYORDERS filespace.");
}
if (all_dsetsize[HOMY_CNT_IDX])
{
homy_dset = root->OpenDataSet("HOMOGENEOUSYIDS");
ASSERTL1(homy_dset, prfx.str() + "cannot open HOMOGENEOUSYIDS dataset.");
homy_fspace = homy_dset->GetSpace();
ASSERTL1(homy_fspace, prfx.str() + "cannot open HOMOGENEOUSYIDS filespace.");
}
if (all_dsetsize[HOMZ_CNT_IDX])
{
homz_dset = root->OpenDataSet("HOMOGENEOUSZIDS");
ASSERTL1(homz_dset, prfx.str() + "cannot open HOMOGENEOUSZIDS dataset.");
homz_fspace = homz_dset->GetSpace();
ASSERTL1(homz_fspace, prfx.str() + "cannot open HOMOGENEOUSZIDS filespace.");
}
if (all_dsetsize[HOMS_CNT_IDX])
Dave Moxey
committed
{
homs_dset = root->OpenDataSet("HOMOGENEOUSSIDS");
ASSERTL1(homs_dset, prfx.str() + "cannot open HOMOGENEOUSSIDS dataset.");
homs_fspace = homs_dset->GetSpace();
ASSERTL1(homs_fspace, prfx.str() + "cannot open HOMOGENEOUSSIDS filespace.");
Dave Moxey
committed
}
// we use independent (non-collective) writes for all datasets that are actually part of the field definition,
// i.e., {order_dset,homz_dset,homy_dset,homs_dset}; otherwise, we would have to assume that all fields
// were defined such that they all used the same selection of field definition datasets
nWritten += WriteFieldDataInd(nFields, order_fspace, order_dset, order_i, numModesPerDirVar);
nWritten += WriteFieldDataInd(nFields, homy_fspace, homy_dset, homy_i, homoYIDs);
nWritten += WriteFieldDataInd(nFields, homz_fspace, homz_dset, homz_i, homoZIDs);
nWritten += WriteFieldDataInd(nFields, homs_fspace, homs_dset, homs_i, homoSIDs);
// write all the element IDs and element data collectively, taking into account the fact that
// different processes maybe handling different numbers of fields
std::vector<std::vector<unsigned int> > elemIDs(nFields);
for (std::size_t f = 0; f < nFields; ++f)
Dave Moxey
committed
{
elemIDs[f] = fielddefs[f]->m_elementIDs;
Dave Moxey
committed
}
nWritten += WriteFieldData(nMinFields, nFields, ids_fspace, ids_dset, ids_i, elemIDs);
nWritten += WriteFieldData(nMinFields, nFields, data_fspace, data_dset, data_i, fielddata);
m_comm->Block();
Dave Moxey
committed
// all data has been written
//if (m_comm->GetRank() == 0)
//{
//tm1 = m_comm->Wtime();
//cout << " (" << tm1 - tm0 << "s, HDF5)" << endl;
//}
Dave Moxey
committed
return nWritten;
}
/**
* @brief Write the contributions from a number of fields to a specific dataset.
*
* The data are written independently.
*
* @param nFields the number of fields handled by this process
* @param fspace hdf5 file space
* @param dset hdf5 data set
* @param data_i starting offset into hdf5 data set for this process
* @param data vector of data vectors (one for each field) to be written to data set
* @return The number of bytes written.
*/
template <class T>
uint64_t FieldIOHdf5::WriteFieldDataInd(std::size_t nFields,
H5::DataSpaceSharedPtr &fspace, H5::DataSetSharedPtr &dset,
uint64_t data_i, std::vector<std::vector<T> > &data)
{
if (!fspace || !dset) return 0;
Dave Moxey
committed
std::size_t nDataItems = 0;
uint64_t nWritten = 0;
H5::PListSharedPtr writePL = H5::PList::DatasetXfer();
writePL->SetDxMpioIndependent();
for (std::size_t f = 0; f < nFields; ++f)
Dave Moxey
committed
{
nDataItems = data[f].size();
if (nDataItems > 0)
Dave Moxey
committed
{
fspace->SelectRange(data_i, nDataItems);
dset->Write(data[f], fspace, writePL);
data_i += nDataItems;
nWritten += nDataItems*sizeof(T);
Dave Moxey
committed
}
}
return nWritten;
}
/**
* @brief Write the contributions from a number of fields to a specific dataset.
*
* The data are written collectively; hence, this method allows for the fact that
* different processes might be handling different numbers of fields.
*
* @param nMinFields the lowest number of fields handled by any process
* @param nFields the number of fields handled by this process
* @param fspace hdf5 file space
* @param dset hdf5 data set
* @param data_i starting offset into hdf5 data set for this process
* @param data vector of data vectors (one for each field) to be written to data set
* @return The number of bytes written.
*/
template <class T>
uint64_t FieldIOHdf5::WriteFieldData(std::size_t nMinFields, std::size_t nFields,
H5::DataSpaceSharedPtr &fspace, H5::DataSetSharedPtr &dset,
uint64_t data_i, std::vector<std::vector<T> > &data)
{
if (!fspace || !dset) return 0;
bool concatenate_last_fields = nMinFields < nFields;
std::size_t nFirstFields = concatenate_last_fields ? nMinFields-1 : nFields;
std::size_t nDataItems = 0;
std::size_t f = 0;
uint64_t nWritten = 0;
H5::PListSharedPtr writePL = H5::PList::DatasetXfer();
writePL->SetDxMpioCollective();
for (; f < nFirstFields; ++f)
{
nDataItems = data[f].size();
fspace->SelectRange(data_i, nDataItems);
dset->Write(data[f], fspace, writePL);
data_i += nDataItems;
nWritten += nDataItems*sizeof(T);
Dave Moxey
committed
}
if (!concatenate_last_fields) return nWritten;
Dave Moxey
committed
std::vector<T> last_data;
nDataItems = data[f].size();
fspace->SelectRange(H5S_SELECT_SET, data_i, nDataItems);
last_data.insert(last_data.end(), data[f].begin(), data[f].end());
data_i += nDataItems;
f++;
for (; f < nFields; ++f)
Dave Moxey
committed
{
nDataItems = data[f].size();
fspace->SelectRange(H5S_SELECT_OR, data_i, nDataItems);
last_data.insert(last_data.end(), data[f].begin(), data[f].end());
data_i += nDataItems;
Dave Moxey
committed
}
dset->Write(last_data, fspace, writePL);
return nWritten + last_data.size()*sizeof(T);
/**
* @brief Import a HDF5 format file.
*
* @param finfilename Input filename
* @param fielddefs Field definitions of resulting field
* @param fielddata Field data of resulting field
* @param fieldinfomap Field metadata of resulting field
* @param ElementIDs If specified, contains the list of element IDs on
* this rank. The resulting field definitions will only
* contain data for the element IDs specified in this
* array.
 * @return The number of bytes read.
 */
uint64_t FieldIOHdf5::v_Import(const std::string &infilename,
std::vector<FieldDefinitionsSharedPtr> &fielddefs,
std::vector<std::vector<NekDouble> > &fielddata,
FieldMetaDataMap &fieldinfomap,
const Array<OneD, int> &ElementIDs)
Dave Moxey
committed
{
std::stringstream prfx;
prfx << m_comm->GetRank() << ": FieldIOHdf5::v_Import(): ";
Dave Moxey
committed
int nRanks = m_comm->GetSize();
uint64_t nRead = 0;
// Set properties for parallel file access (if we're in parallel)
Dave Moxey
committed
H5::PListSharedPtr parallelProps = H5::PList::Default();
H5::PListSharedPtr readPL = H5::PList::Default();
Dave Moxey
committed
if (nRanks > 1)
{
Dave Moxey
committed
parallelProps = H5::PList::FileAccess();
parallelProps->SetMpio(m_comm);
Dave Moxey
committed
readPL = H5::PList::DatasetXfer();
readPL->SetDxMpioCollective();
}
DataSourceSharedPtr dataSource = H5DataSource::create(
infilename, parallelProps);
Dave Moxey
committed
H5DataSourceSharedPtr h5 =
std::static_pointer_cast<H5DataSource>(dataSource);
Dave Moxey
committed
ASSERTL1(h5, prfx.str() + "cannot open HDF5 file.");
H5::GroupSharedPtr root = h5->Get()->OpenGroup("NEKTAR");
ASSERTL1(root, prfx.str() + "cannot open root group.");
// Check format version
unsigned int formatVersion;
H5::Group::AttrIterator attrIt = root->attr_begin();
H5::Group::AttrIterator attrEnd = root->attr_end();
for (; attrIt != attrEnd; ++attrIt)
{
if (*attrIt == "FORMAT_VERSION")
{
break;
}
}
ASSERTL0(attrIt != attrEnd,
"Unable to determine Nektar++ HDF5 file version");
root->GetAttribute("FORMAT_VERSION", formatVersion);
nRead += sizeof(formatVersion);
ASSERTL0(formatVersion <= FORMAT_VERSION,
"File format if " + infilename + " is higher than supported in "
"this version of Nektar++");
std::unordered_set<uint64_t> selectedIDs;
bool selective = false;
bool auto_selective = false;
if (&ElementIDs != &NullInt1DArray)
Dave Moxey
committed
{